64-bit gather/scatter fix for knc.h

Author: Anton Mitrokhin
Date:   2014-08-14 17:27:56 +04:00
parent  6b5b547e2f
commit  7adacf5a7b


@@ -40,10 +40,11 @@
 #include <immintrin.h>
 #include <zmmintrin.h>
+#define INT32_MIN (-0x7fffffff - 1)
 #include <iostream> // for operator<<(m512[i])
 #include <iomanip> // for operator<<(m512[i])
 #define FORCEINLINE __forceinline
 #ifdef _MSC_VER
 #define PRE_ALIGN(x) /*__declspec(align(x))*/
@@ -1749,6 +1750,8 @@ __gather_base_offsets32_double(uint8_t *base, uint32_t scale, __vec16_i32 offset
 static FORCEINLINE __vec16_f
 __gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                               __vec16_i1 mask) {
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo,
+                                       __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
     // There is no gather instruction with 64-bit offsets in KNC.
     // We have to manually iterate over the upper 32 bits ;-)
     __vec16_i1 still_to_do = mask;
@@ -1759,10 +1762,10 @@ __gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offset
         __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi,
                                                       __smear_i32<__vec16_i32>((int32_t)hi32),
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        ret = _mm512_mask_i32extgather_ps(ret, match, offsets.v_lo, base,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+        ret = _mm512_mask_i32extgather_ps(ret, match, signed_offsets, base,
                                           _MM_UPCONV_PS_NONE, scale,
                                           _MM_HINT_NONE);
         still_to_do = _mm512_kxor(match, still_to_do);
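
Why the bias works: the KNC i32 gathers sign-extend their 32-bit offsets, so a low word of the 64-bit offset at or above 2^31 used to be read back as a negative offset. Adding INT32_MIN is a wrap-around add that flips the top bit, mapping every low word into signed range, and the extra scale*2^31 folded into the base pointer cancels the bias, so the effective address is unchanged. A minimal scalar sketch of that address arithmetic (illustrative names, not code from the commit):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    // One lane of the fixed gather addressing, in scalar form. 'base',
    // 'offset64' and 'scale' stand in for _base, one lane of 'offsets',
    // and the gather scale; the names are illustrative.
    static uint64_t effective_address(uint64_t base, uint64_t offset64, uint32_t scale) {
        uint32_t lo = (uint32_t)offset64;          // one lane of offsets.v_lo
        uint32_t hi = (uint32_t)(offset64 >> 32);  // one lane of offsets.v_hi

        // The gather computes base + scale * sign_extend(offset32), so a lo
        // word >= 2^31 would be misread as negative. Bias it by INT32_MIN
        // (a wrap-around add that just flips the top bit)...
        int32_t signed_off = (int32_t)(lo + 0x80000000u);

        // ...and compensate in the base pointer, as the commit does with
        // scale*(unsigned long)(-(long)INT32_MIN):
        uint64_t biased_base = base
            + ((scale * (uint64_t)hi) << 32)       // per-hi32 loop step
            + scale * (uint64_t)0x80000000u;       // cancels the INT32_MIN bias

        return biased_base + (uint64_t)((int64_t)scale * signed_off);
    }

    int main() {
        // Biased and plain arithmetic must agree for any 64-bit offset,
        // including low words with the top bit set (the case the fix targets).
        for (uint64_t off : {0x00000000ull, 0x7fffffffull, 0x80000000ull,
                             0x1ffffffffull, 0xdeadbeefcafeull})
            assert(effective_address(0x1000, off, 4) == 0x1000 + 4 * off);
        std::puts("address arithmetic checks out");
        return 0;
    }
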
@@ -1776,6 +1779,8 @@ static FORCEINLINE __vec16_i8
 __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                            __vec16_i1 mask)
 {
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo,
+                                       __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
     __vec16_i1 still_to_do = mask;
     __vec16_i32 tmp;
     while (still_to_do) {
@@ -1786,8 +1791,8 @@ __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        tmp = _mm512_mask_i32extgather_epi32(tmp, match, offsets.v_lo, base,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+        tmp = _mm512_mask_i32extgather_epi32(tmp, match, signed_offsets, base,
                                              _MM_UPCONV_EPI32_SINT8, scale,
                                              _MM_HINT_NONE);
         still_to_do = _mm512_kxor(match,still_to_do);
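
The i8 variant layers an up-conversion on top of the same addressing fix: _MM_UPCONV_EPI32_SINT8 reads one byte per active lane and sign-extends it into a 32-bit element, which is why the gathered values land in a __vec16_i32 tmp even though the function returns __vec16_i8. A scalar model of that per-lane up-conversion (illustrative, not code from knc.h):

    #include <cstdint>

    // What _MM_UPCONV_EPI32_SINT8 does per lane: read one signed byte at
    // the gathered address and sign-extend it to 32 bits.
    static int32_t upconv_sint8(const uint8_t *addr) {
        return (int32_t)(int8_t)*addr;   // the int8_t cast does the sign extension
    }
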
@@ -1802,6 +1807,8 @@ static FORCEINLINE void
 __scatter_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                                __vec16_f value,
                                __vec16_i1 mask) {
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo,
+                                       __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
         int first_active_lane = _mm_tzcnt_32((int)still_to_do);
@@ -1811,8 +1818,8 @@ __scatter_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offse
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        _mm512_mask_i32extscatter_ps(base, match, offsets.v_lo,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+        _mm512_mask_i32extscatter_ps(base, match, signed_offsets,
                                      value,
                                      _MM_DOWNCONV_PS_NONE, scale,
                                      _MM_HINT_NONE);
@@ -1824,6 +1831,8 @@ static FORCEINLINE void
 __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                              __vec16_i32 value,
                              __vec16_i1 mask) {
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo,
+                                       __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
         int first_active_lane = _mm_tzcnt_32((int)still_to_do);
@@ -1833,8 +1842,8 @@ __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        _mm512_mask_i32extscatter_epi32(base, match, offsets.v_lo,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+        _mm512_mask_i32extscatter_epi32(base, match, signed_offsets,
                                         value,
                                         _MM_DOWNCONV_EPI32_NONE, scale,
                                         _MM_HINT_NONE);
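
All four routines share the same outer loop: take the first still-unfinished lane, batch every lane whose offset has the same upper 32 bits, serve the batch with a single 32-bit-offset gather or scatter against a rebased pointer, and retire those lanes from the mask. A rough scalar mirror of the gather case, assuming 16 lanes and GCC/Clang's __builtin_ctz in place of _mm_tzcnt_32 (names and types are illustrative, not the __vec16_* types of knc.h):

    #include <cstdint>

    // Scalar mirror of the while (still_to_do) loop: one batch per
    // distinct upper-32-bit offset value among the active lanes.
    static void gather64_float_sketch(const uint8_t *base, uint32_t scale,
                                      const uint64_t offsets[16],
                                      uint16_t mask, float out[16]) {
        uint16_t still_to_do = mask;
        while (still_to_do) {
            // __builtin_ctz stands in for _mm_tzcnt_32 here.
            int first_active_lane = __builtin_ctz(still_to_do);
            uint32_t hi32 = (uint32_t)(offsets[first_active_lane] >> 32);

            uint16_t match = 0;  // lanes sharing this hi32 (the cmp_epi32 mask)
            for (int i = 0; i < 16; ++i)
                if (((mask >> i) & 1) && (uint32_t)(offsets[i] >> 32) == hi32)
                    match |= (uint16_t)(1u << i);

            // On KNC this whole batch is one _mm512_mask_i32extgather_ps call
            // against the rebased pointer; here it is a plain masked loop.
            for (int i = 0; i < 16; ++i)
                if ((match >> i) & 1)
                    out[i] = *(const float *)(base + scale * offsets[i]);

            still_to_do = (uint16_t)(still_to_do ^ match);  // the kxor step
        }
    }
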