modified all gather/scatter instructions in 'knc-i1x16.h'
@@ -53,7 +53,7 @@
 #endif
 
 #define KNC 1
-#if 0
+#if 1
 extern "C"
 {
 int printf(const unsigned char *, ...);
@@ -2164,6 +2164,7 @@ static FORCEINLINE __vec16_i8 __gather_base_offsets32_i8(uint8_t *base, uint32_t
 static FORCEINLINE __vec16_i8 __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
     __vec16_i1 still_to_do = mask;
     __vec16_i32 tmp;
     while (still_to_do) {
@@ -2174,8 +2175,8 @@ static FORCEINLINE __vec16_i8 __gather_base_offsets64_i8(uint8_t *_base, uint32_
                                                       _MM_CMPINT_EQ);
 
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        tmp = _mm512_mask_i32extgather_epi32(tmp, match, offsets.v_lo, base,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+        tmp = _mm512_mask_i32extgather_epi32(tmp, match, signed_offsets, base,
                                              _MM_UPCONV_EPI32_SINT8, scale,
                                              _MM_HINT_NONE);
         still_to_do = _mm512_kxor(match,still_to_do);
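The same transformation repeats in every 64-bit gather/scatter below, so it is worth unpacking once: KNC's i32extgather/i32extscatter variants treat their 32-bit per-lane offsets as signed, so the unsigned low halves of the 64-bit offsets are biased by INT32_MIN (the new _mm512_add_epi32 line) and the base pointer is advanced by scale*2^31 to cancel the bias. A minimal scalar sketch of that identity; the constants below are arbitrary examples, not values from the ispc sources:

// check_rebase.cc -- scalar sketch of the rebasing identity the patch
// relies on; example constants only.
#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t base  = 0x1000;
    const uint32_t scale = 4;                      // element stride
    const uint64_t off   = 0x00000002fedcba98ull;  // 64-bit offset, hi32 = 2
    const uint32_t hi32  = (uint32_t)(off >> 32);
    const uint32_t lo32  = (uint32_t)off;

    // Lane offset as the instruction sees it: lo32 + INT32_MIN, reinterpreted
    // as a signed 32-bit value (what the _mm512_add_epi32 computes per lane).
    const int32_t signed_lo = (int32_t)(lo32 + (uint32_t)INT32_MIN);

    // Rebased pointer, matching the patch:
    // base' = base + ((scale*hi32) << 32) + scale * 2^31
    const uint64_t base2 = base + ((scale * (uint64_t)hi32) << 32)
                                + scale * (uint64_t)(-(int64_t)INT32_MIN);

    const uint64_t want = base + scale * off;
    const uint64_t got  = base2 + (uint64_t)((int64_t)scale * signed_lo);
    printf("%s\n", want == got ? "identity holds" : "MISMATCH");
    return want == got ? 0 : 1;
}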
@@ -2199,6 +2200,7 @@ static FORCEINLINE __vec16_i32 __gather_base_offsets32_i32(uint8_t *base, uint32
 static FORCEINLINE __vec16_i32 __gather_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
     // There is no gather instruction with 64-bit offsets in KNC.
     // We have to manually iterate over the upper 32 bits ;-)
     __vec16_i1 still_to_do = mask;
@@ -2209,10 +2211,10 @@ static FORCEINLINE __vec16_i32 __gather_base_offsets64_i32(uint8_t *_base, uint3
         __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi,
                                                       __smear_i32<__vec16_i32>((int32_t)hi32),
                                                       _MM_CMPINT_EQ);
 
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        ret = _mm512_mask_i32extgather_epi32(ret, match, offsets.v_lo, base,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+        ret = _mm512_mask_i32extgather_epi32(ret, match, signed_offsets, base,
                                              _MM_UPCONV_EPI32_NONE, scale,
                                              _MM_HINT_NONE);
         still_to_do = _mm512_kxor(match, still_to_do);
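The loop these hunks sit in is only partly visible, so a sketch of its control flow may help: with no 64-bit-offset gather on KNC, the code finds the first still-active lane (_mm_tzcnt_32), serves every lane sharing that lane's upper 32 offset bits with one masked gather, and retires them via _mm512_kxor until the mask empties. A hypothetical scalar emulation; gather64_emulated and its parameter layout are invented for illustration, not ispc API:

#include <cstdint>

// Emulates the per-hi32 iteration; one "masked gather" services all lanes
// whose upper 32 offset bits match the first active lane's.
void gather64_emulated(const uint8_t *base, uint32_t scale,
                       const uint32_t hi[16], const uint32_t lo[16],
                       uint16_t mask, int32_t out[16]) {
    uint16_t still_to_do = mask;
    while (still_to_do) {
        const int first = __builtin_ctz(still_to_do);  // GCC builtin; _mm_tzcnt_32 in the diff
        const uint32_t hi32 = hi[first];

        uint16_t match = 0;                            // the _mm512_mask_cmp_epi32_mask step
        for (int lane = 0; lane < 16; ++lane)
            if (((still_to_do >> lane) & 1) && hi[lane] == hi32)
                match |= (uint16_t)(1u << lane);

        for (int lane = 0; lane < 16; ++lane)          // one masked gather
            if ((match >> lane) & 1) {
                const uint64_t off = ((uint64_t)hi32 << 32) | lo[lane];
                out[lane] = *(const int32_t *)(base + (uint64_t)scale * off);
            }

        still_to_do ^= match;                          // the _mm512_kxor step
    }
}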
@@ -2232,6 +2234,7 @@ static FORCEINLINE __vec16_f __gather_base_offsets32_float(uint8_t *base, uint32
 static FORCEINLINE __vec16_f __gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
     // There is no gather instruction with 64-bit offsets in KNC.
     // We have to manually iterate over the upper 32 bits ;-)
     __vec16_i1 still_to_do = mask;
@@ -2244,8 +2247,8 @@ static FORCEINLINE __vec16_f __gather_base_offsets64_float(uint8_t *_base, uint3
                                                       _MM_CMPINT_EQ);
 
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        ret = _mm512_mask_i32extgather_ps(ret, match, offsets.v_lo, base,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+        ret = _mm512_mask_i32extgather_ps(ret, match, signed_offsets, base,
                                           _MM_UPCONV_PS_NONE, scale,
                                           _MM_HINT_NONE);
         still_to_do = _mm512_kxor(match, still_to_do);
@@ -2341,7 +2344,8 @@ static FORCEINLINE void __scatter_base_offsets32_i32(uint8_t *b, uint32_t scale,
 static FORCEINLINE void __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i32 value, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
 
     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
         int first_active_lane = _mm_tzcnt_32((int)still_to_do);
@@ -2351,8 +2355,8 @@ static FORCEINLINE void __scatter_base_offsets64_i32(uint8_t *_base, uint32_t sc
                                                       _MM_CMPINT_EQ);
 
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        _mm512_mask_i32extscatter_epi32(base, match, offsets.v_lo,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+        _mm512_mask_i32extscatter_epi32(base, match, signed_offsets,
                                         value,
                                         _MM_DOWNCONV_EPI32_NONE, scale,
                                         _MM_HINT_NONE);
@@ -2373,7 +2377,7 @@ static FORCEINLINE void __scatter_base_offsets64_float(uint8_t *_base, uint32_t
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
-
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
 
     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
         int first_active_lane = _mm_tzcnt_32((int)still_to_do);
||||
@@ -2383,7 +2387,7 @@ static FORCEINLINE void __scatter_base_offsets64_float(uint8_t *_base, uint32_t
|
||||
_MM_CMPINT_EQ);
|
||||
|
||||
void * base = (void*)((unsigned long)_base +
|
||||
((scale*(unsigned long)hi32) << 32) - INT32_MIN);
|
||||
((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
|
||||
|
||||
_mm512_mask_i32extscatter_ps(base, match, signed_offsets,
|
||||
value,
|
||||
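This final hunk is a genuine bug fix rather than part of the mechanical offsets.v_lo -> signed_offsets conversion: the old adjustment "- INT32_MIN" adds 2^31 to the base once, but the hardware multiplies the biased lane offsets by scale, so the compensation has to be scale*2^31. A quick standalone check with arbitrary example numbers:

#include <cstdint>
#include <cstdio>

int main() {
    const int64_t scale = 4, lo32 = 0x10;   // arbitrary example values
    // Biased offset as the hardware sees it (a signed 32-bit quantity).
    const int64_t signed_lo = (int32_t)((uint32_t)lo32 + (uint32_t)INT32_MIN);

    const int64_t old_adj = -(int64_t)INT32_MIN;          // + 2^31        (old code)
    const int64_t new_adj = scale * -(int64_t)INT32_MIN;  // + scale*2^31  (fixed)

    printf("want %lld\n", (long long)(scale * lo32));                 // 64
    printf("old  %lld\n", (long long)(old_adj + scale * signed_lo));  // off by (scale-1)*2^31
    printf("new  %lld\n", (long long)(new_adj + scale * signed_lo));  // 64
    return 0;
}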