From 9c9c77d2db26339d5462e7b639ade40560c93dec Mon Sep 17 00:00:00 2001
From: Anton Mitrokhin
Date: Tue, 12 Aug 2014 14:28:31 +0400
Subject: [PATCH 1/5] changes in __scatter_base_offsets64_float

---
 examples/intrinsics/knc-i1x16.h | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/examples/intrinsics/knc-i1x16.h b/examples/intrinsics/knc-i1x16.h
index 2e6afed5..8236ccd2 100644
--- a/examples/intrinsics/knc-i1x16.h
+++ b/examples/intrinsics/knc-i1x16.h
@@ -38,6 +38,8 @@
 #include
 #include
+#define INT32_MIN (-0x7fffffff - 1)
+
 #ifdef _MSC_VER
 #define FORCEINLINE __forceinline
 #define PRE_ALIGN(x) /*__declspec(align(x))*/
@@ -2370,6 +2372,7 @@ static FORCEINLINE void __scatter_base_offsets32_float(void *base, uint32_t scal
 static FORCEINLINE void __scatter_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_f value, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));

     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
@@ -2380,8 +2383,9 @@ static FORCEINLINE void __scatter_base_offsets64_float(uint8_t *_base, uint32_t
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        _mm512_mask_i32extscatter_ps(base, match, offsets.v_lo,
+                              ((scale*(unsigned long)hi32) << 32) - INT32_MIN);
+
+        _mm512_mask_i32extscatter_ps(base, match, signed_offsets,
                                      value,
                                      _MM_DOWNCONV_PS_NONE, scale,
                                      _MM_HINT_NONE);

From 6b5b547e2f11e077d896c06dd9e3dc828ad75188 Mon Sep 17 00:00:00 2001
From: Anton Mitrokhin
Date: Tue, 12 Aug 2014 17:02:33 +0400
Subject: [PATCH 2/5] modified all gather/scatter instructions in 'knc-i1x16.h'

---
 examples/intrinsics/knc-i1x16.h | 30 +++++++++++++++++-------------
 1 file changed, 17 insertions(+), 13 deletions(-)

diff --git a/examples/intrinsics/knc-i1x16.h b/examples/intrinsics/knc-i1x16.h
index 8236ccd2..4bb6420d 100644
--- a/examples/intrinsics/knc-i1x16.h
+++ b/examples/intrinsics/knc-i1x16.h
@@ -53,7 +53,7 @@
 #endif
 #define KNC 1
-#if 0
+#if 1
 extern "C" {
 int printf(const unsigned char *, ...);
@@ -2164,6 +2164,7 @@ static FORCEINLINE __vec16_i8 __gather_base_offsets32_i8(uint8_t *base, uint32_t
 static FORCEINLINE __vec16_i8 __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
     __vec16_i1 still_to_do = mask;
     __vec16_i32 tmp;
     while (still_to_do) {
@@ -2174,8 +2175,8 @@ static FORCEINLINE __vec16_i8 __gather_base_offsets64_i8(uint8_t *_base, uint32_
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        tmp = _mm512_mask_i32extgather_epi32(tmp, match, offsets.v_lo, base,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+        tmp = _mm512_mask_i32extgather_epi32(tmp, match, signed_offsets, base,
                                              _MM_UPCONV_EPI32_SINT8, scale,
                                              _MM_HINT_NONE);
         still_to_do = _mm512_kxor(match,still_to_do);
@@ -2199,6 +2200,7 @@ static FORCEINLINE __vec16_i32 __gather_base_offsets32_i32(uint8_t *base, uint32
 static FORCEINLINE __vec16_i32 __gather_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
     // There is no gather instruction with 64-bit offsets in KNC.
     // We have to manually iterate over the upper 32 bits ;-)
     __vec16_i1 still_to_do = mask;
@@ -2209,10 +2211,10 @@ static FORCEINLINE __vec16_i32 __gather_base_offsets64_i32(uint8_t *_base, uint3
         __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi,
                                                       __smear_i32<__vec16_i32>((int32_t)hi32),
                                                       _MM_CMPINT_EQ);
-
+
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        ret = _mm512_mask_i32extgather_epi32(ret, match, offsets.v_lo, base,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+        ret = _mm512_mask_i32extgather_epi32(ret, match, signed_offsets, base,
                                              _MM_UPCONV_EPI32_NONE, scale,
                                              _MM_HINT_NONE);
         still_to_do = _mm512_kxor(match, still_to_do);
@@ -2232,6 +2234,7 @@ static FORCEINLINE __vec16_f __gather_base_offsets32_float(uint8_t *base, uint32
 static FORCEINLINE __vec16_f __gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
     // There is no gather instruction with 64-bit offsets in KNC.
     // We have to manually iterate over the upper 32 bits ;-)
     __vec16_i1 still_to_do = mask;
@@ -2244,8 +2247,8 @@ static FORCEINLINE __vec16_f __gather_base_offsets64_float(uint8_t *_base, uint3
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        ret = _mm512_mask_i32extgather_ps(ret, match, offsets.v_lo, base,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+        ret = _mm512_mask_i32extgather_ps(ret, match, signed_offsets, base,
                                           _MM_UPCONV_PS_NONE, scale,
                                           _MM_HINT_NONE);
         still_to_do = _mm512_kxor(match, still_to_do);
@@ -2341,7 +2344,8 @@ static FORCEINLINE void __scatter_base_offsets32_i32(uint8_t *b, uint32_t scale,
 static FORCEINLINE void __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i32 value, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
-
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+
     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
         int first_active_lane = _mm_tzcnt_32((int)still_to_do);
@@ -2351,8 +2355,8 @@ static FORCEINLINE void __scatter_base_offsets64_i32(uint8_t *_base, uint32_t sc
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        _mm512_mask_i32extscatter_epi32(base, match, offsets.v_lo,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+        _mm512_mask_i32extscatter_epi32(base, match, signed_offsets,
                                         value,
                                         _MM_DOWNCONV_EPI32_NONE, scale,
                                         _MM_HINT_NONE);
@@ -2373,7 +2377,7 @@ static FORCEINLINE void __scatter_base_offsets64_float(uint8_t *_base, uint32_t
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
     const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
-
+
     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
         int first_active_lane = _mm_tzcnt_32((int)still_to_do);
@@ -2383,7 +2387,7 @@ static FORCEINLINE void __scatter_base_offsets64_float(uint8_t *_base, uint32_t
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32) - INT32_MIN);
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
         _mm512_mask_i32extscatter_ps(base, match, signed_offsets,
                                      value,

From 7adacf5a7bc0ebe1e33bf917c627fb641adaef92 Mon Sep 17 00:00:00 2001
From: Anton Mitrokhin
Date: Thu, 14 Aug 2014 17:27:56 +0400
Subject: [PATCH 3/5] 64 bit gather/scatter fix for knc.h

---
 examples/intrinsics/knc.h | 29 +++++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)

diff --git a/examples/intrinsics/knc.h b/examples/intrinsics/knc.h
index e674f409..3d1bcafc 100644
--- a/examples/intrinsics/knc.h
+++ b/examples/intrinsics/knc.h
@@ -40,10 +40,11 @@
 #include
 #include
+#define INT32_MIN (-0x7fffffff - 1)
+
 #include // for operator<<(m512[i])
 #include // for operator<<(m512[i])
-
 #define FORCEINLINE __forceinline
 #ifdef _MSC_VER
 #define PRE_ALIGN(x) /*__declspec(align(x))*/
@@ -1749,6 +1750,8 @@ __gather_base_offsets32_double(uint8_t *base, uint32_t scale, __vec16_i32 offset
 static FORCEINLINE __vec16_f
 __gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                               __vec16_i1 mask)
 {
+
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
     // There is no gather instruction with 64-bit offsets in KNC.
     // We have to manually iterate over the upper 32 bits ;-)
     __vec16_i1 still_to_do = mask;
@@ -1759,10 +1762,10 @@ __gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offset
         __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi,
                                                       __smear_i32<__vec16_i32>((int32_t)hi32),
                                                       _MM_CMPINT_EQ);
-
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        ret = _mm512_mask_i32extgather_ps(ret, match, offsets.v_lo, base,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+
+        ret = _mm512_mask_i32extgather_ps(ret, match, signed_offsets, base,
                                           _MM_UPCONV_PS_NONE, scale,
                                           _MM_HINT_NONE);
         still_to_do = _mm512_kxor(match, still_to_do);
@@ -1776,6 +1779,8 @@ static FORCEINLINE __vec16_i8
 __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                            __vec16_i1 mask)
 {
+
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
     __vec16_i1 still_to_do = mask;
     __vec16_i32 tmp;
     while (still_to_do) {
@@ -1786,8 +1791,8 @@ __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        tmp = _mm512_mask_i32extgather_epi32(tmp, match, offsets.v_lo, base,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+        tmp = _mm512_mask_i32extgather_epi32(tmp, match, signed_offsets, base,
                                              _MM_UPCONV_EPI32_SINT8, scale,
                                              _MM_HINT_NONE);
         still_to_do = _mm512_kxor(match,still_to_do);
@@ -1802,6 +1807,8 @@ static FORCEINLINE void
 __scatter_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                                __vec16_f value, __vec16_i1 mask)
 {
+
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
         int first_active_lane = _mm_tzcnt_32((int)still_to_do);
@@ -1811,8 +1818,8 @@ __scatter_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offse
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        _mm512_mask_i32extscatter_ps(base, match, offsets.v_lo,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+        _mm512_mask_i32extscatter_ps(base, match, signed_offsets,
                                      value,
                                      _MM_DOWNCONV_PS_NONE, scale,
                                      _MM_HINT_NONE);
@@ -1824,6 +1831,8 @@ static FORCEINLINE void
 __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                              __vec16_i32 value, __vec16_i1 mask)
 {
+
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
         int first_active_lane = _mm_tzcnt_32((int)still_to_do);
@@ -1833,8 +1842,8 @@ __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        _mm512_mask_i32extscatter_epi32(base, match, offsets.v_lo,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+        _mm512_mask_i32extscatter_epi32(base, match, signed_offsets,
                                         value,
                                         _MM_DOWNCONV_EPI32_NONE, scale,
                                         _MM_HINT_NONE);

From bd8d02527b52eff5189d4c2f79e2443f434aca6b Mon Sep 17 00:00:00 2001
From: Anton Mitrokhin
Date: Fri, 15 Aug 2014 14:43:22 +0400
Subject: [PATCH 4/5] removed ugly INT32_MIN define (included limits.h) and updated the copyright

---
 examples/intrinsics/knc-i1x16.h | 27 +++++++++++++--------------
 examples/intrinsics/knc.h       | 21 ++++++++++-----------
 2 files changed, 23 insertions(+), 25 deletions(-)

diff --git a/examples/intrinsics/knc-i1x16.h b/examples/intrinsics/knc-i1x16.h
index 4bb6420d..171fca09 100644
--- a/examples/intrinsics/knc-i1x16.h
+++ b/examples/intrinsics/knc-i1x16.h
@@ -1,5 +1,5 @@
 /**
-  Copyright (c) 2010-2013, Intel Corporation
+  Copyright (c) 2010-2014, Intel Corporation
   All rights reserved.

   Redistribution and use in source and binary forms, with or without
@@ -31,15 +31,14 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-#include
+#include <limits.h> // INT_MIN
+#include
 #include
 #include
 #include
 #include
 #include
-#define INT32_MIN (-0x7fffffff - 1)
-
 #ifdef _MSC_VER
 #define FORCEINLINE __forceinline
 #define PRE_ALIGN(x) /*__declspec(align(x))*/
@@ -2164,7 +2163,7 @@ static FORCEINLINE __vec16_i8 __gather_base_offsets32_i8(uint8_t *base, uint32_t
 static FORCEINLINE __vec16_i8 __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
-    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
     __vec16_i1 still_to_do = mask;
     __vec16_i32 tmp;
     while (still_to_do) {
@@ -2174,7 +2173,7 @@ static FORCEINLINE __vec16_i8 __gather_base_offsets64_i8(uint8_t *_base, uint32_
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
         tmp = _mm512_mask_i32extgather_epi32(tmp, match, signed_offsets, base,
                                              _MM_UPCONV_EPI32_SINT8, scale,
                                              _MM_HINT_NONE);
@@ -2199,7 +2198,7 @@ static FORCEINLINE __vec16_i32 __gather_base_offsets32_i32(uint8_t *base, uint32
 static FORCEINLINE __vec16_i32 __gather_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
-    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
     // There is no gather instruction with 64-bit offsets in KNC.
     // We have to manually iterate over the upper 32 bits ;-)
     __vec16_i1 still_to_do = mask;
@@ -2213,7 +2212,7 @@ static FORCEINLINE __vec16_i32 __gather_base_offsets64_i32(uint8_t *_base, uint3
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
         ret = _mm512_mask_i32extgather_epi32(ret, match, signed_offsets, base,
                                              _MM_UPCONV_EPI32_NONE, scale,
                                              _MM_HINT_NONE);
@@ -2234,7 +2233,7 @@ static FORCEINLINE __vec16_f __gather_base_offsets32_float(uint8_t *base, uint32
 static FORCEINLINE __vec16_f __gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
-    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
     // There is no gather instruction with 64-bit offsets in KNC.
     // We have to manually iterate over the upper 32 bits ;-)
     __vec16_i1 still_to_do = mask;
@@ -2247,7 +2246,7 @@ static FORCEINLINE __vec16_f __gather_base_offsets64_float(uint8_t *_base, uint3
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
         ret = _mm512_mask_i32extgather_ps(ret, match, signed_offsets, base,
                                           _MM_UPCONV_PS_NONE, scale,
@@ -2344,7 +2343,7 @@ static FORCEINLINE void __scatter_base_offsets32_i32(uint8_t *b, uint32_t scale,
 static FORCEINLINE void __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i32 value, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
-    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));

     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
@@ -2355,7 +2354,7 @@ static FORCEINLINE void __scatter_base_offsets64_i32(uint8_t *_base, uint32_t sc
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
         _mm512_mask_i32extscatter_epi32(base, match, signed_offsets,
                                         value,
                                         _MM_DOWNCONV_EPI32_NONE, scale,
@@ -2376,7 +2375,7 @@ static FORCEINLINE void __scatter_base_offsets32_float(void *base, uint32_t scal
 static FORCEINLINE void __scatter_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_f value, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
-    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));

     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
@@ -2387,7 +2386,7 @@ static FORCEINLINE void __scatter_base_offsets64_float(uint8_t *_base, uint32_t
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
         _mm512_mask_i32extscatter_ps(base, match, signed_offsets,
                                      value,
diff --git a/examples/intrinsics/knc.h b/examples/intrinsics/knc.h
index 3d1bcafc..23b35070 100644
--- a/examples/intrinsics/knc.h
+++ b/examples/intrinsics/knc.h
@@ -1,5 +1,5 @@
 /*
-  Copyright (c) 2012, Intel Corporation
+  Copyright (c) 2012-2014, Intel Corporation
   All rights reserved.

   Redistribution and use in source and binary forms, with or without
@@ -31,6 +31,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
+#include <limits.h> // INT_MIN
 #include
 #include
 #include
@@ -40,8 +41,6 @@
 #include
 #include
-#define INT32_MIN (-0x7fffffff - 1)
-
 #include // for operator<<(m512[i])
 #include // for operator<<(m512[i])
@@ -1751,7 +1750,7 @@ static FORCEINLINE __vec16_f
 __gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                               __vec16_i1 mask)
 {
-    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
     // There is no gather instruction with 64-bit offsets in KNC.
     // We have to manually iterate over the upper 32 bits ;-)
     __vec16_i1 still_to_do = mask;
@@ -1763,7 +1762,7 @@ __gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offset
                                                       __smear_i32<__vec16_i32>((int32_t)hi32),
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));

         ret = _mm512_mask_i32extgather_ps(ret, match, signed_offsets, base,
                                           _MM_UPCONV_PS_NONE, scale,
@@ -1780,7 +1779,7 @@ __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                            __vec16_i1 mask)
 {
-    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
     __vec16_i1 still_to_do = mask;
     __vec16_i32 tmp;
     while (still_to_do) {
@@ -1791,7 +1790,7 @@ __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
         tmp = _mm512_mask_i32extgather_epi32(tmp, match, signed_offsets, base,
                                              _MM_UPCONV_EPI32_SINT8, scale,
                                              _MM_HINT_NONE);
@@ -1808,7 +1807,7 @@ __scatter_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offse
                                __vec16_f value, __vec16_i1 mask)
 {
-    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
         int first_active_lane = _mm_tzcnt_32((int)still_to_do);
@@ -1818,7 +1817,7 @@ __scatter_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offse
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
         _mm512_mask_i32extscatter_ps(base, match, signed_offsets,
                                      value,
                                      _MM_DOWNCONV_PS_NONE, scale,
@@ -1832,7 +1831,7 @@ __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets
                              __vec16_i32 value, __vec16_i1 mask)
 {
-    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
         int first_active_lane = _mm_tzcnt_32((int)still_to_do);
@@ -1842,7 +1841,7 @@ __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets
                                                       _MM_CMPINT_EQ);
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
         _mm512_mask_i32extscatter_epi32(base, match, signed_offsets,
                                         value,
                                         _MM_DOWNCONV_EPI32_NONE, scale,

From 77dc94ab22ed744590e81e7aa07cb24adc519e78 Mon Sep 17 00:00:00 2001
From: Anton Mitrokhin
Date: Fri, 15 Aug 2014 15:18:11 +0400
Subject: [PATCH 5/5] undefined printf functions in knc-i1x16.h

---
 examples/intrinsics/knc-i1x16.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/intrinsics/knc-i1x16.h b/examples/intrinsics/knc-i1x16.h
index 171fca09..be9cbd1c 100644
--- a/examples/intrinsics/knc-i1x16.h
+++ b/examples/intrinsics/knc-i1x16.h
@@ -52,7 +52,7 @@
 #endif
 #define KNC 1
-#if 1
+#if 0
 extern "C" {
 int printf(const unsigned char *, ...);
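
Note on the address arithmetic in this series (not part of the patches themselves): KNC's 32-bit gather/scatter instructions sign-extend each per-lane offset, so the low 32 bits of a 64-bit offset cannot be passed through once they reach 2^31. The patches therefore bias every low word by INT32_MIN with _mm512_add_epi32, fold the distinct high words into the base pointer one loop iteration at a time, and repay the bias by adding scale * -(long)INT32_MIN to that base (PATCH 1 first added the bias to the base unscaled, which PATCH 2 corrects). The following is a minimal scalar model of that identity in plain C; the helper names wanted and emulated are illustrative and do not appear in the patches.

    /* Scalar model of the biased-offset trick used by the 64-bit
     * gather/scatter emulation in this series.  Illustrative only. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Address the emulation must reach: base + scale * offset64 (mod 2^64). */
    static uint64_t wanted(uint64_t base, uint32_t scale, uint64_t offset64) {
        return base + (uint64_t)scale * offset64;
    }

    /* Address actually formed per lane: the high 32 bits are folded into the
     * base (one loop iteration per distinct hi32), the low 32 bits are biased
     * by INT32_MIN so the instruction's signed 32-bit offset still spans the
     * full 4 GB window, and the bias is repaid in the base pointer. */
    static uint64_t emulated(uint64_t base, uint32_t scale, uint64_t offset64) {
        uint32_t hi32 = (uint32_t)(offset64 >> 32);
        uint32_t lo32 = (uint32_t)offset64;
        int32_t  signed_off = (int32_t)(lo32 + 0x80000000u);     /* lo32 + INT32_MIN, wrapping */
        uint64_t adj_base   = base
                            + (((uint64_t)scale * hi32) << 32)   /* hi32 folded into the base  */
                            + (uint64_t)scale * 0x80000000ull;   /* scale * -(long)INT32_MIN   */
        return adj_base + (uint64_t)scale * (int64_t)signed_off; /* offset sign-extended       */
    }

    int main(void) {
        const uint64_t base = 0x100000000ull;
        const uint64_t offsets[] = { 0, 1, 0x7fffffffull, 0x80000000ull,
                                     0xffffffffull, 0x123456789abull };
        for (uint32_t scale = 1; scale <= 8; scale *= 2)
            for (size_t i = 0; i < sizeof(offsets) / sizeof(offsets[0]); ++i)
                assert(wanted(base, scale, offsets[i]) == emulated(base, scale, offsets[i]));
        printf("biased-offset address arithmetic checks out\n");
        return 0;
    }

Running this exercises low words on both sides of the 2^31 boundary and several scale factors; the asserts hold because scale*lo32 equals scale*2^31 plus scale times the sign-extended biased offset, which is exactly the compensation the patched base-pointer computation performs.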