From a2d42c3242415165e8110965d29e258d77794a91 Mon Sep 17 00:00:00 2001
From: Jean-Luc Duprat
Date: Wed, 1 Aug 2012 14:37:25 -0700
Subject: [PATCH] KNC: all masked_load_*() and masked_store_*() functions need to do unaligned accesses

---
 examples/intrinsics/knc.h | 109 +++++++++++++++++++++++++++++++++-----
 1 file changed, 95 insertions(+), 14 deletions(-)

diff --git a/examples/intrinsics/knc.h b/examples/intrinsics/knc.h
index 6abffe74..c6872d7f 100644
--- a/examples/intrinsics/knc.h
+++ b/examples/intrinsics/knc.h
@@ -1859,17 +1859,22 @@ static FORCEINLINE __vec16_i16 __masked_load_i16(void *p,
 }
 */
 
-template <int ALIGN> static FORCEINLINE __vec16_i32 __masked_load_i32(void *p, __vec16_i1 mask) {
+#if 0
+template <int ALIGN>
+static FORCEINLINE __vec16_i32 __masked_load_i32(void *p, __vec16_i1 mask) {
     __vec16_i32 ret;
     ret = _mm512_mask_extloadunpackhi_epi32(ret, mask, p, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE);
     ret = _mm512_mask_extloadunpacklo_epi32(ret, mask, p, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE);
     return ret;
 }
 
-template <> static FORCEINLINE __vec16_i32 __masked_load_i32<64>(void *p, __vec16_i1 mask) {
+template <>
+static FORCEINLINE __vec16_i32 __masked_load_i32<64>(void *p, __vec16_i1 mask) {
     return _mm512_mask_load_epi32(_mm512_undefined_epi32(), mask, p);
 }
+#endif
+
+// Currently, when a pseudo_gather is converted into a masked load, the load has to be treated as unaligned
+static FORCEINLINE __vec16_i32 __masked_load_i32(void *p, __vec16_i1 mask) {
+    __vec16_i32 ret;
+    ret = _mm512_mask_extloadunpackhi_epi32(ret, mask, p, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE);
+    ret = _mm512_mask_extloadunpacklo_epi32(ret, mask, p, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE);
+    return ret;
+}
@@ -1894,15 +1899,54 @@ static FORCEINLINE __vec16_i64 __masked_load_i64(void *p,
 }
 */
 
-static FORCEINLINE __vec16_f __masked_load_float(void *p, __vec16_i1 mask) {
-    __vec16_f ret;
-    return _mm512_mask_load_ps(ret, mask, p);
+#if 0
+template <int ALIGN> static FORCEINLINE __vec16_f __masked_load_float(void *p, __vec16_i1 mask) {
+    __vec16_f ret = _mm512_undefined_ps();
+    ret = _mm512_mask_extloadunpackhi_ps(ret, mask, p, _MM_UPCONV_PS_NONE, _MM_HINT_NONE);
+    ret = _mm512_mask_extloadunpacklo_ps(ret, mask, p, _MM_UPCONV_PS_NONE, _MM_HINT_NONE);
+    return ret;
 }
 
+template <> static FORCEINLINE __vec16_f __masked_load_float<64>(void *p, __vec16_i1 mask) {
+    return _mm512_mask_load_ps(_mm512_undefined_ps(), mask, p);
+}
+#endif
+
+static FORCEINLINE __vec16_f __masked_load_float(void *p, __vec16_i1 mask) {
+    __vec16_f ret = _mm512_undefined_ps();
+    ret = _mm512_mask_extloadunpackhi_ps(ret, mask, p, _MM_UPCONV_PS_NONE, _MM_HINT_NONE);
+    ret = _mm512_mask_extloadunpacklo_ps(ret, mask, p, _MM_UPCONV_PS_NONE, _MM_HINT_NONE);
+    return ret;
+}
+
+#if 0
+template <int ALIGN> static FORCEINLINE __vec16_d __masked_load_double(void *p, __vec16_i1 mask) {
+    __vec16_d ret;
+    ret.v1 = _mm512_undefined_pd();
+    ret.v2 = _mm512_undefined_pd();
+    ret.v1 = _mm512_mask_extloadunpackhi_pd(ret.v1, mask.m8.m1, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE);
+    ret.v1 = _mm512_mask_extloadunpacklo_pd(ret.v1, mask.m8.m1, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE);
+    ret.v2 = _mm512_mask_extloadunpackhi_pd(ret.v2, mask.m8.m2, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE);
+    ret.v2 = _mm512_mask_extloadunpacklo_pd(ret.v2, mask.m8.m2, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE);
+    return ret;
+}
+
+template <> static FORCEINLINE __vec16_d __masked_load_double<64>(void *p, __vec16_i1 mask) {
+    __vec16_d ret;
+    ret.v1 = _mm512_mask_load_pd(_mm512_undefined_pd(), mask.m8.m1, p);
+    ret.v2 = _mm512_mask_load_pd(_mm512_undefined_pd(), mask.m8.m2, p);
+    return ret;
+}
+#endif
+
 static FORCEINLINE __vec16_d __masked_load_double(void *p, __vec16_i1 mask) {
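+    // The 16 doubles are split across two 512-bit registers (ret.v1/ret.v2);
+    // each half is gathered with an extloadunpackhi/extloadunpacklo pair under
+    // its half of the mask (mask.m8.m1/mask.m8.m2), so p does not have to be
+    // 64-byte aligned.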
     __vec16_d ret;
-    ret.v1 = _mm512_mask_load_pd(ret.v1, mask.m8.m1, p);
-    ret.v2 = _mm512_mask_load_pd(ret.v2, mask.m8.m2, p);
+    ret.v1 = _mm512_undefined_pd();
+    ret.v2 = _mm512_undefined_pd();
+    ret.v1 = _mm512_mask_extloadunpackhi_pd(ret.v1, mask.m8.m1, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE);
+    ret.v1 = _mm512_mask_extloadunpacklo_pd(ret.v1, mask.m8.m1, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE);
+    ret.v2 = _mm512_mask_extloadunpackhi_pd(ret.v2, mask.m8.m2, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE);
+    ret.v2 = _mm512_mask_extloadunpacklo_pd(ret.v2, mask.m8.m2, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE);
     return ret;
 }
@@ -1924,10 +1968,21 @@ static FORCEINLINE void __masked_store_i16(void *p, __vec16_i16 val,
 }
 */
 
-static FORCEINLINE void __masked_store_i32(void *p, __vec16_i32 val,
-                                           __vec16_i1 mask) {
+#if 0
+template <int ALIGN> static FORCEINLINE void __masked_store_i32(void *p, __vec16_i32 val, __vec16_i1 mask) {
+    _mm512_mask_extpackstorehi_epi32(p, mask, val, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE);
+    _mm512_mask_extpackstorelo_epi32(p, mask, val, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE);
+}
+
+template <> static FORCEINLINE void __masked_store_i32<64>(void *p, __vec16_i32 val, __vec16_i1 mask) {
     _mm512_mask_store_epi32(p, mask, val);
 }
+#endif
+
+static FORCEINLINE void __masked_store_i32(void *p, __vec16_i32 val, __vec16_i1 mask) {
+    _mm512_mask_extpackstorehi_epi32(p, mask, val, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE);
+    _mm512_mask_extpackstorelo_epi32(p, mask, val, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE);
+}
@@ -1938,16 +1993,42 @@ static FORCEINLINE void __masked_store_i64(void *p, __vec16_i64 val,
 }
 */
 
-static FORCEINLINE void __masked_store_float(void *p, __vec16_f val,
-                                             __vec16_i1 mask) {
-    _mm512_mask_store_ps(p, mask, val);
+#if 0
+template <int ALIGN> static FORCEINLINE void __masked_store_float(void *p, __vec16_f val, __vec16_i1 mask) {
+    _mm512_mask_extpackstorehi_ps(p, mask, val, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
+    _mm512_mask_extpackstorelo_ps(p, mask, val, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
 }
 
-static FORCEINLINE void __masked_store_double(void *p, __vec16_d val,
-                                              __vec16_i1 mask) {
+template <> static FORCEINLINE void __masked_store_float<64>(void *p, __vec16_f val, __vec16_i1 mask) {
+    _mm512_mask_store_ps(p, mask, val);
+}
+#endif
+
+static FORCEINLINE void __masked_store_float(void *p, __vec16_f val, __vec16_i1 mask) {
+    _mm512_mask_extpackstorehi_ps(p, mask, val, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
+    _mm512_mask_extpackstorelo_ps(p, mask, val, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
+}
+
+#if 0
+template <int ALIGN> static FORCEINLINE void __masked_store_double(void *p, __vec16_d val, __vec16_i1 mask) {
+    _mm512_mask_extpackstorehi_pd(p, mask.m8.m1, val.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE);
+    _mm512_mask_extpackstorelo_pd(p, mask.m8.m1, val.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE);
+    _mm512_mask_extpackstorehi_pd(((uint8_t*)p)+64, mask.m8.m2, val.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE);
+    _mm512_mask_extpackstorelo_pd(((uint8_t*)p)+64, mask.m8.m2, val.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE);
+}
+
+template <> static FORCEINLINE void __masked_store_double<64>(void *p, __vec16_d val, __vec16_i1 mask) {
     _mm512_mask_store_pd(p, mask.m8.m1, val.v1);
     _mm512_mask_store_pd(((uint8_t*)p)+64, mask.m8.m2, val.v2);
 }
+#endif
+
+static FORCEINLINE void __masked_store_double(void *p, __vec16_d val, __vec16_i1 mask) {
+    _mm512_mask_extpackstorehi_pd(p, mask.m8.m1, val.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE);
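+    // The matching extpackstorelo completes the unaligned store of val.v1;
+    // val.v2 is then written the same way at a 64-byte offset.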
+    _mm512_mask_extpackstorelo_pd(p, mask.m8.m1, val.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE);
+    _mm512_mask_extpackstorehi_pd(((uint8_t*)p)+64, mask.m8.m2, val.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE);
+    _mm512_mask_extpackstorelo_pd(((uint8_t*)p)+64, mask.m8.m2, val.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE);
+}
 
 /*
 static FORCEINLINE void __masked_store_blend_i8(void *p, __vec16_i8 val,
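
A minimal sketch of how the unpack/pack idiom in this patch composes, assuming
the same KNC intrinsics and the types defined earlier in knc.h; the helper
name is hypothetical and not part of the patch:

    // Copy 16 masked floats between two arbitrarily aligned addresses, using
    // the same hi/lo unpacking loads and packing stores as the patch above.
    static FORCEINLINE void __copy_float_unaligned(void *src, void *dst,
                                                   __vec16_i1 mask) {
        __vec16_f v = _mm512_undefined_ps();
        // hi/lo unpacking loads tolerate any source alignment
        v = _mm512_mask_extloadunpackhi_ps(v, mask, src, _MM_UPCONV_PS_NONE, _MM_HINT_NONE);
        v = _mm512_mask_extloadunpacklo_ps(v, mask, src, _MM_UPCONV_PS_NONE, _MM_HINT_NONE);
        // hi/lo packing stores tolerate any destination alignment
        _mm512_mask_extpackstorehi_ps(dst, mask, v, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
        _mm512_mask_extpackstorelo_ps(dst, mask, v, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
    }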