KNC: all masked_load_*() and masked_store_*() functions need to do unaligned accesses

Author: Jean-Luc Duprat
Date:   2012-08-01 14:37:25 -07:00
Parent: 52836aae87
Commit: a2d42c3242


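The KNC target previously provided these helpers as templates parameterized on alignment, with a <64> specialization that used the aligned _mm512_mask_load_* / _mm512_mask_store_* forms. When a pseudo_gather is converted into a masked load (and, per the commit title, likewise on the store side), the pointer is not guaranteed to be 64-byte aligned, so this change parks the templated variants under #if 0 and makes every masked load and store go through the unaligned loadunpack / packstore intrinsic pairs.
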
@@ -1859,17 +1859,22 @@ static FORCEINLINE __vec16_i16 __masked_load_i16(void *p,
}
*/
template <int ALIGN> static FORCEINLINE __vec16_i32 __masked_load_i32(void *p, __vec16_i1 mask) {
#if 0
template <int ALIGN>
static FORCEINLINE __vec16_i32 __masked_load_i32(void *p, __vec16_i1 mask) {
__vec16_i32 ret;
ret = _mm512_mask_extloadunpackhi_epi32(ret, mask, p, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE);
ret = _mm512_mask_extloadunpacklo_epi32(ret, mask, p, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE);
return ret;
}
template <> static FORCEINLINE __vec16_i32 __masked_load_i32<64>(void *p, __vec16_i1 mask) {
template <>
static FORCEINLINE __vec16_i32 __masked_load_i32<64>(void *p, __vec16_i1 mask) {
return _mm512_mask_load_epi32(_mm512_undefined_epi32(), mask, p);
}
#endif
// Currently, when a pseudo_gather is converted into a masked load, it has to be unaligned
static FORCEINLINE __vec16_i32 __masked_load_i32(void *p, __vec16_i1 mask) {
__vec16_i32 ret;
ret = _mm512_mask_extloadunpackhi_epi32(ret, mask, p, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE);
@@ -1894,15 +1899,54 @@ static FORCEINLINE __vec16_i64 __masked_load_i64(void *p,
}
*/
static FORCEINLINE __vec16_f __masked_load_float(void *p, __vec16_i1 mask) {
__vec16_f ret;
return _mm512_mask_load_ps(ret, mask, p);
#if 0
template <int ALIGN> static FORCEINLINE __vec16_f __masked_load_float(void *p, __vec16_i1 mask) {
__vec16_f ret = _mm512_undefined_ps();
ret = _mm512_mask_extloadunpackhi_ps(ret, mask, p, _MM_UPCONV_PS_NONE, _MM_HINT_NONE);
ret = _mm512_mask_extloadunpacklo_ps(ret, mask, p, _MM_UPCONV_PS_NONE, _MM_HINT_NONE);
return ret;
}
template <> static FORCEINLINE __vec16_f __masked_load_float<64>(void *p, __vec16_i1 mask) {
return _mm512_mask_load_ps(_mm512_undefined_ps(), mask, p);
}
#endif
static FORCEINLINE __vec16_f __masked_load_float(void *p, __vec16_i1 mask) {
__vec16_f ret = _mm512_undefined_ps();
ret = _mm512_mask_extloadunpackhi_ps(ret, mask, p, _MM_UPCONV_PS_NONE, _MM_HINT_NONE);
ret = _mm512_mask_extloadunpacklo_ps(ret, mask, p, _MM_UPCONV_PS_NONE, _MM_HINT_NONE);
return ret;
}
#if 0
template <int ALIGN> static FORCEINLINE __vec16_d __masked_load_double(void *p, __vec16_i1 mask) {
__vec16_d ret;
ret.v1 = _mm512_undefined_pd();
ret.v2 = _mm512_undefined_pd();
ret.v1 = _mm512_mask_extloadunpackhi_pd(ret.v1, mask.m8.m1, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE);
ret.v1 = _mm512_mask_extloadunpacklo_pd(ret.v1, mask.m8.m1, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE);
ret.v2 = _mm512_mask_extloadunpackhi_pd(ret.v2, mask.m8.m2, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE);
ret.v2 = _mm512_mask_extloadunpacklo_pd(ret.v2, mask.m8.m2, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE);
return ret;
}
template <> static FORCEINLINE __vec16_d __masked_load_double<64>(void *p, __vec16_i1 mask) {
__vec16_d ret;
ret.v1 = _mm512_mask_load_pd(_mm512_undefined_pd(), mask.m8.m1, p);
ret.v2 = _mm512_mask_load_pd(_mm512_undefined_pd(), mask.m8.m2, p);
return ret;
}
#endif
static FORCEINLINE __vec16_d __masked_load_double(void *p, __vec16_i1 mask) {
__vec16_d ret;
ret.v1 = _mm512_mask_load_pd(ret.v1, mask.m8.m1, p);
ret.v2 = _mm512_mask_load_pd(ret.v2, mask.m8.m2, p);
ret.v1 = _mm512_undefined_pd();
ret.v2 = _mm512_undefined_pd();
ret.v1 = _mm512_mask_extloadunpackhi_pd(ret.v1, mask.m8.m1, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE);
ret.v1 = _mm512_mask_extloadunpacklo_pd(ret.v1, mask.m8.m1, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE);
ret.v2 = _mm512_mask_extloadunpackhi_pd(ret.v2, mask.m8.m2, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE);
ret.v2 = _mm512_mask_extloadunpacklo_pd(ret.v2, mask.m8.m2, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE);
return ret;
}
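
The double variants above apply the same pattern twice, once per 512-bit half (v1/v2) with the matching mask halves (mask.m8.m1/mask.m8.m2). The contract these load helpers are meant to satisfy is a lane-by-lane conditional read from a pointer with no particular alignment: lane i is filled from ((T*)p)[i] only when bit i of the mask is set, and inactive lanes must not touch memory. A minimal scalar sketch of that semantics, using stand-in names and types rather than the actual __vec16_f / __vec16_i1 defined in this header:

#include <cstdint>

// Scalar reference for a 16-wide masked float load from a possibly
// unaligned pointer. Stand-in types, not the real __vec16_f/__vec16_i1.
struct Vec16fRef { float v[16]; };

static inline Vec16fRef masked_load_float_ref(const void *p, uint16_t mask) {
    const float *src = static_cast<const float *>(p);  // no 64-byte alignment assumed
    Vec16fRef ret = {};   // inactive lanes are unspecified in the real code
                          // (merged from an undefined register); zeroed here
    for (int i = 0; i < 16; ++i)
        if (mask & (1u << i))
            ret.v[i] = src[i];   // only active lanes read memory
    return ret;
}

On KNC an unaligned vector load is issued as a loadunpacklo/loadunpackhi pair; the two instructions exist as a pair so that an access straddling a 64-byte boundary can be split across the two cache lines it touches.
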
@@ -1924,10 +1968,21 @@ static FORCEINLINE void __masked_store_i16(void *p, __vec16_i16 val,
}
*/
static FORCEINLINE void __masked_store_i32(void *p, __vec16_i32 val,
__vec16_i1 mask) {
#if 0
template <int ALIGN> static FORCEINLINE void __masked_store_i32(void *p, __vec16_i32 val, __vec16_i1 mask) {
_mm512_mask_extpackstorehi_epi32(p, mask, val, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE);
_mm512_mask_extpackstorelo_epi32(p, mask, val, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE);
}
template <> static FORCEINLINE void __masked_store_i32<64>(void *p, __vec16_i32 val, __vec16_i1 mask) {
_mm512_mask_store_epi32(p, mask, val);
}
#endif
static FORCEINLINE void __masked_store_i32(void *p, __vec16_i32 val, __vec16_i1 mask) {
_mm512_mask_extpackstorehi_epi32(p, mask, val, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE);
_mm512_mask_extpackstorelo_epi32(p, mask, val, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE);
}
/*
static FORCEINLINE void __masked_store_i64(void *p, __vec16_i64 val,
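
The store side mirrors the load side: lane i of val should be written to ((T*)p)[i] only when bit i of the mask is set, memory covered by inactive lanes must be left untouched, and p again carries no 64-byte alignment guarantee, which is why the aligned _mm512_mask_store_* calls give way to the extpackstorehi/extpackstorelo pairs below. A scalar sketch of that intended behavior, with stand-in names rather than the real types in this header:

#include <cstdint>

// Scalar reference for a 16-wide masked int32 store to a possibly
// unaligned pointer. Stand-in types, not the real __vec16_i32/__vec16_i1.
struct Vec16i32Ref { int32_t v[16]; };

static inline void masked_store_i32_ref(void *p, const Vec16i32Ref &val, uint16_t mask) {
    int32_t *dst = static_cast<int32_t *>(p);   // no 64-byte alignment assumed
    for (int i = 0; i < 16; ++i)
        if (mask & (1u << i))
            dst[i] = val.v[i];   // inactive lanes leave memory untouched
}

For the double variants the two 8-wide halves are written 64 bytes apart, as the ((uint8_t*)p)+64 offsets in the hunk below show.
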
@@ -1938,16 +1993,42 @@ static FORCEINLINE void __masked_store_i64(void *p, __vec16_i64 val,
}
*/
static FORCEINLINE void __masked_store_float(void *p, __vec16_f val,
__vec16_i1 mask) {
_mm512_mask_store_ps(p, mask, val);
#if 0
template <int ALIGN> static FORCEINLINE void __masked_store_float(void *p, __vec16_f val, __vec16_i1 mask) {
_mm512_mask_extpackstorehi_ps(p, mask, val, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_mask_extpackstorelo_ps(p, mask, val, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
static FORCEINLINE void __masked_store_double(void *p, __vec16_d val,
__vec16_i1 mask) {
template <> static FORCEINLINE void __masked_store_float<64>(void *p, __vec16_f val, __vec16_i1 mask) {
_mm512_mask_store_ps(p, mask, val);
}
#endif
static FORCEINLINE void __masked_store_float(void *p, __vec16_f val, __vec16_i1 mask) {
_mm512_mask_extpackstorehi_ps(p, mask, val, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_mask_extpackstorelo_ps(p, mask, val, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
#if 0
template <int ALIGN> static FORCEINLINE void __masked_store_double(void *p, __vec16_d val, __vec16_i1 mask) {
_mm512_mask_extpackstorehi_pd(p, mask.m8.m1, val.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE);
_mm512_mask_extpackstorelo_pd(p, mask.m8.m1, val.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE);
_mm512_mask_extpackstorehi_pd(((uint8_t*)p)+64, mask.m8.m2, val.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE);
_mm512_mask_extpackstorelo_pd(((uint8_t*)p)+64, mask.m8.m2, val.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE);
}
template <> static FORCEINLINE void __masked_store_double<64>(void *p, __vec16_d val, __vec16_i1 mask) {
_mm512_mask_store_pd(p, mask.m8.m1, val.v1);
_mm512_mask_store_pd(((uint8_t*)p)+64, mask.m8.m2, val.v2);
}
#endif
static FORCEINLINE void __masked_store_double(void *p, __vec16_d val, __vec16_i1 mask) {
_mm512_mask_extpackstorehi_pd(p, mask.m8.m1, val.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE);
_mm512_mask_extpackstorelo_pd(p, mask.m8.m1, val.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE);
_mm512_mask_extpackstorehi_pd(((uint8_t*)p)+64, mask.m8.m2, val.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE);
_mm512_mask_extpackstorelo_pd(((uint8_t*)p)+64, mask.m8.m2, val.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE);
}
/*
static FORCEINLINE void __masked_store_blend_i8(void *p, __vec16_i8 val,