Support for cache 2/3 and all targets

This commit is contained in:
Vsevolod Livinskiy
2014-10-02 16:25:23 +04:00
parent 0a6eb61ad0
commit eb61d5df72
13 changed files with 262 additions and 39 deletions

View File

@@ -1540,6 +1540,15 @@ static FORCEINLINE void __prefetch_read_uniform_3(unsigned char *) {
// No-op stub: this generic target has no non-temporal uniform-read prefetch
// instruction, so the hint is silently ignored.
static FORCEINLINE void __prefetch_read_uniform_nt(unsigned char *) {
}
// Emits the pair of no-op varying-prefetch entry points for one cache level
// (generic 16-wide target has no gather-prefetch instruction, so both are
// empty stubs that discard their arguments).
//
// Bug fix: the macro definition previously ended with a trailing '\', which
// pulled the first invocation line into the macro body. The swallowed
// self-reference is not re-expanded on rescan (C/C++ painting rule), leaving
// raw `PREFETCH_READ_VARYING (1)` tokens in every later expansion.
#define PREFETCH_READ_VARYING(CACHE_NUM)                                                                \
    static FORCEINLINE void __prefetch_read_varying_##CACHE_NUM##_native(uint8_t *base, uint32_t scale, \
                                                                         __vec16_i32 offsets,           \
                                                                         __vec16_i1 mask) {}            \
    static FORCEINLINE void __prefetch_read_varying_##CACHE_NUM(__vec16_i64 addr, __vec16_i1 mask) {}
PREFETCH_READ_VARYING(1)
PREFETCH_READ_VARYING(2)
PREFETCH_READ_VARYING(3)
PREFETCH_READ_VARYING(nt)
#undef PREFETCH_READ_VARYING // local helper macro; keep it out of the global namespace
///////////////////////////////////////////////////////////////////////////
// atomics

View File

@@ -1624,6 +1624,16 @@ static FORCEINLINE void __prefetch_read_uniform_3(unsigned char *) {
// No-op stub: this generic target has no non-temporal uniform-read prefetch
// instruction, so the hint is silently ignored.
static FORCEINLINE void __prefetch_read_uniform_nt(unsigned char *) {
}
// Emits the pair of no-op varying-prefetch entry points for one cache level
// (generic 32-wide target has no gather-prefetch instruction, so both are
// empty stubs that discard their arguments).
//
// Bug fix: the macro definition previously ended with a trailing '\', which
// pulled the first invocation line into the macro body. The swallowed
// self-reference is not re-expanded on rescan (C/C++ painting rule), leaving
// raw `PREFETCH_READ_VARYING (1)` tokens in every later expansion.
#define PREFETCH_READ_VARYING(CACHE_NUM)                                                                \
    static FORCEINLINE void __prefetch_read_varying_##CACHE_NUM##_native(uint8_t *base, uint32_t scale, \
                                                                         __vec32_i32 offsets,           \
                                                                         __vec32_i1 mask) {}            \
    static FORCEINLINE void __prefetch_read_varying_##CACHE_NUM(__vec32_i64 addr, __vec32_i1 mask) {}
PREFETCH_READ_VARYING(1)
PREFETCH_READ_VARYING(2)
PREFETCH_READ_VARYING(3)
PREFETCH_READ_VARYING(nt)
#undef PREFETCH_READ_VARYING // local helper macro; keep it out of the global namespace
///////////////////////////////////////////////////////////////////////////
// atomics

View File

@@ -1757,6 +1757,16 @@ static FORCEINLINE void __prefetch_read_uniform_3(unsigned char *) {
// No-op stub: this generic target has no non-temporal uniform-read prefetch
// instruction, so the hint is silently ignored.
static FORCEINLINE void __prefetch_read_uniform_nt(unsigned char *) {
}
// Emits the pair of no-op varying-prefetch entry points for one cache level
// (generic 64-wide target has no gather-prefetch instruction, so both are
// empty stubs that discard their arguments).
//
// Bug fix: the macro definition previously ended with a trailing '\', which
// pulled the first invocation line into the macro body. The swallowed
// self-reference is not re-expanded on rescan (C/C++ painting rule), leaving
// raw `PREFETCH_READ_VARYING (1)` tokens in every later expansion.
#define PREFETCH_READ_VARYING(CACHE_NUM)                                                                \
    static FORCEINLINE void __prefetch_read_varying_##CACHE_NUM##_native(uint8_t *base, uint32_t scale, \
                                                                         __vec64_i32 offsets,           \
                                                                         __vec64_i1 mask) {}            \
    static FORCEINLINE void __prefetch_read_varying_##CACHE_NUM(__vec64_i64 addr, __vec64_i1 mask) {}
PREFETCH_READ_VARYING(1)
PREFETCH_READ_VARYING(2)
PREFETCH_READ_VARYING(3)
PREFETCH_READ_VARYING(nt)
#undef PREFETCH_READ_VARYING // local helper macro; keep it out of the global namespace
///////////////////////////////////////////////////////////////////////////
// atomics

View File

@@ -2550,16 +2550,26 @@ static FORCEINLINE void __prefetch_read_uniform_nt(unsigned char *p) {
// _mm_prefetch(p, _MM_HINT_NTA); // prefetch into L1$ with non-temporal hint
}
// NOTE(review): per the hunk counts (-16,+26) this appears to be the OLD,
// removed side of the diff, superseded by the PREFETCH_READ_VARYING macro
// below — confirm against the full diff before touching it.
// L1-cache gather-prefetch for 16 lanes: the KNC masked prefetch instruction
// covers 8 lanes per issue, so it is issued twice.
static FORCEINLINE void __prefetch_read_varying_1_native(uint8_t *base, uint32_t scale,
__vec16_i32 offsets, __vec16_i1 mask) {
// First issue: prefetch the lanes currently selected by mask into L1.
_mm512_mask_prefetch_i32gather_ps (offsets, mask, base, scale, _MM_HINT_T0);
// Swap the low and high 8-lane halves of the offsets vector.
offsets = _mm512_permutevar_epi32(_mm512_set_16to16_pi(7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8), offsets);
__vec16_i1 copy_mask = _mm512_kmov(mask);
// NOTE(review): _mm512_kswapb returns its result; as written the swapped
// mask is discarded and the second prefetch reuses `mask` unchanged —
// verify against the KNC intrinsics reference.
_mm512_kswapb(mask, copy_mask);
_mm512_mask_prefetch_i32gather_ps (offsets, mask, base, scale, _MM_HINT_T0);
}
// Emits the varying gather-prefetch helpers for one cache level on KNC.
// The masked prefetch instruction covers 8 of the 16 lanes per issue, so the
// offsets halves are swapped and the prefetch is issued twice.
//
// Bug fixes:
//  * The second gather-prefetch hard-coded _MM_HINT_T0, ignoring the HINT
//    parameter — cache levels 2/nt never reached the requested hint.
//  * The macro ended with a trailing '\', swallowing the first invocation
//    line into the macro body; the self-reference is not re-expanded on
//    rescan, so later invocations emitted raw PREFETCH_READ_VARYING tokens.
//  * With the macro fixed, PREFETCH_READ_VARYING(1) already generates
//    __prefetch_read_varying_1, so the explicit duplicate definition that
//    followed the _3 stubs is removed.
#define PREFETCH_READ_VARYING(CACHE_NUM, HINT)                                                          \
    static FORCEINLINE void __prefetch_read_varying_##CACHE_NUM##_native(uint8_t *base, uint32_t scale, \
                                                                         __vec16_i32 offsets,           \
                                                                         __vec16_i1 mask) {             \
        /* Prefetch the lanes selected by the current mask. */                                          \
        _mm512_mask_prefetch_i32gather_ps (offsets, mask, base, scale, HINT);                           \
        /* Swap the low/high 8-lane halves and cover the remaining lanes. */                            \
        offsets = _mm512_permutevar_epi32(_mm512_set_16to16_pi(7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8), offsets); \
        __vec16_i1 copy_mask = _mm512_kmov(mask);                                                       \
        /* NOTE(review): _mm512_kswapb returns the swapped mask; the result   */                        \
        /* is discarded here, so the second prefetch reuses the unswapped     */                        \
        /* mask — confirm against the KNC intrinsics reference.               */                        \
        _mm512_kswapb(mask, copy_mask);                                                                 \
        _mm512_mask_prefetch_i32gather_ps (offsets, mask, base, scale, HINT);                           \
    }                                                                                                   \
    static FORCEINLINE void __prefetch_read_varying_##CACHE_NUM(__vec16_i64 addr, __vec16_i1 mask) {}
PREFETCH_READ_VARYING(1, _MM_HINT_T0)
PREFETCH_READ_VARYING(2, _MM_HINT_T1)
PREFETCH_READ_VARYING(nt, _MM_HINT_T2)
#undef PREFETCH_READ_VARYING // local helper macro; keep it out of the global namespace
// Cache level 3 has no gather-prefetch hint on this target: no-op stubs.
static FORCEINLINE void __prefetch_read_varying_3_native(uint8_t *base, uint32_t scale,
                                                         __vec16_i32 offsets, __vec16_i1 mask) {}
static FORCEINLINE void __prefetch_read_varying_3(__vec16_i64 addr, __vec16_i1 mask) {}
///////////////////////////////////////////////////////////////////////////
// atomics
///////////////////////////////////////////////////////////////////////////

View File

@@ -2606,6 +2606,26 @@ static FORCEINLINE void __prefetch_read_uniform_nt(unsigned char *p) {
// _mm_prefetch(p, _MM_HINT_NTA); // prefetch into L1$ with non-temporal hint
}
// Emits the varying gather-prefetch helpers for one cache level on KNC.
// The masked prefetch instruction covers 8 of the 16 lanes per issue, so the
// offsets halves are swapped and the prefetch is issued twice.
//
// Bug fixes:
//  * The second gather-prefetch hard-coded _MM_HINT_T0, ignoring the HINT
//    parameter — cache levels 2/nt never reached the requested hint.
//  * The macro ended with a trailing '\', swallowing the first invocation
//    line into the macro body; the self-reference is not re-expanded on
//    rescan, so later invocations emitted raw PREFETCH_READ_VARYING tokens.
#define PREFETCH_READ_VARYING(CACHE_NUM, HINT)                                                          \
    static FORCEINLINE void __prefetch_read_varying_##CACHE_NUM##_native(uint8_t *base, uint32_t scale, \
                                                                         __vec16_i32 offsets,           \
                                                                         __vec16_i1 mask) {             \
        /* Prefetch the lanes selected by the current mask. */                                          \
        _mm512_mask_prefetch_i32gather_ps (offsets, mask, base, scale, HINT);                           \
        /* Swap the low/high 8-lane halves and cover the remaining lanes. */                            \
        offsets = _mm512_permutevar_epi32(_mm512_set_16to16_pi(7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8), offsets); \
        __vec16_i1 copy_mask = _mm512_kmov(mask);                                                       \
        /* NOTE(review): _mm512_kswapb returns the swapped mask; the result   */                        \
        /* is discarded here, so the second prefetch reuses the unswapped     */                        \
        /* mask — confirm against the KNC intrinsics reference.               */                        \
        _mm512_kswapb(mask, copy_mask);                                                                 \
        _mm512_mask_prefetch_i32gather_ps (offsets, mask, base, scale, HINT);                           \
    }                                                                                                   \
    static FORCEINLINE void __prefetch_read_varying_##CACHE_NUM(__vec16_i64 addr, __vec16_i1 mask) {}
PREFETCH_READ_VARYING(1, _MM_HINT_T0)
PREFETCH_READ_VARYING(2, _MM_HINT_T1)
PREFETCH_READ_VARYING(nt, _MM_HINT_T2)
#undef PREFETCH_READ_VARYING // local helper macro; keep it out of the global namespace
// Cache level 3 has no gather-prefetch hint on this target: no-op stubs.
static FORCEINLINE void __prefetch_read_varying_3_native(uint8_t *base, uint32_t scale,
                                                         __vec16_i32 offsets, __vec16_i1 mask) {}
static FORCEINLINE void __prefetch_read_varying_3(__vec16_i64 addr, __vec16_i1 mask) {}
///////////////////////////////////////////////////////////////////////////
// atomics

View File

@@ -1926,13 +1926,25 @@ static FORCEINLINE void __prefetch_read_uniform_nt(const char *p) {
// _mm_prefetch(p, _MM_HINT_NTA); // prefetch into L1$ with non-temporal hint
}
// NOTE(review): per the hunk counts (-13,+25) this appears to be the OLD,
// removed side of the diff, superseded by the PREFETCH_READ_VARYING macro
// below — confirm against the full diff before touching it.
// Unmasked L1 gather-prefetch for 16 lanes, issued twice with permuted
// offsets (the existing TODO notes that the mask is not permuted alongside).
static FORCEINLINE void __prefetch_read_varying_1_native(uint8_t *base, uint32_t scale,
__vec16_i32 offsets, __vec16_i1 mask) {
// First issue: unmasked prefetch of the gathered addresses into L1.
_mm512_prefetch_i32gather_ps(offsets, base, scale, _MM_HINT_T0);
// Reverse the lane order of the offsets before the second issue.
offsets = _mm512_permutevar_epi32(_mm512_set_16to16_pi(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15), offsets);
/* TODO: permutevar mask */
_mm512_prefetch_i32gather_ps(offsets, base, scale, _MM_HINT_T0);
}
// Emits the varying gather-prefetch helpers for one cache level.
// The masked prefetch instruction covers 8 of the 16 lanes per issue, so the
// offsets halves are swapped and the prefetch is issued twice.
//
// Bug fixes:
//  * The second gather-prefetch hard-coded _MM_HINT_T0, ignoring the HINT
//    parameter — cache levels 2/nt never reached the requested hint.
//  * The macro ended with a trailing '\', swallowing the first invocation
//    line into the macro body; the self-reference is not re-expanded on
//    rescan, so later invocations emitted raw PREFETCH_READ_VARYING tokens.
#define PREFETCH_READ_VARYING(CACHE_NUM, HINT)                                                          \
    static FORCEINLINE void __prefetch_read_varying_##CACHE_NUM##_native(uint8_t *base, uint32_t scale, \
                                                                         __vec16_i32 offsets,           \
                                                                         __vec16_i1 mask) {             \
        /* Prefetch the lanes selected by the current mask. */                                          \
        _mm512_mask_prefetch_i32gather_ps (offsets, mask, base, scale, HINT);                           \
        /* Swap the low/high 8-lane halves and cover the remaining lanes. */                            \
        offsets = _mm512_permutevar_epi32(_mm512_set_16to16_pi(7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8), offsets); \
        __vec16_i1 copy_mask = _mm512_kmov(mask);                                                       \
        /* NOTE(review): _mm512_kswapb returns the swapped mask; the result   */                        \
        /* is discarded here, so the second prefetch reuses the unswapped     */                        \
        /* mask — confirm against the KNC intrinsics reference.               */                        \
        _mm512_kswapb(mask, copy_mask);                                                                 \
        _mm512_mask_prefetch_i32gather_ps (offsets, mask, base, scale, HINT);                           \
    }                                                                                                   \
    static FORCEINLINE void __prefetch_read_varying_##CACHE_NUM(__vec16_i64 addr, __vec16_i1 mask) {}
PREFETCH_READ_VARYING(1, _MM_HINT_T0)
PREFETCH_READ_VARYING(2, _MM_HINT_T1)
PREFETCH_READ_VARYING(nt, _MM_HINT_T2)
#undef PREFETCH_READ_VARYING // local helper macro; keep it out of the global namespace
// Cache level 3 has no gather-prefetch hint on this target: no-op stubs.
static FORCEINLINE void __prefetch_read_varying_3_native(uint8_t *base, uint32_t scale,
                                                         __vec16_i32 offsets, __vec16_i1 mask) {}
static FORCEINLINE void __prefetch_read_varying_3(__vec16_i64 addr, __vec16_i1 mask) {}
///////////////////////////////////////////////////////////////////////////
// atomics

View File

@@ -3898,6 +3898,15 @@ static FORCEINLINE void __prefetch_read_uniform_nt(unsigned char *ptr) {
_mm_prefetch((char *)ptr, _MM_HINT_NTA);
}
// Emits the pair of no-op varying-prefetch entry points for one cache level
// (this 4-wide SSE target has no gather-prefetch instruction, so both are
// empty stubs that discard their arguments).
//
// Bug fix: the macro definition previously ended with a trailing '\', which
// pulled the first invocation line into the macro body. The swallowed
// self-reference is not re-expanded on rescan (C/C++ painting rule), leaving
// raw `PREFETCH_READ_VARYING (1)` tokens in every later expansion.
#define PREFETCH_READ_VARYING(CACHE_NUM)                                                                \
    static FORCEINLINE void __prefetch_read_varying_##CACHE_NUM##_native(uint8_t *base, uint32_t scale, \
                                                                         __vec4_i32 offsets,            \
                                                                         __vec4_i1 mask) {}             \
    static FORCEINLINE void __prefetch_read_varying_##CACHE_NUM(__vec4_i64 addr, __vec4_i1 mask) {}
PREFETCH_READ_VARYING(1)
PREFETCH_READ_VARYING(2)
PREFETCH_READ_VARYING(3)
PREFETCH_READ_VARYING(nt)
#undef PREFETCH_READ_VARYING // local helper macro; keep it out of the global namespace
///////////////////////////////////////////////////////////////////////////
// atomics