Add peephole optimization to eliminate some mask AND operations.

On KNC, the various vector comparison instructions can optionally
be masked; if a mask is provided, the value returned is effectively
the AND of the mask with the result of the comparison.

This change adds an optimization pass to the C++ backend that looks
for vector ANDs where one operand is a comparison and rewrites
them; e.g. "__and(__equal_float(a, b), c)" is changed to
"__equal_float_and_mask(a, b, c)", saving an instruction.

Issue #319.
Author: Matt Pharr
Date:   2012-07-07 08:35:38 -07:00
Parent: 974b40c8af
Commit: 8ef6bc1636
6 changed files with 304 additions and 0 deletions

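The rewrite relies on the masked compares added below returning the AND of the
mask with the comparison result. A minimal sketch of that equivalence
(illustration only, not part of this commit; it assumes the knc.h wrappers
shown in the diff):

    // Reference form: unmasked compare followed by a mask AND (two instructions).
    __vec16_i1 before = __and(__equal_float(a, b), c);
    // Fused form the new pass rewrites to: a single masked compare.
    __vec16_i1 after = __equal_float_and_mask(a, b, c);
    // For any a, b of type __vec16_f and mask c, 'before' and 'after'
    // hold identical mask bits.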

@@ -638,42 +638,92 @@ static FORCEINLINE __vec16_i1 __equal_i32(const __vec16_i32 &a, const __vec16_i3
return _mm512_cmpeq_epi32_mask((__m512i)a, (__m512i)b);
}
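// Masked variants: the _mm512_mask_* compare intrinsics return the AND of the
// provided mask with the comparison result, i.e. the same value as
// __and(compare(a, b), m).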
static FORCEINLINE __vec16_i1 __equal_i32_and_mask(const __vec16_i32 &a, const __vec16_i32 &b,
__vec16_i1 m) {
return _mm512_mask_cmpeq_epi32_mask((__mmask16)m, (__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __not_equal_i32(__vec16_i32 a, __vec16_i32 b) {
return _mm512_cmpneq_epi32_mask((__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __not_equal_i32_and_mask(__vec16_i32 a, __vec16_i32 b,
__vec16_i1 m) {
return _mm512_mask_cmpneq_epi32_mask((__mmask16)m, (__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __unsigned_less_equal_i32(__vec16_i32 a, __vec16_i32 b) {
return _mm512_cmple_epu32_mask((__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __unsigned_less_equal_i32_and_mask(__vec16_i32 a, __vec16_i32 b,
__vec16_i1 m) {
return _mm512_mask_cmple_epu32_mask((__mmask16)m, (__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __signed_less_equal_i32(__vec16_i32 a, __vec16_i32 b) {
return _mm512_cmple_epi32_mask((__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __signed_less_equal_i32_and_mask(__vec16_i32 a, __vec16_i32 b,
__vec16_i1 m) {
return _mm512_mask_cmple_epi32_mask((__mmask16)m, (__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __unsigned_greater_equal_i32(__vec16_i32 a, __vec16_i32 b) {
return _mm512_cmpge_epu32_mask((__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __unsigned_greater_equal_i32_and_mask(__vec16_i32 a, __vec16_i32 b,
__vec16_i1 m) {
return _mm512_mask_cmpge_epu32_mask((__mmask16)m, (__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __signed_greater_equal_i32(__vec16_i32 a, __vec16_i32 b) {
return _mm512_cmpge_epi32_mask((__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __signed_greater_equal_i32_and_mask(__vec16_i32 a, __vec16_i32 b,
__vec16_i1 m) {
return _mm512_mask_cmpge_epi32_mask((__mmask16)m, (__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __unsigned_less_than_i32(__vec16_i32 a, __vec16_i32 b) {
return _mm512_cmplt_epu32_mask((__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __unsigned_less_than_i32_and_mask(__vec16_i32 a, __vec16_i32 b,
__vec16_i1 m) {
return _mm512_mask_cmplt_epu32_mask((__mmask16)m, (__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __signed_less_than_i32(__vec16_i32 a, __vec16_i32 b) {
return _mm512_cmplt_epi32_mask((__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __signed_less_than_i32_and_mask(__vec16_i32 a, __vec16_i32 b,
__vec16_i1 m) {
return _mm512_mask_cmplt_epi32_mask((__mmask16)m, (__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __unsigned_greater_than_i32(__vec16_i32 a, __vec16_i32 b) {
return _mm512_cmpgt_epu32_mask((__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __unsigned_greater_than_i32_and_mask(__vec16_i32 a, __vec16_i32 b,
__vec16_i1 m) {
return _mm512_mask_cmpgt_epu32_mask((__mmask16)m, (__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __signed_greater_than_i32(__vec16_i32 a, __vec16_i32 b) {
return _mm512_cmpgt_epi32_mask((__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i1 __signed_greater_than_i32_and_mask(__vec16_i32 a, __vec16_i32 b,
__vec16_i1 m) {
return _mm512_mask_cmpgt_epi32_mask((__mmask16)m, (__m512i)a, (__m512i)b);
}
static FORCEINLINE __vec16_i32 __select(__vec16_i1 mask,
__vec16_i32 a, __vec16_i32 b) {
return _mm512_mask_mov_epi32(b.v, mask.m, a.v);
@@ -778,10 +828,22 @@ static FORCEINLINE __vec16_i1 __equal_i64(const __vec16_i64 &a, const __vec16_i6
return _mm512_mask_cmpeq_epi32_mask(lo_match,a.v_hi,b.v_hi);
}
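// The 64-bit comparisons are built from two 32-bit compares on the split
// hi/lo representation, so the masked variants AND the mask in after the
// comparison rather than using a single masked compare.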
static FORCEINLINE __vec16_i1 __equal_i64_and_mask(const __vec16_i64 &a, const __vec16_i64 &b,
__vec16_i1 mask) {
__mmask16 lo_match = _mm512_cmpeq_epi32_mask(a.v_lo,b.v_lo);
__mmask16 full_match = _mm512_mask_cmpeq_epi32_mask(lo_match,a.v_hi,b.v_hi);
return _mm512_kand(full_match, (__mmask16)mask);
}
static FORCEINLINE __vec16_i1 __not_equal_i64(const __vec16_i64 &a, const __vec16_i64 &b) {
return __not(__equal(a,b));
}
static FORCEINLINE __vec16_i1 __not_equal_i64_and_mask(const __vec16_i64 &a, const __vec16_i64 &b,
__vec16_i1 mask) {
return __and(__not(__equal(a,b)), mask);
}
CMP_OP(__vec16_i64, uint64_t, __unsigned_less_equal, <=)
CMP_OP(__vec16_i64, int64_t, __signed_less_equal, <=)
CMP_OP(__vec16_i64, uint64_t, __unsigned_greater_equal, >=)
@@ -919,26 +981,56 @@ static FORCEINLINE __vec16_i1 __equal_float(__vec16_f a, __vec16_f b) {
return _mm512_cmpeq_ps_mask(a, b);
}
static FORCEINLINE __vec16_i1 __equal_float_and_mask(__vec16_f a, __vec16_f b,
__vec16_i1 m) {
return _mm512_mask_cmpeq_ps_mask(m, a, b);
}
static FORCEINLINE __vec16_i1 __not_equal_float(__vec16_f a, __vec16_f b) {
return _mm512_cmpneq_ps_mask(a, b);
}
static FORCEINLINE __vec16_i1 __not_equal_float_and_mask(__vec16_f a, __vec16_f b,
__vec16_i1 m) {
return _mm512_mask_cmpneq_ps_mask(m, a, b);
}
static FORCEINLINE __vec16_i1 __less_than_float(__vec16_f a, __vec16_f b) {
return _mm512_cmplt_ps_mask(a, b);
}
static FORCEINLINE __vec16_i1 __less_than_float_and_mask(__vec16_f a, __vec16_f b,
__vec16_i1 m) {
return _mm512_mask_cmplt_ps_mask(m, a, b);
}
static FORCEINLINE __vec16_i1 __less_equal_float(__vec16_f a, __vec16_f b) {
return _mm512_cmple_ps_mask(a, b);
}
static FORCEINLINE __vec16_i1 __less_equal_float_and_mask(__vec16_f a, __vec16_f b,
__vec16_i1 m) {
return _mm512_mask_cmple_ps_mask(m, a, b);
}
static FORCEINLINE __vec16_i1 __greater_than_float(__vec16_f a, __vec16_f b) {
return _mm512_cmpnle_ps_mask(a, b);
}
static FORCEINLINE __vec16_i1 __greater_than_float_and_mask(__vec16_f a, __vec16_f b,
__vec16_i1 m) {
return _mm512_mask_cmpnle_ps_mask(m, a, b);
}
static FORCEINLINE __vec16_i1 __greater_equal_float(__vec16_f a, __vec16_f b) {
return _mm512_cmpnlt_ps_mask(a, b);
}
static FORCEINLINE __vec16_i1 __greater_equal_float_and_mask(__vec16_f a, __vec16_f b,
__vec16_i1 m) {
return _mm512_mask_cmpnlt_ps_mask(m, a, b);
}
/*
static FORCEINLINE __vec16_i1 __ordered(__vec16_f a, __vec16_f b) {
__vec16_i1 ret;
@@ -1050,6 +1142,14 @@ static FORCEINLINE __vec16_i1 __equal_double(__vec16_d a, __vec16_d b) {
return ret;
}
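// Doubles are handled as two 8-wide halves; each half of the mask is applied
// to the corresponding masked compare.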
static FORCEINLINE __vec16_i1 __equal_double_and_mask(__vec16_d a, __vec16_d b,
__vec16_i1 m) {
__vec16_i1 ret;
ret.m8.m1 = _mm512_mask_cmpeq_pd_mask(m.m8.m1, a.v1, b.v1);
ret.m8.m2 = _mm512_mask_cmpeq_pd_mask(m.m8.m2, a.v2, b.v2);
return ret;
}
static FORCEINLINE __vec16_i1 __not_equal_double(__vec16_d a, __vec16_d b) {
__vec16_i1 ret;
ret.m8.m1 = _mm512_cmpneq_pd_mask(a.v1, b.v1);
@@ -1057,6 +1157,14 @@ static FORCEINLINE __vec16_i1 __not_equal_double(__vec16_d a, __vec16_d b) {
return ret;
}
static FORCEINLINE __vec16_i1 __not_equal_double_and_mask(__vec16_d a, __vec16_d b,
__vec16_i1 m) {
__vec16_i1 ret;
ret.m8.m1 = _mm512_mask_cmpneq_pd_mask(m.m8.m1, a.v1, b.v1);
ret.m8.m2 = _mm512_mask_cmpneq_pd_mask(m.m8.m2, a.v2, b.v2);
return ret;
}
static FORCEINLINE __vec16_i1 __less_than_double(__vec16_d a, __vec16_d b) {
__vec16_i1 ret;
ret.m8.m1 = _mm512_cmplt_pd_mask(a.v1, b.v1);
@@ -1064,6 +1172,14 @@ static FORCEINLINE __vec16_i1 __less_than_double(__vec16_d a, __vec16_d b) {
return ret;
}
static FORCEINLINE __vec16_i1 __less_than_double_and_mask(__vec16_d a, __vec16_d b,
__vec16_i1 m) {
__vec16_i1 ret;
ret.m8.m1 = _mm512_mask_cmplt_pd_mask(m.m8.m1, a.v1, b.v1);
ret.m8.m2 = _mm512_mask_cmplt_pd_mask(m.m8.m2, a.v2, b.v2);
return ret;
}
static FORCEINLINE __vec16_i1 __less_equal_double(__vec16_d a, __vec16_d b) {
__vec16_i1 ret;
ret.m8.m1 = _mm512_cmple_pd_mask(a.v1, b.v1);
@@ -1071,6 +1187,14 @@ static FORCEINLINE __vec16_i1 __less_equal_double(__vec16_d a, __vec16_d b) {
return ret;
}
static FORCEINLINE __vec16_i1 __less_equal_double_and_mask(__vec16_d a, __vec16_d b,
__vec16_i1 m) {
__vec16_i1 ret;
ret.m8.m1 = _mm512_mask_cmple_pd_mask(m.m8.m1, a.v1, b.v1);
ret.m8.m2 = _mm512_mask_cmple_pd_mask(m.m8.m2, a.v2, b.v2);
return ret;
}
static FORCEINLINE __vec16_i1 __greater_than_double(__vec16_d a, __vec16_d b) {
__vec16_i1 ret;
ret.m8.m1 = _mm512_cmpnle_pd_mask(a.v1, b.v1);
@@ -1078,6 +1202,14 @@ static FORCEINLINE __vec16_i1 __greater_than_double(__vec16_d a, __vec16_d b) {
return ret;
}
static FORCEINLINE __vec16_i1 __greater_than_double_and_mask(__vec16_d a, __vec16_d b,
__vec16_i1 m) {
__vec16_i1 ret;
ret.m8.m1 = _mm512_mask_cmpnle_pd_mask(m.m8.m1, a.v1, b.v1);
ret.m8.m2 = _mm512_mask_cmpnle_pd_mask(m.m8.m2, a.v2, b.v2);
return ret;
}
static FORCEINLINE __vec16_i1 __greater_equal_double(__vec16_d a, __vec16_d b) {
__vec16_i1 ret;
ret.m8.m1 = _mm512_cmpnlt_pd_mask(a.v1, b.v1);
@@ -1085,6 +1217,14 @@ static FORCEINLINE __vec16_i1 __greater_equal_double(__vec16_d a, __vec16_d b) {
return ret;
}
static FORCEINLINE __vec16_i1 __greater_equal_double_and_mask(__vec16_d a, __vec16_d b,
__vec16_i1 m) {
__vec16_i1 ret;
ret.m8.m1 = _mm512_mask_cmpnlt_pd_mask(m.m8.m1, a.v1, b.v1);
ret.m8.m2 = _mm512_mask_cmpnlt_pd_mask(m.m8.m2, a.v2, b.v2);
return ret;
}
/*
static FORCEINLINE __vec16_i1 __ordered(__vec16_d a, __vec16_d b) {