Add reduce_add() for int8 and int16 types.
This maps to specialized instructions (e.g. PSADBW) when available.
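For readers who don't know PSADBW: it computes packed sums of absolute differences of unsigned bytes, so running it against an all-zero operand yields a horizontal byte sum in two 16-bit partial results. A minimal SSE2 sketch of that mapping — illustrative code, not part of this commit; the XOR bias compensates for PSADBW being unsigned-only:

#include <emmintrin.h>
#include <stdint.h>

static inline int16_t reduce_add_i8_sse2(__m128i v) {
    // Bias signed bytes into unsigned range (x ^ 0x80 == x + 128 mod 256).
    __m128i biased = _mm_xor_si128(v, _mm_set1_epi8((char)0x80));
    // PSADBW against zero: horizontal sums land in bits [15:0] and [79:64].
    __m128i sad = _mm_sad_epu8(biased, _mm_setzero_si128());
    int sum = _mm_cvtsi128_si32(sad) + _mm_extract_epi16(sad, 4);
    // Remove the 16 lanes' worth of +128 bias.
    return (int16_t)(sum - 16 * 128);
}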
@@ -1162,19 +1162,20 @@ REDUCE_ADD(double, __vec16_d, __reduce_add_double)
 REDUCE_MINMAX(double, __vec16_d, __reduce_min_double, <)
 REDUCE_MINMAX(double, __vec16_d, __reduce_max_double, >)
 
-REDUCE_ADD(uint32_t, __vec16_i32, __reduce_add_int32)
+REDUCE_ADD(int16_t, __vec16_i8, __reduce_add_int8)
+REDUCE_ADD(int32_t, __vec16_i16, __reduce_add_int16)
+
+REDUCE_ADD(int64_t, __vec16_i32, __reduce_add_int32)
 REDUCE_MINMAX(int32_t, __vec16_i32, __reduce_min_int32, <)
 REDUCE_MINMAX(int32_t, __vec16_i32, __reduce_max_int32, >)
 
 REDUCE_ADD(uint32_t, __vec16_i32, __reduce_add_uint32)
 REDUCE_MINMAX(uint32_t, __vec16_i32, __reduce_min_uint32, <)
 REDUCE_MINMAX(uint32_t, __vec16_i32, __reduce_max_uint32, >)
 
-REDUCE_ADD(uint64_t, __vec16_i64, __reduce_add_int64)
+REDUCE_ADD(int64_t, __vec16_i64, __reduce_add_int64)
 REDUCE_MINMAX(int64_t, __vec16_i64, __reduce_min_int64, <)
 REDUCE_MINMAX(int64_t, __vec16_i64, __reduce_max_int64, >)
 
 REDUCE_ADD(uint64_t, __vec16_i64, __reduce_add_uint64)
 REDUCE_MINMAX(uint64_t, __vec16_i64, __reduce_min_uint64, <)
 REDUCE_MINMAX(uint64_t, __vec16_i64, __reduce_max_uint64, >)
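The REDUCE_ADD macro definition itself is outside these hunks; judging from the (return type, vector type, function name) instantiation pattern, it presumably expands to a scalar fold along these lines (a sketch for the 16-wide case, not the file's actual definition):

#define REDUCE_ADD(TYPE, VTYPE, NAME)       \
static FORCEINLINE TYPE NAME(VTYPE v) {     \
    TYPE ret = v.v[0];                      \
    for (int i = 1; i < 16; ++i)            \
        ret = ret + v.v[i];                 \
    return ret;                             \
}

Note that each instantiation returns a type wider than the element — int16_t for int8 lanes, int32_t for int16 lanes, int64_t for int32 lanes — so a full-vector sum cannot overflow the result.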
@@ -1231,19 +1231,20 @@ REDUCE_ADD(double, __vec32_d, __reduce_add_double)
 REDUCE_MINMAX(double, __vec32_d, __reduce_min_double, <)
 REDUCE_MINMAX(double, __vec32_d, __reduce_max_double, >)
 
-REDUCE_ADD(uint32_t, __vec32_i32, __reduce_add_int32)
+REDUCE_ADD(int16_t, __vec32_i8, __reduce_add_int8)
+REDUCE_ADD(int32_t, __vec32_i16, __reduce_add_int16)
+
+REDUCE_ADD(int64_t, __vec32_i32, __reduce_add_int32)
 REDUCE_MINMAX(int32_t, __vec32_i32, __reduce_min_int32, <)
 REDUCE_MINMAX(int32_t, __vec32_i32, __reduce_max_int32, >)
 
 REDUCE_ADD(uint32_t, __vec32_i32, __reduce_add_uint32)
 REDUCE_MINMAX(uint32_t, __vec32_i32, __reduce_min_uint32, <)
 REDUCE_MINMAX(uint32_t, __vec32_i32, __reduce_max_uint32, >)
 
-REDUCE_ADD(uint64_t, __vec32_i64, __reduce_add_int64)
+REDUCE_ADD(int64_t, __vec32_i64, __reduce_add_int64)
 REDUCE_MINMAX(int64_t, __vec32_i64, __reduce_min_int64, <)
 REDUCE_MINMAX(int64_t, __vec32_i64, __reduce_max_int64, >)
 
 REDUCE_ADD(uint64_t, __vec32_i64, __reduce_add_uint64)
 REDUCE_MINMAX(uint64_t, __vec32_i64, __reduce_min_uint64, <)
 REDUCE_MINMAX(uint64_t, __vec32_i64, __reduce_max_uint64, >)
@@ -1364,19 +1364,20 @@ REDUCE_ADD(double, __vec64_d, __reduce_add_double)
 REDUCE_MINMAX(double, __vec64_d, __reduce_min_double, <)
 REDUCE_MINMAX(double, __vec64_d, __reduce_max_double, >)
 
-REDUCE_ADD(uint32_t, __vec64_i32, __reduce_add_int32)
+REDUCE_ADD(int16_t, __vec64_i8, __reduce_add_int8)
+REDUCE_ADD(int32_t, __vec64_i16, __reduce_add_int16)
+
+REDUCE_ADD(int64_t, __vec64_i32, __reduce_add_int32)
 REDUCE_MINMAX(int32_t, __vec64_i32, __reduce_min_int32, <)
 REDUCE_MINMAX(int32_t, __vec64_i32, __reduce_max_int32, >)
 
 REDUCE_ADD(uint32_t, __vec64_i32, __reduce_add_uint32)
 REDUCE_MINMAX(uint32_t, __vec64_i32, __reduce_min_uint32, <)
 REDUCE_MINMAX(uint32_t, __vec64_i32, __reduce_max_uint32, >)
 
-REDUCE_ADD(uint64_t, __vec64_i64, __reduce_add_int64)
+REDUCE_ADD(int64_t, __vec64_i64, __reduce_add_int64)
 REDUCE_MINMAX(int64_t, __vec64_i64, __reduce_min_int64, <)
 REDUCE_MINMAX(int64_t, __vec64_i64, __reduce_max_int64, >)
 
 REDUCE_ADD(uint64_t, __vec64_i64, __reduce_add_uint64)
 REDUCE_MINMAX(uint64_t, __vec64_i64, __reduce_min_uint64, <)
 REDUCE_MINMAX(uint64_t, __vec64_i64, __reduce_max_uint64, >)
@@ -1511,6 +1511,22 @@ static FORCEINLINE int64_t __count_trailing_zeros_i64(const __vec1_i64 mask) {
 ///////////////////////////////////////////////////////////////////////////
 // reductions
 
+static FORCEINLINE int16_t __reduce_add_i8(__vec16_i8 v) {
+    // TODO: improve this!
+    int16_t ret = 0;
+    for (int i = 0; i < 16; ++i)
+        ret += v.v[i];
+    return ret;
+}
+
+static FORCEINLINE int32_t __reduce_add_i16(__vec16_i16 v) {
+    // TODO: improve this!
+    int32_t ret = 0;
+    for (int i = 0; i < 16; ++i)
+        ret += v.v[i];
+    return ret;
+}
+
 static FORCEINLINE uint32_t __reduce_add_i32(__vec16_i32 v) {
     return _mm512_reduce_add_epi32(v);
 }
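The two scalar loops above are marked TODO. Assuming an AVX-512F target where the 16 int8 lanes sit in the low bytes of a __m128i (an assumption — this file's __vec16_i8 layout isn't shown in the diff), one improvement is to sign-extend to 32-bit lanes and reuse the existing 32-bit reduction:

#include <immintrin.h>
#include <stdint.h>

// Hypothetical AVX-512F variant: widen 16 x i8, then one horizontal add.
static inline int16_t reduce_add_i8_avx512(__m128i v) {
    __m512i wide = _mm512_cvtepi8_epi32(v);         // VPMOVSXBD sign-extend
    return (int16_t)_mm512_reduce_add_epi32(wide);  // sum of 16 bytes fits
}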
@@ -1607,6 +1607,9 @@ static FORCEINLINE int64_t __count_leading_zeros_i64(uint64_t v) {
 ///////////////////////////////////////////////////////////////////////////
 // reductions
 
+REDUCE_ADD(int16_t, __vec32_i8, __reduce_add_int8)
+REDUCE_ADD(int32_t, __vec32_i16, __reduce_add_int16)
+
 static FORCEINLINE float __reduce_add_float(__vec32_f v) {
     return _mm512_reduce_add_ps(v.v1) + _mm512_reduce_add_ps(v.v2);
 }
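__reduce_add_float shows this target's 32-wide pattern: reduce each 512-bit half, then combine. Assuming __vec32_i32 exposes the same .v1/.v2 halves as __vec32_f (an assumption; the struct isn't shown in this diff), the int32 reduction would follow suit:

// Sketch only: the .v1/.v2 members of __vec32_i32 are inferred by analogy.
static FORCEINLINE int64_t __reduce_add_int32_sketch(__vec32_i32 v) {
    return (int64_t)_mm512_reduce_add_epi32(v.v1) +
           (int64_t)_mm512_reduce_add_epi32(v.v2);
}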
@@ -2528,6 +2528,22 @@ static FORCEINLINE int64_t __count_leading_zeros_i64(uint64_t v) {
 ///////////////////////////////////////////////////////////////////////////
 // reductions
 
+static FORCEINLINE int16_t __reduce_add_int8(__vec4_i8 v) {
+    // TODO: improve
+    int16_t ret = 0;
+    for (int i = 0; i < 4; ++i)
+        ret += v.v[i];
+    return ret;
+}
+
+static FORCEINLINE int32_t __reduce_add_int16(__vec4_i16 v) {
+    // TODO: improve
+    int32_t ret = 0;
+    for (int i = 0; i < 4; ++i)
+        ret += v.v[i];
+    return ret;
+}
+
 static FORCEINLINE float __reduce_add_float(__vec4_f v) {
     float r = bits_as_float(_mm_extract_ps(v.v, 0));
     r += bits_as_float(_mm_extract_ps(v.v, 1));
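The 4-wide float reduction above pulls lanes out one at a time with _mm_extract_ps. A common shuffle-free alternative on SSE3, for comparison (a sketch, not code from this commit):

#include <pmmintrin.h>   // SSE3

static inline float reduce_add_float_sse3(__m128 v) {
    __m128 t = _mm_hadd_ps(v, v);   // (x0+x1, x2+x3, x0+x1, x2+x3)
    t = _mm_hadd_ps(t, t);          // full sum broadcast to every lane
    return _mm_cvtss_f32(t);        // take lane 0
}

HADDPS has relatively high latency on many microarchitectures, which is why per-lane extraction or shuffle-and-add sequences are sometimes preferred; either way the result is the same four-element sum.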