More varied support for constant vectors in the C++ backend.
If a constant vector is all zeros, a __setzero_* function call is now emitted, which lets the target implementation use specialized intrinsics for that case. Undefined vector values are reflected with an __undef_* call, which similarly passes that information along. This change also cleans up the signatures of the __smear_* functions: since they already have distinct names based on the scalar value type, we don't need the trick of passing an undefined value of the return vector type as the first parameter as an indirect way of overloading by return value. Issue #317.
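As a rough illustration of the signature cleanup described above, here is a minimal, self-contained sketch. It is not the actual backend header: the __vec4_i32_demo type and the _old/_demo names are made up for this example; only the intrinsic calls mirror what the diff below uses.

#include <cstdint>
#include <emmintrin.h>   // SSE2 intrinsics

// Hypothetical stand-in for the backend's 4-wide int32 vector wrapper.
struct __vec4_i32_demo {
    __m128i v;
    __vec4_i32_demo() { }                      // contents deliberately left undefined
    __vec4_i32_demo(__m128i vv) : v(vv) { }
};

// Old style: a dummy first argument of the return vector type served only to
// disambiguate the "overload by return value" scheme.
static inline __vec4_i32_demo __smear_i32_old(__vec4_i32_demo, int32_t v) {
    return _mm_set1_epi32(v);
}

// New style: the scalar type is already encoded in the name, so the dummy
// parameter goes away, and all-zero / undefined constants get their own
// entry points that can map to cheaper intrinsics.
static inline __vec4_i32_demo __smear_i32_demo(int32_t v) { return _mm_set1_epi32(v); }
static inline __vec4_i32_demo __setzero_i32_demo()        { return _mm_castps_si128(_mm_setzero_ps()); }
static inline __vec4_i32_demo __undef_i32_demo()          { return __vec4_i32_demo(); }

With this split, code generated for a constant-zero vector calls the __setzero_* entry point directly instead of routing through __smear_*(..., 0), and an undefined constant becomes an __undef_* call.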
@@ -297,10 +297,18 @@ template <int ALIGN> static FORCEINLINE void __store(__vec4_i1 *p, __vec4_i1 val
     _mm_storeu_ps((float *)(&p->v), value.v);
 }
 
-static FORCEINLINE __vec4_i1 __smear_i1(__vec4_i1, int v) {
+static FORCEINLINE __vec4_i1 __smear_i1(int v) {
     return __vec4_i1(v, v, v, v);
 }
 
+static FORCEINLINE __vec4_i1 __setzero_i1() {
+    return __vec4_i1(_mm_setzero_ps());
+}
+
+static FORCEINLINE __vec4_i1 __undef_i1() {
+    return __vec4_i1();
+}
+
 ///////////////////////////////////////////////////////////////////////////
 // int8
 
@@ -524,10 +532,18 @@ static FORCEINLINE void __insert_element(__vec4_i8 *v, int index, int8_t val) {
     ((int8_t *)v)[index] = val;
 }
 
-static FORCEINLINE __vec4_i8 __smear_i8(__vec4_i8, int8_t v) {
+static FORCEINLINE __vec4_i8 __smear_i8(int8_t v) {
     return _mm_set1_epi8(v);
 }
 
+static FORCEINLINE __vec4_i8 __setzero_i8() {
+    return _mm_set1_epi8(0);
+}
+
+static FORCEINLINE __vec4_i8 __undef_i8() {
+    return __vec4_i8();
+}
+
 static FORCEINLINE __vec4_i8 __broadcast_i8(__vec4_i8 v, int index) {
     return _mm_set1_epi8(__extract_element(v, index));
 }
@@ -783,10 +799,18 @@ static FORCEINLINE void __insert_element(__vec4_i16 *v, int index, int16_t val)
     ((int16_t *)v)[index] = val;
 }
 
-static FORCEINLINE __vec4_i16 __smear_i16(__vec4_i16, int16_t v) {
+static FORCEINLINE __vec4_i16 __smear_i16(int16_t v) {
     return _mm_set1_epi16(v);
 }
 
+static FORCEINLINE __vec4_i16 __setzero_i16() {
+    return _mm_set1_epi16(0);
+}
+
+static FORCEINLINE __vec4_i16 __undef_i16() {
+    return __vec4_i16();
+}
+
 static FORCEINLINE __vec4_i16 __broadcast_i16(__vec4_i16 v, int index) {
     return _mm_set1_epi16(__extract_element(v, index));
 }
@@ -1020,10 +1044,18 @@ static FORCEINLINE __vec4_i32 __select(__vec4_i1 mask, __vec4_i32 a, __vec4_i32
                                        _mm_castsi128_ps(a.v), mask.v));
 }
 
-static FORCEINLINE __vec4_i32 __smear_i32(__vec4_i32, int32_t v) {
+static FORCEINLINE __vec4_i32 __smear_i32(int32_t v) {
     return _mm_set1_epi32(v);
 }
 
+static FORCEINLINE __vec4_i32 __setzero_i32() {
+    return _mm_castps_si128(_mm_setzero_ps());
+}
+
+static FORCEINLINE __vec4_i32 __undef_i32() {
+    return __vec4_i32();
+}
+
 static FORCEINLINE int32_t __extract_element(__vec4_i32 v, int index) {
     return ((int32_t *)&v)[index];
 }
@@ -1281,10 +1313,18 @@ static FORCEINLINE __vec4_i64 __select(__vec4_i1 mask, __vec4_i64 a, __vec4_i64
     return __vec4_i64(_mm_castpd_si128(r0), _mm_castpd_si128(r1));
 }
 
-static FORCEINLINE __vec4_i64 __smear_i64(__vec4_i64, int64_t v) {
+static FORCEINLINE __vec4_i64 __smear_i64(int64_t v) {
     return __vec4_i64(v, v, v, v);
 }
 
+static FORCEINLINE __vec4_i64 __setzero_i64() {
+    return __vec4_i64(0, 0, 0, 0);
+}
+
+static FORCEINLINE __vec4_i64 __undef_i64() {
+    return __vec4_i64();
+}
+
 static FORCEINLINE int64_t __extract_element(__vec4_i64 v, int index) {
     return ((int64_t *)&v)[index];
 }
@@ -1385,10 +1425,18 @@ static FORCEINLINE __vec4_f __select(__vec4_i1 mask, __vec4_f a, __vec4_f b) {
     return _mm_blendv_ps(b.v, a.v, mask.v);
 }
 
-static FORCEINLINE __vec4_f __smear_float(__vec4_f, float v) {
+static FORCEINLINE __vec4_f __smear_float(float v) {
     return _mm_set1_ps(v);
 }
 
+static FORCEINLINE __vec4_f __setzero_float() {
+    return _mm_setzero_ps();
+}
+
+static FORCEINLINE __vec4_f __undef_float() {
+    return __vec4_f();
+}
+
 static FORCEINLINE float __extract_element(__vec4_f v, int index) {
     return ((float *)&v)[index];
 }
@@ -1517,10 +1565,18 @@ static FORCEINLINE __vec4_d __select(__vec4_i1 mask, __vec4_d a, __vec4_d b) {
     return __vec4_d(r0, r1);
 }
 
-static FORCEINLINE __vec4_d __smear_double(__vec4_d, double v) {
+static FORCEINLINE __vec4_d __smear_double(double v) {
     return __vec4_d(_mm_set1_pd(v), _mm_set1_pd(v));
 }
 
+static FORCEINLINE __vec4_d __setzero_double() {
+    return __vec4_d(_mm_setzero_pd(), _mm_setzero_pd());
+}
+
+static FORCEINLINE __vec4_d __undef_double() {
+    return __vec4_d();
+}
+
 static FORCEINLINE double __extract_element(__vec4_d v, int index) {
     return ((double *)&v)[index];
 }
@@ -1617,13 +1673,11 @@ static FORCEINLINE __vec4_i16 __cast_sext(__vec4_i16, __vec4_i8 val) {
 }
 
 static FORCEINLINE __vec4_i8 __cast_sext(__vec4_i8, __vec4_i1 v) {
-    return __select(v, __smear_i8(__vec4_i8(), 0xff),
-                    __smear_i8(__vec4_i8(), 0));
+    return __select(v, __smear_i8(0xff), __setzero_i8());
 }
 
 static FORCEINLINE __vec4_i16 __cast_sext(__vec4_i16, __vec4_i1 v) {
-    return __select(v, __smear_i16(__vec4_i16(), 0xffff),
-                    __smear_i16(__vec4_i16(), 0));
+    return __select(v, __smear_i16(0xffff), __setzero_i16());
 }
 
 static FORCEINLINE __vec4_i32 __cast_sext(__vec4_i32, __vec4_i1 v) {
@@ -1683,12 +1737,11 @@ static FORCEINLINE __vec4_i16 __cast_zext(__vec4_i16, __vec4_i8 val) {
 }
 
 static FORCEINLINE __vec4_i8 __cast_zext(__vec4_i8, __vec4_i1 v) {
-    return __select(v, __smear_i8(__vec4_i8(), 1), __smear_i8(__vec4_i8(), 0));
+    return __select(v, __smear_i8(1), __setzero_i8());
 }
 
 static FORCEINLINE __vec4_i16 __cast_zext(__vec4_i16, __vec4_i1 v) {
-    return __select(v, __smear_i16(__vec4_i16(), 1),
-                    __smear_i16(__vec4_i16(), 0));
+    return __select(v, __smear_i16(1), __setzero_i16());
 }
 
 static FORCEINLINE __vec4_i32 __cast_zext(__vec4_i32, __vec4_i1 v) {
@@ -1696,7 +1749,7 @@ static FORCEINLINE __vec4_i32 __cast_zext(__vec4_i32, __vec4_i1 v) {
 }
 
 static FORCEINLINE __vec4_i64 __cast_zext(__vec4_i64, __vec4_i1 v) {
-    return __select(v, __smear_i64(__vec4_i64(), 1), __smear_i64(__vec4_i64(), 0));
+    return __select(v, __smear_i64(1), __setzero_i64());
 }
 
 // truncations
@@ -1856,11 +1909,11 @@ static FORCEINLINE __vec4_d __cast_uitofp(__vec4_d, __vec4_i64 val) {
 }
 
 static FORCEINLINE __vec4_f __cast_uitofp(__vec4_f, __vec4_i1 v) {
-    return __select(v, __smear_float(__vec4_f(), 1.), __smear_float(__vec4_f(), 0.));
+    return __select(v, __smear_float(1.), __setzero_float());
 }
 
 static FORCEINLINE __vec4_d __cast_uitofp(__vec4_d, __vec4_i1 v) {
-    return __select(v, __smear_double(__vec4_d(), 1.), __smear_double(__vec4_d(), 0.));
+    return __select(v, __smear_double(1.), __setzero_double());
 }
 
 // float/double to signed int
@@ -2795,8 +2848,8 @@ lGatherBaseOffsets32(RetVec, RetScalar, unsigned char *p, __vec4_i32 offsets,
     RetScalar r[4];
 #if 1
     // "Fast gather" trick...
-    offsets = __select(mask, offsets, __smear_i32(__vec4_i32(), 0));
-    constOffset = __select(mask, constOffset, __smear_i32(__vec4_i32(), 0));
+    offsets = __select(mask, offsets, __setzero_i32());
+    constOffset = __select(mask, constOffset, __setzero_i32());
 
     int offset = scale * _mm_extract_epi32(offsets.v, 0) + _mm_extract_epi32(constOffset.v, 0);
     RetScalar *ptr = (RetScalar *)(p + offset);
@@ -2853,8 +2906,8 @@ lGatherBaseOffsets64(RetVec, RetScalar, unsigned char *p, __vec4_i64 offsets,
     RetScalar r[4];
 #if 1
     // "Fast gather" trick...
-    offsets = __select(mask, offsets, __smear_i64(__vec4_i64(), 0));
-    constOffset = __select(mask, constOffset, __smear_i64(__vec4_i64(), 0));
+    offsets = __select(mask, offsets, __setzero_i64());
+    constOffset = __select(mask, constOffset, __setzero_i64());
 
     int64_t offset = scale * _mm_extract_epi64(offsets.v[0], 0) + _mm_extract_epi64(constOffset.v[0], 0);
     RetScalar *ptr = (RetScalar *)(p + offset);