All the smear(), setzero(), and undef() APIs are now templated on the return type.

Modified ISPC's internal name mangling to pass these through unchanged.
Took care to ensure this does not introduce an ABI change.
Author: Jean-Luc Duprat
Date:   2012-07-17 17:06:36 -07:00
commit aecd6e0878
parent 1334a84861
8 changed files with 163 additions and 98 deletions
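Every hunk below applies the same transformation: a static FORCEINLINE helper becomes an undefined primary template plus an explicit specialization for the one vector type the header supports. Since RetVecType never appears in the parameter list, callers must spell the return type explicitly, and any other type argument has no definition to link against. A minimal standalone sketch of the pattern, using a hypothetical Vec4 type rather than the real __vec4_* types:

#include <cstdio>

// Illustrative stand-in for an ISPC target vector type such as __vec4_i32.
struct Vec4 { int v[4]; };

// Primary template: declared but never defined, mirroring the diff below.
template <class RetVecType> Vec4 __smear_i32(int x);

// Explicit specialization -- the only instantiation with a definition.
template <> inline Vec4 __smear_i32<Vec4>(int x) {
    Vec4 r;
    for (int i = 0; i < 4; ++i)
        r.v[i] = x;   // splat the scalar across all four lanes
    return r;
}

int main() {
    Vec4 a = __smear_i32<Vec4>(7);   // the type argument selects the specialization
    std::printf("%d %d %d %d\n", a.v[0], a.v[1], a.v[2], a.v[3]);   // prints: 7 7 7 7
    return 0;
}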


@@ -322,15 +322,18 @@ template <int ALIGN> static FORCEINLINE void __store(__vec4_i1 *p, __vec4_i1 val
     _mm_storeu_ps((float *)(&p->v), value.v);
 }
 
-static FORCEINLINE __vec4_i1 __smear_i1(int v) {
+template <class RetVecType> __vec4_i1 __smear_i1(int v);
+template <> FORCEINLINE __vec4_i1 __smear_i1<__vec4_i1>(int v) {
     return __vec4_i1(v, v, v, v);
 }
 
-static FORCEINLINE __vec4_i1 __setzero_i1() {
+template <class RetVecType> __vec4_i1 __setzero_i1();
+template <> FORCEINLINE __vec4_i1 __setzero_i1<__vec4_i1>() {
     return __vec4_i1(_mm_setzero_ps());
 }
 
-static FORCEINLINE __vec4_i1 __undef_i1() {
+template <class RetVecType> __vec4_i1 __undef_i1();
+template <> FORCEINLINE __vec4_i1 __undef_i1<__vec4_i1>() {
     return __vec4_i1();
 }
 
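One consequence of this shape, visible at the call sites later in the diff: because RetVecType is not deducible from the arguments, existing calls such as __smear_i1(v) no longer compile and must name the type explicitly. A fragment, assuming this header is included:

__vec4_i1 allOn = __smear_i1<__vec4_i1>(1);    // OK: explicit return-type argument
__vec4_i1 zeros = __setzero_i1<__vec4_i1>();   // OK
// __vec4_i1 bad = __setzero_i1();             // error: cannot deduce RetVecType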
@@ -560,15 +563,18 @@ static FORCEINLINE void __insert_element(__vec4_i8 *v, int index, int8_t val) {
     ((int8_t *)v)[index] = val;
 }
 
-static FORCEINLINE __vec4_i8 __smear_i8(int8_t v) {
+template <class RetVecType> __vec4_i8 __smear_i8(int8_t v);
+template <> FORCEINLINE __vec4_i8 __smear_i8<__vec4_i8>(int8_t v) {
     return _mm_set1_epi8(v);
 }
 
-static FORCEINLINE __vec4_i8 __setzero_i8() {
+template <class RetVecType> __vec4_i8 __setzero_i8();
+template <> FORCEINLINE __vec4_i8 __setzero_i8<__vec4_i8>() {
     return _mm_set1_epi8(0);
 }
 
-static FORCEINLINE __vec4_i8 __undef_i8() {
+template <class RetVecType> __vec4_i8 __undef_i8();
+template <> FORCEINLINE __vec4_i8 __undef_i8<__vec4_i8>() {
     return __vec4_i8();
 }
 
@@ -829,15 +835,18 @@ static FORCEINLINE void __insert_element(__vec4_i16 *v, int index, int16_t val)
     ((int16_t *)v)[index] = val;
 }
 
-static FORCEINLINE __vec4_i16 __smear_i16(int16_t v) {
+template <class RetVecType> __vec4_i16 __smear_i16(int16_t v);
+template <> FORCEINLINE __vec4_i16 __smear_i16<__vec4_i16>(int16_t v) {
     return _mm_set1_epi16(v);
 }
 
-static FORCEINLINE __vec4_i16 __setzero_i16() {
+template <class RetVecType> __vec4_i16 __setzero_i16();
+template <> FORCEINLINE __vec4_i16 __setzero_i16<__vec4_i16>() {
     return _mm_set1_epi16(0);
 }
 
-static FORCEINLINE __vec4_i16 __undef_i16() {
+template <class RetVecType> __vec4_i16 __undef_i16();
+template <> FORCEINLINE __vec4_i16 __undef_i16<__vec4_i16>() {
     return __vec4_i16();
 }
 
@@ -1076,15 +1085,18 @@ static FORCEINLINE __vec4_i32 __select(__vec4_i1 mask, __vec4_i32 a, __vec4_i32
                                           _mm_castsi128_ps(a.v), mask.v));
 }
 
-static FORCEINLINE __vec4_i32 __smear_i32(int32_t v) {
+template <class RetVecType> __vec4_i32 __smear_i32(int32_t v);
+template <> FORCEINLINE __vec4_i32 __smear_i32<__vec4_i32>(int32_t v) {
     return _mm_set1_epi32(v);
 }
 
-static FORCEINLINE __vec4_i32 __setzero_i32() {
+template <class RetVecType> __vec4_i32 __setzero_i32();
+template <> FORCEINLINE __vec4_i32 __setzero_i32<__vec4_i32>() {
     return _mm_castps_si128(_mm_setzero_ps());
 }
 
-static FORCEINLINE __vec4_i32 __undef_i32() {
+template <class RetVecType> __vec4_i32 __undef_i32();
+template <> FORCEINLINE __vec4_i32 __undef_i32<__vec4_i32>() {
     return __vec4_i32();
 }
 
@@ -1347,15 +1359,18 @@ static FORCEINLINE __vec4_i64 __select(__vec4_i1 mask, __vec4_i64 a, __vec4_i64
     return __vec4_i64(_mm_castpd_si128(r0), _mm_castpd_si128(r1));
 }
 
-static FORCEINLINE __vec4_i64 __smear_i64(int64_t v) {
+template <class RetVecType> __vec4_i64 __smear_i64(int64_t v);
+template <> FORCEINLINE __vec4_i64 __smear_i64<__vec4_i64>(int64_t v) {
     return __vec4_i64(v, v, v, v);
 }
 
-static FORCEINLINE __vec4_i64 __setzero_i64() {
+template <class RetVecType> __vec4_i64 __setzero_i64();
+template <> FORCEINLINE __vec4_i64 __setzero_i64<__vec4_i64>() {
     return __vec4_i64(0, 0, 0, 0);
 }
 
-static FORCEINLINE __vec4_i64 __undef_i64() {
+template <class RetVecType> __vec4_i64 __undef_i64();
+template <> FORCEINLINE __vec4_i64 __undef_i64<__vec4_i64>() {
     return __vec4_i64();
 }
 
@@ -1465,15 +1480,18 @@ static FORCEINLINE __vec4_f __select(__vec4_i1 mask, __vec4_f a, __vec4_f b) {
     return _mm_blendv_ps(b.v, a.v, mask.v);
 }
 
-static FORCEINLINE __vec4_f __smear_float(float v) {
+template <class RetVecType> __vec4_f __smear_float(float v);
+template <> FORCEINLINE __vec4_f __smear_float<__vec4_f>(float v) {
     return _mm_set1_ps(v);
 }
 
-static FORCEINLINE __vec4_f __setzero_float() {
+template <class RetVecType> __vec4_f __setzero_float();
+template <> FORCEINLINE __vec4_f __setzero_float<__vec4_f>() {
     return _mm_setzero_ps();
 }
 
-static FORCEINLINE __vec4_f __undef_float() {
+template <class RetVecType> __vec4_f __undef_float();
+template <> FORCEINLINE __vec4_f __undef_float<__vec4_f>() {
     return __vec4_f();
 }
 
@@ -1614,15 +1632,18 @@ static FORCEINLINE __vec4_d __select(__vec4_i1 mask, __vec4_d a, __vec4_d b) {
     return __vec4_d(r0, r1);
 }
 
-static FORCEINLINE __vec4_d __smear_double(double v) {
+template <class RetVecType> __vec4_d __smear_double(double v);
+template <> FORCEINLINE __vec4_d __smear_double<__vec4_d>(double v) {
     return __vec4_d(_mm_set1_pd(v), _mm_set1_pd(v));
 }
 
-static FORCEINLINE __vec4_d __setzero_double() {
+template <class RetVecType> __vec4_d __setzero_double();
+template <> FORCEINLINE __vec4_d __setzero_double<__vec4_d>() {
     return __vec4_d(_mm_setzero_pd(), _mm_setzero_pd());
 }
 
-static FORCEINLINE __vec4_d __undef_double() {
+template <class RetVecType> __vec4_d __undef_double();
+template <> FORCEINLINE __vec4_d __undef_double<__vec4_d>() {
     return __vec4_d();
 }
 
@@ -1722,11 +1743,11 @@ static FORCEINLINE __vec4_i16 __cast_sext(__vec4_i16, __vec4_i8 val) {
 }
 
 static FORCEINLINE __vec4_i8 __cast_sext(__vec4_i8, __vec4_i1 v) {
-    return __select(v, __smear_i8(0xff), __setzero_i8());
+    return __select(v, __smear_i8<__vec4_i8>(0xff), __setzero_i8<__vec4_i8>());
 }
 
 static FORCEINLINE __vec4_i16 __cast_sext(__vec4_i16, __vec4_i1 v) {
-    return __select(v, __smear_i16(0xffff), __setzero_i16());
+    return __select(v, __smear_i16<__vec4_i16>(0xffff), __setzero_i16<__vec4_i16>());
 }
 
 static FORCEINLINE __vec4_i32 __cast_sext(__vec4_i32, __vec4_i1 v) {
@@ -1786,11 +1807,11 @@ static FORCEINLINE __vec4_i16 __cast_zext(__vec4_i16, __vec4_i8 val) {
 }
 
 static FORCEINLINE __vec4_i8 __cast_zext(__vec4_i8, __vec4_i1 v) {
-    return __select(v, __smear_i8(1), __setzero_i8());
+    return __select(v, __smear_i8<__vec4_i8>(1), __setzero_i8<__vec4_i8>());
 }
 
 static FORCEINLINE __vec4_i16 __cast_zext(__vec4_i16, __vec4_i1 v) {
-    return __select(v, __smear_i16(1), __setzero_i16());
+    return __select(v, __smear_i16<__vec4_i16>(1), __setzero_i16<__vec4_i16>());
 }
 
 static FORCEINLINE __vec4_i32 __cast_zext(__vec4_i32, __vec4_i1 v) {
@@ -1798,7 +1819,7 @@ static FORCEINLINE __vec4_i32 __cast_zext(__vec4_i32, __vec4_i1 v) {
 }
 
 static FORCEINLINE __vec4_i64 __cast_zext(__vec4_i64, __vec4_i1 v) {
-    return __select(v, __smear_i64(1), __setzero_i64());
+    return __select(v, __smear_i64<__vec4_i64>(1), __setzero_i64<__vec4_i64>());
 }
 
 // truncations
@@ -1958,11 +1979,11 @@ static FORCEINLINE __vec4_d __cast_uitofp(__vec4_d, __vec4_i64 val) {
 }
 
 static FORCEINLINE __vec4_f __cast_uitofp(__vec4_f, __vec4_i1 v) {
-    return __select(v, __smear_float(1.), __setzero_float());
+    return __select(v, __smear_float<__vec4_f>(1.), __setzero_float<__vec4_f>());
 }
 
 static FORCEINLINE __vec4_d __cast_uitofp(__vec4_d, __vec4_i1 v) {
-    return __select(v, __smear_double(1.), __setzero_double());
+    return __select(v, __smear_double<__vec4_d>(1.), __setzero_double<__vec4_d>());
 }
 
 // float/double to signed int
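All of these mask-to-value casts follow one recipe: splat the constant for true lanes, splat zero, and blend on the mask. A scalar model of the logic (illustrative only; the real code uses the vector blend shown above):

#include <cstdio>

// Scalar model of:
//   __select(v, __smear_float<__vec4_f>(1.), __setzero_float<__vec4_f>())
static void cast_uitofp(const bool mask[4], float out[4]) {
    for (int i = 0; i < 4; ++i)
        out[i] = mask[i] ? 1.0f : 0.0f;   // blend of the two splatted values
}

int main() {
    bool m[4] = {true, false, true, false};
    float out[4];
    cast_uitofp(m, out);
    std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);   // prints: 1 0 1 0
    return 0;
}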
@@ -2897,7 +2918,7 @@ lGatherBaseOffsets32(RetVec, RetScalar, unsigned char *p, uint32_t scale,
     RetScalar r[4];
 #if 1
     // "Fast gather" trick...
-    offsets = __select(mask, offsets, __setzero_i32());
+    offsets = __select(mask, offsets, __setzero_i32<__vec4_i32>());
 
     int offset = scale * _mm_extract_epi32(offsets.v, 0);
     RetScalar *ptr = (RetScalar *)(p + offset);
@@ -2954,7 +2975,7 @@ lGatherBaseOffsets64(RetVec, RetScalar, unsigned char *p, uint32_t scale,
     RetScalar r[4];
 #if 1
     // "Fast gather" trick...
-    offsets = __select(mask, offsets, __setzero_i64());
+    offsets = __select(mask, offsets, __setzero_i64<__vec4_i64>());
 
     int64_t offset = scale * _mm_extract_epi64(offsets.v[0], 0);
     RetScalar *ptr = (RetScalar *)(p + offset);
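For context on these last two hunks: the "fast gather" trick forces the offsets of inactive lanes to zero before loading, so every lane dereferences a valid address inside the base allocation, and the values fetched for masked-off lanes are simply never used. A simplified scalar sketch (scale and the _mm_extract_* plumbing omitted):

#include <cstdint>

// Simplified model of lGatherBaseOffsets32: inactive lanes read from
// offset 0 -- a valid address -- and their results are discarded under
// the caller's mask.
static void gather32(const unsigned char *p, const int32_t offsets[4],
                     const bool mask[4], int32_t r[4]) {
    for (int i = 0; i < 4; ++i) {
        int32_t off = mask[i] ? offsets[i] : 0;   // __select(mask, offsets, zero)
        r[i] = *(const int32_t *)(p + off);       // always a safe load
    }
}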