Merge pull request #321 from mmp/setzero

More varied support for constant vectors from C++ backend.
This commit is contained in:
Jean-Luc Duprat
2012-07-09 08:57:05 -07:00
7 changed files with 330 additions and 104 deletions

View File

@@ -327,13 +327,26 @@ static FORCEINLINE TYPE NAME(TYPE a, int32_t b) { \
}
// SMEAR: define __smear_<NAME>(), returning a VTYPE with the scalar v
// replicated into all 32 lanes.
// Fix: a collapsed diff left BOTH the old two-parameter signature and the
// new one-parameter signature as consecutive lines, which is ill-formed;
// only the updated single-parameter form is kept.
#define SMEAR(VTYPE, NAME, STYPE) \
static FORCEINLINE VTYPE __smear_##NAME(STYPE v) { \
    VTYPE ret; \
    for (int i = 0; i < 32; ++i) \
        ret.v[i] = v; \
    return ret; \
}
// SETZERO: define __setzero_<NAME>(), returning a VTYPE with every one of
// its 32 lanes cleared to zero.
#define SETZERO(VTYPE, NAME) \
static FORCEINLINE VTYPE __setzero_##NAME() { \
    VTYPE ret; \
    int lane = 0; \
    while (lane < 32) { \
        ret.v[lane] = 0; \
        ++lane; \
    } \
    return ret; \
}
// UNDEF: define __undef_<NAME>(), returning a "don't care" VTYPE value.
// NOTE(review): VTYPE() value-initializes in C++, so the lanes are in fact
// zeroed here; callers must nonetheless treat the contents as undefined.
#define UNDEF(VTYPE, NAME) \
static FORCEINLINE VTYPE __undef_##NAME() { \
return VTYPE(); \
}
#define BROADCAST(VTYPE, NAME, STYPE) \
static FORCEINLINE VTYPE __broadcast_##NAME(VTYPE v, int index) { \
VTYPE ret; \
@@ -459,13 +472,24 @@ template <int ALIGN> static FORCEINLINE void __store(__vec32_i1 *p, __vec32_i1 v
*ptr = v.v;
}
// Replicate the scalar v into all 32 lanes of a mask (i1) vector.
// Fix: a collapsed diff left both the old signature taking a dummy
// __vec32_i1 argument and the new (int v) signature as consecutive lines,
// which is ill-formed; only the updated signature is kept.
static FORCEINLINE __vec32_i1 __smear_i1(int v) {
    return __vec32_i1(v, v, v, v, v, v, v, v,
                      v, v, v, v, v, v, v, v,
                      v, v, v, v, v, v, v, v,
                      v, v, v, v, v, v, v, v);
}
// Return a mask (i1) vector with all 32 lanes cleared to zero.
static FORCEINLINE __vec32_i1 __setzero_i1() {
    const int z = 0;
    return __vec32_i1(z, z, z, z, z, z, z, z,
                      z, z, z, z, z, z, z, z,
                      z, z, z, z, z, z, z, z,
                      z, z, z, z, z, z, z, z);
}
// Return an "undefined" mask (i1) vector. NOTE(review): __vec32_i1() is
// value-initialized, so the result is actually zeroed; callers must not
// rely on its contents.
static FORCEINLINE __vec32_i1 __undef_i1() {
return __vec32_i1();
}
///////////////////////////////////////////////////////////////////////////
// int8
@@ -505,6 +529,8 @@ CMP_OP(__vec32_i8, i8, int8_t, __signed_greater_than, >)
SELECT(__vec32_i8)
INSERT_EXTRACT(__vec32_i8, int8_t)
SMEAR(__vec32_i8, i8, int8_t)
SETZERO(__vec32_i8, i8)
UNDEF(__vec32_i8, i8)
BROADCAST(__vec32_i8, i8, int8_t)
ROTATE(__vec32_i8, i8, int8_t)
SHUFFLES(__vec32_i8, i8, int8_t)
@@ -548,6 +574,8 @@ CMP_OP(__vec32_i16, i16, int16_t, __signed_greater_than, >)
SELECT(__vec32_i16)
INSERT_EXTRACT(__vec32_i16, int16_t)
SMEAR(__vec32_i16, i16, int16_t)
SETZERO(__vec32_i16, i16)
UNDEF(__vec32_i16, i16)
BROADCAST(__vec32_i16, i16, int16_t)
ROTATE(__vec32_i16, i16, int16_t)
SHUFFLES(__vec32_i16, i16, int16_t)
@@ -591,6 +619,8 @@ CMP_OP(__vec32_i32, i32, int32_t, __signed_greater_than, >)
SELECT(__vec32_i32)
INSERT_EXTRACT(__vec32_i32, int32_t)
SMEAR(__vec32_i32, i32, int32_t)
SETZERO(__vec32_i32, i32)
UNDEF(__vec32_i32, i32)
BROADCAST(__vec32_i32, i32, int32_t)
ROTATE(__vec32_i32, i32, int32_t)
SHUFFLES(__vec32_i32, i32, int32_t)
@@ -634,6 +664,8 @@ CMP_OP(__vec32_i64, i64, int64_t, __signed_greater_than, >)
SELECT(__vec32_i64)
INSERT_EXTRACT(__vec32_i64, int64_t)
SMEAR(__vec32_i64, i64, int64_t)
SETZERO(__vec32_i64, i64)
UNDEF(__vec32_i64, i64)
BROADCAST(__vec32_i64, i64, int64_t)
ROTATE(__vec32_i64, i64, int64_t)
SHUFFLES(__vec32_i64, i64, int64_t)
@@ -669,6 +701,8 @@ static FORCEINLINE __vec32_i1 __ordered(__vec32_f a, __vec32_f b) {
SELECT(__vec32_f)
INSERT_EXTRACT(__vec32_f, float)
SMEAR(__vec32_f, float, float)
SETZERO(__vec32_f, float)
UNDEF(__vec32_f, float)
BROADCAST(__vec32_f, float, float)
ROTATE(__vec32_f, float, float)
SHUFFLES(__vec32_f, float, float)
@@ -819,6 +853,8 @@ static FORCEINLINE __vec32_i1 __ordered(__vec32_d a, __vec32_d b) {
SELECT(__vec32_d)
INSERT_EXTRACT(__vec32_d, double)
SMEAR(__vec32_d, double, double)
SETZERO(__vec32_d, double)
UNDEF(__vec32_d, double)
BROADCAST(__vec32_d, double, double)
ROTATE(__vec32_d, double, double)
SHUFFLES(__vec32_d, double, double)