Merge remote-tracking branch 'upstream/master' into nvptx

This commit is contained in:
egaburov
2013-10-29 15:24:40 +01:00
19 changed files with 808 additions and 760 deletions

View File

@@ -311,6 +311,17 @@ static FORCEINLINE VTYPE __rotate_##NAME(VTYPE v, int index) { \
return ret; \
} \
/* SHIFT(VTYPE, NAME, STYPE): emits __shift_##NAME(v, index).  Lane i of the
   result is v.v[i + index]; any lane whose source index falls outside
   [0, 16) is filled with zero.  Generic scalar fallback for the 16-wide
   vector structs (element storage in the .v array). */
#define SHIFT(VTYPE, NAME, STYPE) \
static FORCEINLINE VTYPE __shift_##NAME(VTYPE v, int index) { \
VTYPE result; \
for (int lane = 0; lane < 16; ++lane) { \
const int src = lane + index; \
result.v[lane] = ((src >= 0) && (src < 16)) ? v.v[src] : (STYPE)0; \
} \
return result; \
} \
#define SHUFFLES(VTYPE, NAME, STYPE) \
static FORCEINLINE VTYPE __shuffle_##NAME(VTYPE v, __vec16_i32 index) { \
VTYPE ret; \
@@ -492,6 +503,7 @@ SETZERO(__vec16_i8, i8)
UNDEF(__vec16_i8, i8)
BROADCAST(__vec16_i8, i8, int8_t)
ROTATE(__vec16_i8, i8, int8_t)
SHIFT(__vec16_i8, i8, int8_t)
SHUFFLES(__vec16_i8, i8, int8_t)
LOAD_STORE(__vec16_i8, int8_t)
@@ -537,6 +549,7 @@ SETZERO(__vec16_i16, i16)
UNDEF(__vec16_i16, i16)
BROADCAST(__vec16_i16, i16, int16_t)
ROTATE(__vec16_i16, i16, int16_t)
SHIFT(__vec16_i16, i16, int16_t)
SHUFFLES(__vec16_i16, i16, int16_t)
LOAD_STORE(__vec16_i16, int16_t)
@@ -582,6 +595,7 @@ SETZERO(__vec16_i32, i32)
UNDEF(__vec16_i32, i32)
BROADCAST(__vec16_i32, i32, int32_t)
ROTATE(__vec16_i32, i32, int32_t)
SHIFT(__vec16_i32, i32, int32_t)
SHUFFLES(__vec16_i32, i32, int32_t)
LOAD_STORE(__vec16_i32, int32_t)
@@ -627,6 +641,7 @@ SETZERO(__vec16_i64, i64)
UNDEF(__vec16_i64, i64)
BROADCAST(__vec16_i64, i64, int64_t)
ROTATE(__vec16_i64, i64, int64_t)
SHIFT(__vec16_i64, i64, int64_t)
SHUFFLES(__vec16_i64, i64, int64_t)
LOAD_STORE(__vec16_i64, int64_t)
@@ -672,6 +687,7 @@ SETZERO(__vec16_f, float)
UNDEF(__vec16_f, float)
BROADCAST(__vec16_f, float, float)
ROTATE(__vec16_f, float, float)
SHIFT(__vec16_f, float, float)
SHUFFLES(__vec16_f, float, float)
LOAD_STORE(__vec16_f, float)
@@ -832,6 +848,7 @@ SETZERO(__vec16_d, double)
UNDEF(__vec16_d, double)
BROADCAST(__vec16_d, double, double)
ROTATE(__vec16_d, double, double)
SHIFT(__vec16_d, double, double)
SHUFFLES(__vec16_d, double, double)
LOAD_STORE(__vec16_d, double)

View File

@@ -451,6 +451,17 @@ static FORCEINLINE VTYPE __rotate_##NAME(VTYPE v, int index) { \
return ret; \
} \
/* SHIFT(VTYPE, NAME, STYPE): emits __shift_##NAME(v, index).  Lane i of the
 * result is v[i + index]; lanes whose source index falls outside [0, 16) are
 * filled with zero.  Scalar fallback for 16-wide vector types that support
 * direct operator[] element access. */
#define SHIFT(VTYPE, NAME, STYPE) \
static FORCEINLINE VTYPE __shift_##NAME(VTYPE v, int index) { \
VTYPE ret; \
for (int i = 0; i < 16; ++i) { \
int modIndex = i+index; \
STYPE val = ((modIndex >= 0) && (modIndex < 16)) ? v[modIndex] : 0; \
ret[i] = val; \
} \
return ret; \
} \
/* knc::macro::used */
#define SHUFFLES(VTYPE, NAME, STYPE) \
static FORCEINLINE VTYPE __shuffle_##NAME(VTYPE v, __vec16_i32 index) { \
@@ -566,6 +577,7 @@ SETZERO(__vec16_i8, i8)
UNDEF(__vec16_i8, i8)
BROADCAST(__vec16_i8, i8, int8_t)
ROTATE(__vec16_i8, i8, int8_t)
SHIFT(__vec16_i8, i8, int8_t)
SHUFFLES(__vec16_i8, i8, int8_t)
LOAD_STORE(__vec16_i8, int8_t)
@@ -612,6 +624,7 @@ SETZERO(__vec16_i16, i16)
UNDEF(__vec16_i16, i16)
BROADCAST(__vec16_i16, i16, int16_t)
ROTATE(__vec16_i16, i16, int16_t)
SHIFT(__vec16_i16, i16, int16_t)
SHUFFLES(__vec16_i16, i16, int16_t)
LOAD_STORE(__vec16_i16, int16_t)
@@ -688,6 +701,8 @@ static FORCEINLINE __vec16_i32 __rotate_i32(__vec16_i32 v, int index)
return _mm512_mask_permutevar_epi32(v, 0xFFFF, shuffle, v);
}
SHIFT(__vec16_i32, i32, int32_t)
static FORCEINLINE __vec16_i32 __shuffle_i32 (__vec16_i32 v, __vec16_i32 index)
{
return _mm512_mask_permutevar_epi32(v, 0xFFFF, __and(index, __smear_i32<__vec16_i32>(0xF)), v);
@@ -942,6 +957,8 @@ static FORCEINLINE __vec16_i64 __rotate_i64(const __vec16_i64 _v, const int inde
const __vec16_i32 ret_lo = __rotate_i32(v_lo, index);
return CASTI2L(ret_hi, ret_lo);
}
SHIFT(__vec16_i64, i64, int64_t)
static FORCEINLINE __vec16_i64 __shuffle_double(__vec16_i64 _v, const __vec16_i32 index)
{
CASTL2I(_v, v_hi, v_lo);
@@ -1063,6 +1080,7 @@ static FORCEINLINE __vec16_f __rotate_float(__vec16_f _v, int index)
const __vec16_i32 shuffle = _mm512_and_epi32(_mm512_add_epi32(__ispc_stride1, idx), __smear_i32<__vec16_i32>(0xF));
return _mm512_castsi512_ps(_mm512_mask_permutevar_epi32(v, 0xFFFF, shuffle, v));
}
SHIFT(__vec16_f, float, float)
static FORCEINLINE __vec16_f __shuffle_float(__vec16_f v, __vec16_i32 index)
{
return _mm512_castsi512_ps(_mm512_mask_permutevar_epi32(_mm512_castps_si512(v), 0xffff, index, _mm512_castps_si512(v)));
@@ -1333,6 +1351,7 @@ static FORCEINLINE __vec16_d __rotate_double(const __vec16_d _v, const int index
const __vec16_f ret_lo = __rotate_float(v_lo, index);
return CASTF2D(ret_hi, ret_lo);
}
SHIFT(__vec16_d, double, double)
static FORCEINLINE __vec16_d __shuffle_double(__vec16_d _v, const __vec16_i32 index)
{
CASTD2F(_v, v_hi, v_lo);

View File

@@ -108,22 +108,21 @@ struct __vec4_i64 {
};
struct __vec4_i32 {
__vec4_i32() { }
FORCEINLINE __vec4_i32() { }
FORCEINLINE __vec4_i32(__m128i vv) : v(vv) { }
FORCEINLINE __vec4_i32(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
FORCEINLINE __vec4_i32(int32_t a, int32_t b, int32_t c, int32_t d) {
v = _mm_set_epi32(d, c, b, a);
}
FORCEINLINE __vec4_i32(uint32_t *p) {
FORCEINLINE __vec4_i32(int32_t *p) {
v = _mm_loadu_si128((__m128i *)p);
}
FORCEINLINE __vec4_i32(const __vec4_i32 &other) : v(other.v) {}
FORCEINLINE __vec4_i32& operator =(const __vec4_i32 &o) { v=o.v; return *this; }
FORCEINLINE operator __m128() const { return _mm_castsi128_ps(v); }
__m128i v;
};
static inline int32_t __extract_element(__vec4_i32 v, int index);
struct __vec4_i16 {
__vec4_i16() { }
FORCEINLINE __vec4_i16(__m128i vv) : v(vv) { }
@@ -215,6 +214,64 @@ INSERT_EXTRACT(__vec1_i64, int64_t)
INSERT_EXTRACT(__vec1_f, float)
INSERT_EXTRACT(__vec1_d, double)
// Scalar lane accessors for the 4-wide vector wrappers.  Each pair reads or
// writes one lane by reinterpreting the vector's storage as an array of its
// element type.  No bounds checking is performed; callers are expected to
// pass index in [0, 4).
// Mask lanes are stored as 32-bit values; any nonzero lane reads as true.
static FORCEINLINE bool __extract_element(const __vec4_i1 &v, int index) {
return ((int32_t *)&v)[index] ? true : false;
}
// true is written as -1 (all bits set), false as 0.
static FORCEINLINE void __insert_element(__vec4_i1 *v, int index, bool val) {
((int32_t *)v)[index] = val ? -1 : 0;
}
static FORCEINLINE int8_t __extract_element(const __vec4_i8 &v, int index) {
return ((int8_t *)&v)[index];
}
static FORCEINLINE void __insert_element(__vec4_i8 *v, int index, int8_t val) {
((int8_t *)v)[index] = val;
}
static FORCEINLINE int16_t __extract_element(const __vec4_i16 &v, int index) {
return ((int16_t *)&v)[index];
}
static FORCEINLINE void __insert_element(__vec4_i16 *v, int index, int16_t val) {
((int16_t *)v)[index] = val;
}
static FORCEINLINE int32_t __extract_element(const __vec4_i32 &v, int index) {
return ((int32_t *)&v)[index];
}
static FORCEINLINE void __insert_element(__vec4_i32 *v, int index, int32_t val) {
((int32_t *)v)[index] = val;
}
static FORCEINLINE int64_t __extract_element(const __vec4_i64 &v, int index) {
return ((int64_t *)&v)[index];
}
static FORCEINLINE void __insert_element(__vec4_i64 *v, int index, int64_t val) {
((int64_t *)v)[index] = val;
}
static FORCEINLINE float __extract_element(const __vec4_f &v, int index) {
return ((float *)&v)[index];
}
static FORCEINLINE void __insert_element(__vec4_f *v, int index, float val) {
((float *)v)[index] = val;
}
static FORCEINLINE double __extract_element(const __vec4_d &v, int index) {
return ((double *)&v)[index];
}
static FORCEINLINE void __insert_element(__vec4_d *v, int index, double val) {
((double *)v)[index] = val;
}
#define CAST_BITS_SCALAR(TO, FROM) \
static FORCEINLINE TO __cast_bits(TO, FROM v) { \
union { \
@@ -313,13 +370,6 @@ static FORCEINLINE __vec4_i1 __select(__vec4_i1 mask, __vec4_i1 a, __vec4_i1 b)
return _mm_blendv_ps(b.v, a.v, mask.v);
}
// Read one mask lane from a by-value copy; any nonzero 32-bit lane is true.
static FORCEINLINE bool __extract_element(__vec4_i1 v, int index) {
return ((int32_t *)&v)[index] ? true : false;
}
// Write one mask lane in place: true -> -1 (all bits set), false -> 0.
static FORCEINLINE void __insert_element(__vec4_i1 *v, int index, bool val) {
((int32_t *)v)[index] = val ? -1 : 0;
}
template <int ALIGN> static FORCEINLINE __vec4_i1 __load(const __vec4_i1 *v) {
// FIXME: handle align of 16...
@@ -564,13 +614,6 @@ static FORCEINLINE __vec4_i8 __select(__vec4_i1 mask, __vec4_i8 a, __vec4_i8 b)
_mm_extract_epi8(b.v, 3));
}
// Read one 8-bit lane from a by-value copy of the vector.
static FORCEINLINE int8_t __extract_element(__vec4_i8 v, int index) {
return ((int8_t *)&v)[index];
}
// Write one 8-bit lane in place.
static FORCEINLINE void __insert_element(__vec4_i8 *v, int index, int8_t val) {
((int8_t *)v)[index] = val;
}
template <class RetVecType> __vec4_i8 __smear_i8(int8_t v);
template <> FORCEINLINE __vec4_i8 __smear_i8<__vec4_i8>(int8_t v) {
@@ -598,6 +641,20 @@ static FORCEINLINE __vec4_i8 __rotate_i8(__vec4_i8 v, int delta) {
__extract_element(v, (delta+3) & 0x3));
}
// Shift the 4-wide i8 vector by 'delta' lanes: result lane i holds lane
// (i + delta) of v, or 0 when that source lane falls outside [0, 4).
static FORCEINLINE __vec4_i8 __shift_i8(__vec4_i8 v, int delta) {
    int8_t lane[4];
    for (int i = 0; i < 4; ++i) {
        const int src = delta + i;
        lane[i] = ((src >= 0) && (src < 4)) ? __extract_element(v, src) : 0;
    }
    return __vec4_i8(lane[0], lane[1], lane[2], lane[3]);
}
static FORCEINLINE __vec4_i8 __shuffle_i8(__vec4_i8 v, __vec4_i32 index) {
return __vec4_i8(__extract_element(v, __extract_element(index, 0) & 0x3),
__extract_element(v, __extract_element(index, 1) & 0x3),
@@ -836,13 +893,6 @@ static FORCEINLINE __vec4_i16 __select(__vec4_i1 mask, __vec4_i16 a, __vec4_i16
_mm_extract_epi16(b.v, 3));
}
// Read one 16-bit lane from a by-value copy of the vector.
static FORCEINLINE int16_t __extract_element(__vec4_i16 v, int index) {
return ((int16_t *)&v)[index];
}
// Write one 16-bit lane in place.
static FORCEINLINE void __insert_element(__vec4_i16 *v, int index, int16_t val) {
((int16_t *)v)[index] = val;
}
template <class RetVecType> __vec4_i16 __smear_i16(int16_t v);
template <> FORCEINLINE __vec4_i16 __smear_i16<__vec4_i16>(int16_t v) {
@@ -870,6 +920,20 @@ static FORCEINLINE __vec4_i16 __rotate_i16(__vec4_i16 v, int delta) {
__extract_element(v, (delta+3) & 0x3));
}
// Shift the 4-wide i16 vector by 'delta' lanes: result lane i holds lane
// (i + delta) of v, or 0 when that source lane falls outside [0, 4).
static FORCEINLINE __vec4_i16 __shift_i16(__vec4_i16 v, int delta) {
    int16_t lane[4];
    for (int i = 0; i < 4; ++i) {
        const int src = delta + i;
        lane[i] = ((src >= 0) && (src < 4)) ? __extract_element(v, src) : 0;
    }
    return __vec4_i16(lane[0], lane[1], lane[2], lane[3]);
}
static FORCEINLINE __vec4_i16 __shuffle_i16(__vec4_i16 v, __vec4_i32 index) {
return __vec4_i16(__extract_element(v, __extract_element(index, 0) & 0x3),
__extract_element(v, __extract_element(index, 1) & 0x3),
@@ -1109,13 +1173,6 @@ template <> FORCEINLINE __vec4_i32 __undef_i32<__vec4_i32>() {
return __vec4_i32();
}
// Read one 32-bit lane from a by-value copy of the vector.
static FORCEINLINE int32_t __extract_element(__vec4_i32 v, int index) {
return ((int32_t *)&v)[index];
}
// Write one 32-bit lane in place.
static FORCEINLINE void __insert_element(__vec4_i32 *v, int index, int32_t val) {
((int32_t *)v)[index] = val;
}
static FORCEINLINE __vec4_i32 __broadcast_i32(__vec4_i32 v, int index) {
return _mm_set1_epi32(__extract_element(v, index));
@@ -1128,6 +1185,21 @@ static FORCEINLINE __vec4_i32 __rotate_i32(__vec4_i32 v, int delta) {
__extract_element(v, (delta+3) & 0x3));
}
#include <iostream>
// Shift the 4-wide i32 vector by 'delta' lanes: result lane i holds lane
// (i + delta) of v, or 0 when that source lane falls outside [0, 4).
static FORCEINLINE __vec4_i32 __shift_i32(const __vec4_i32 &v, int delta) {
    int32_t lane[4];
    for (int i = 0; i < 4; ++i) {
        const int src = delta + i;
        lane[i] = ((src >= 0) && (src < 4)) ? __extract_element(v, src) : 0;
    }
    return __vec4_i32(lane[0], lane[1], lane[2], lane[3]);
}
static FORCEINLINE __vec4_i32 __shuffle_i32(__vec4_i32 v, __vec4_i32 index) {
return __vec4_i32(__extract_element(v, __extract_element(index, 0) & 0x3),
__extract_element(v, __extract_element(index, 1) & 0x3),
@@ -1383,13 +1455,6 @@ template <> FORCEINLINE __vec4_i64 __undef_i64<__vec4_i64>() {
return __vec4_i64();
}
// Read one 64-bit lane from a by-value copy of the vector.
static FORCEINLINE int64_t __extract_element(__vec4_i64 v, int index) {
return ((int64_t *)&v)[index];
}
// Write one 64-bit lane in place.
static FORCEINLINE void __insert_element(__vec4_i64 *v, int index, int64_t val) {
((int64_t *)v)[index] = val;
}
static FORCEINLINE __vec4_i64 __broadcast_i64(__vec4_i64 v, int index) {
uint64_t val = __extract_element(v, index);
@@ -1403,6 +1468,20 @@ static FORCEINLINE __vec4_i64 __rotate_i64(__vec4_i64 v, int delta) {
__extract_element(v, (delta+3) & 0x3));
}
// Shift the 4-wide i64 vector by 'delta' lanes: result lane i holds lane
// (i + delta) of v, or 0 when that source lane falls outside [0, 4).
static FORCEINLINE __vec4_i64 __shift_i64(__vec4_i64 v, int delta) {
    int64_t lane[4];
    for (int i = 0; i < 4; ++i) {
        const int src = delta + i;
        lane[i] = ((src >= 0) && (src < 4)) ? __extract_element(v, src) : 0;
    }
    return __vec4_i64(lane[0], lane[1], lane[2], lane[3]);
}
static FORCEINLINE __vec4_i64 __shuffle_i64(__vec4_i64 v, __vec4_i32 index) {
return __vec4_i64(__extract_element(v, __extract_element(index, 0) & 0x3),
__extract_element(v, __extract_element(index, 1) & 0x3),
@@ -1504,13 +1583,6 @@ template <> FORCEINLINE __vec4_f __undef_float<__vec4_f>() {
return __vec4_f();
}
// Read one float lane from a by-value copy of the vector.
static FORCEINLINE float __extract_element(__vec4_f v, int index) {
return ((float *)&v)[index];
}
// Write one float lane in place.
static FORCEINLINE void __insert_element(__vec4_f *v, int index, float val) {
((float *)v)[index] = val;
}
static FORCEINLINE __vec4_f __broadcast_float(__vec4_f v, int index) {
return _mm_set1_ps(__extract_element(v, index));
@@ -1523,6 +1595,20 @@ static FORCEINLINE __vec4_f __rotate_float(__vec4_f v, int delta) {
__extract_element(v, (delta+3) & 0x3));
}
// Shift the 4-wide float vector by 'delta' lanes: result lane i holds lane
// (i + delta) of v, or 0.0f when that source lane falls outside [0, 4).
static FORCEINLINE __vec4_f __shift_float(__vec4_f v, int delta) {
    float lane[4];
    for (int i = 0; i < 4; ++i) {
        const int src = delta + i;
        lane[i] = ((src >= 0) && (src < 4)) ? __extract_element(v, src) : 0.f;
    }
    return __vec4_f(lane[0], lane[1], lane[2], lane[3]);
}
static FORCEINLINE __vec4_f __shuffle_float(__vec4_f v, __vec4_i32 index) {
return __vec4_f(__extract_element(v, __extract_element(index, 0) & 0x3),
__extract_element(v, __extract_element(index, 1) & 0x3),
@@ -1656,13 +1742,6 @@ template <> FORCEINLINE __vec4_d __undef_double<__vec4_d>() {
return __vec4_d();
}
// Read one double lane from a by-value copy of the vector.
static FORCEINLINE double __extract_element(__vec4_d v, int index) {
return ((double *)&v)[index];
}
// Write one double lane in place.
static FORCEINLINE void __insert_element(__vec4_d *v, int index, double val) {
((double *)v)[index] = val;
}
static FORCEINLINE __vec4_d __broadcast_double(__vec4_d v, int index) {
return __vec4_d(_mm_set1_pd(__extract_element(v, index)),
@@ -1676,6 +1755,20 @@ static FORCEINLINE __vec4_d __rotate_double(__vec4_d v, int delta) {
__extract_element(v, (delta+3) & 0x3));
}
// Shift the 4-wide double vector by 'delta' lanes: result lane i holds lane
// (i + delta) of v, or 0.0 when that source lane falls outside [0, 4).
static FORCEINLINE __vec4_d __shift_double(__vec4_d v, int delta) {
    double lane[4];
    for (int i = 0; i < 4; ++i) {
        const int src = delta + i;
        lane[i] = ((src >= 0) && (src < 4)) ? __extract_element(v, src) : 0.0;
    }
    return __vec4_d(lane[0], lane[1], lane[2], lane[3]);
}
static FORCEINLINE __vec4_d __shuffle_double(__vec4_d v, __vec4_i32 index) {
return __vec4_d(__extract_element(v, __extract_element(index, 0) & 0x3),
__extract_element(v, __extract_element(index, 1) & 0x3),
@@ -1889,7 +1982,7 @@ static FORCEINLINE __vec4_f __cast_sitofp(__vec4_f, __vec4_i16 val) {
(float)((int16_t)_mm_extract_epi16(val.v, 3)));
}
static FORCEINLINE __vec4_f __cast_sitofp(__vec4_f, __vec4_i32 val) {
// Convert four signed 32-bit integer lanes to float lanes (SSE cvtdq2ps).
// The first (unnamed) parameter only selects the overload's return type.
static FORCEINLINE __vec4_f __cast_sitofp(__vec4_f, const __vec4_i32 &val) {
return _mm_cvtepi32_ps(val.v);
}