Update masked_load/store implementations for generic targets to take void *s

(Fixes compile errors when we try to actually use these!)
Matt Pharr
2012-01-17 18:11:26 -08:00
parent 08189ce08c
commit c6d1cebad4
2 changed files with 16 additions and 16 deletions

@@ -938,7 +938,7 @@ REDUCE_MINMAX(uint64_t, __vec16_i64, __reduce_max_uint64, >)
 ///////////////////////////////////////////////////////////////////////////
 // masked load/store
 
-static FORCEINLINE __vec16_i8 __masked_load_8(unsigned char *p,
+static FORCEINLINE __vec16_i8 __masked_load_8(void *p,
                                               __vec16_i1 mask) {
     __vec16_i8 ret;
     int8_t *ptr = (int8_t *)p;
@@ -948,7 +948,7 @@ static FORCEINLINE __vec16_i8 __masked_load_8(unsigned char *p,
     return ret;
 }
 
-static FORCEINLINE __vec16_i16 __masked_load_16(unsigned char *p,
+static FORCEINLINE __vec16_i16 __masked_load_16(void *p,
                                                 __vec16_i1 mask) {
     __vec16_i16 ret;
     int16_t *ptr = (int16_t *)p;
@@ -958,7 +958,7 @@ static FORCEINLINE __vec16_i16 __masked_load_16(unsigned char *p,
     return ret;
 }
 
-static FORCEINLINE __vec16_i32 __masked_load_32(unsigned char *p,
+static FORCEINLINE __vec16_i32 __masked_load_32(void *p,
                                                 __vec16_i1 mask) {
     __vec16_i32 ret;
     int32_t *ptr = (int32_t *)p;
@@ -968,7 +968,7 @@ static FORCEINLINE __vec16_i32 __masked_load_32(unsigned char *p,
     return ret;
 }
 
-static FORCEINLINE __vec16_i64 __masked_load_64(unsigned char *p,
+static FORCEINLINE __vec16_i64 __masked_load_64(void *p,
                                                 __vec16_i1 mask) {
     __vec16_i64 ret;
     int64_t *ptr = (int64_t *)p;
@@ -978,7 +978,7 @@ static FORCEINLINE __vec16_i64 __masked_load_64(unsigned char *p,
     return ret;
 }
 
-static FORCEINLINE void __masked_store_8(unsigned char *p, __vec16_i8 val,
+static FORCEINLINE void __masked_store_8(void *p, __vec16_i8 val,
                                          __vec16_i1 mask) {
     int8_t *ptr = (int8_t *)p;
     for (int i = 0; i < 16; ++i)
@@ -986,7 +986,7 @@ static FORCEINLINE void __masked_store_8(unsigned char *p, __vec16_i8 val,
             ptr[i] = val.v[i];
 }
 
-static FORCEINLINE void __masked_store_16(unsigned char *p, __vec16_i16 val,
+static FORCEINLINE void __masked_store_16(void *p, __vec16_i16 val,
                                           __vec16_i1 mask) {
     int16_t *ptr = (int16_t *)p;
     for (int i = 0; i < 16; ++i)
@@ -994,7 +994,7 @@ static FORCEINLINE void __masked_store_16(unsigned char *p, __vec16_i16 val,
             ptr[i] = val.v[i];
 }
 
-static FORCEINLINE void __masked_store_32(unsigned char *p, __vec16_i32 val,
+static FORCEINLINE void __masked_store_32(void *p, __vec16_i32 val,
                                           __vec16_i1 mask) {
     int32_t *ptr = (int32_t *)p;
     for (int i = 0; i < 16; ++i)
@@ -1002,7 +1002,7 @@ static FORCEINLINE void __masked_store_32(unsigned char *p, __vec16_i32 val,
             ptr[i] = val.v[i];
 }
 
-static FORCEINLINE void __masked_store_64(unsigned char *p, __vec16_i64 val,
+static FORCEINLINE void __masked_store_64(void *p, __vec16_i64 val,
                                           __vec16_i1 mask) {
     int64_t *ptr = (int64_t *)p;
     for (int i = 0; i < 16; ++i)
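
Why the void * signatures matter at a call site: with unsigned char * parameters, passing a typed buffer (an int32_t *, float *, etc.) required an explicit cast, and uncasted calls failed to compile. The sketch below is a minimal, self-contained illustration in the shape of the __masked_load_32/__masked_store_32 functions from the diff above; the __vec16_i1 and __vec16_i32 layouts and the mask test are assumptions for illustration, not the actual contents of the generic target header.

/* Minimal sketch: mirrors the void * signatures above so a typed buffer can
 * be passed without a cast. Struct layouts and the mask test are assumed for
 * illustration only. Compiles as C99 or C++. */
#include <stdint.h>
#include <stdio.h>

#define FORCEINLINE inline

typedef struct { uint16_t v; } __vec16_i1;      /* one mask bit per lane (assumed layout) */
typedef struct { int32_t v[16]; } __vec16_i32;  /* 16 x 32-bit lanes (assumed layout) */

static FORCEINLINE __vec16_i32 __masked_load_32(void *p, __vec16_i1 mask) {
    __vec16_i32 ret = { { 0 } };                /* zero inactive lanes in this sketch */
    int32_t *ptr = (int32_t *)p;
    for (int i = 0; i < 16; ++i)
        if ((mask.v & (1 << i)) != 0)
            ret.v[i] = ptr[i];
    return ret;
}

static FORCEINLINE void __masked_store_32(void *p, __vec16_i32 val, __vec16_i1 mask) {
    int32_t *ptr = (int32_t *)p;
    for (int i = 0; i < 16; ++i)
        if ((mask.v & (1 << i)) != 0)
            ptr[i] = val.v[i];
}

int main(void) {
    int32_t buf[16] = { 0 };
    __vec16_i1 mask = { 0x00FF };               /* lanes 0..7 active */

    /* With unsigned char * parameters these calls needed casts such as
     * (unsigned char *)buf; with void * they compile as written. */
    __vec16_i32 vals = __masked_load_32(buf, mask);
    for (int i = 0; i < 16; ++i)
        vals.v[i] += 1;
    __masked_store_32(buf, vals, mask);

    printf("%d %d\n", (int)buf[0], (int)buf[15]);   /* prints "1 0": only active lanes written */
    return 0;
}

The typed pointer converts implicitly to void * at the call, which is what lets previously failing call sites build without per-call casts.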