Choose the type for integer literals to match the target mask size (if possible).
On a target with a 16-bit mask (for example), we now choose the type of an integer literal such as "1024" to be an int16. Previously, we used an int32, which is a worse fit and leads to less efficient code than an int16 on a 16-bit mask target. (However, a literal that does not fit in the mask-sized type, such as 1000000, still gets the type int32, even on a 16-bit target.) The tests have been updated so that they still pass on 8- and 16-bit targets given this change.
This commit is contained in:
@@ -3,7 +3,7 @@ export uniform int width() { return programCount; }
|
||||
|
||||
export void f_f(uniform float RET[], uniform float aFOO[]) {
|
||||
RET[programIndex] = -1;
|
||||
int32 a = ~(1 << programIndex);
|
||||
int32 a = ~(1ul << programIndex);
|
||||
if ((programIndex < 32) && (programIndex & 1) == 0) {
|
||||
RET[programIndex] = exclusive_scan_and(a);
|
||||
}
|
||||
@@ -15,7 +15,7 @@ export void result(uniform float RET[]) {
|
||||
if ((programIndex & 1) == 0 && programIndex > 0 && programIndex < 32) {
|
||||
int val = 0xffffffff;
|
||||
for (int i = 0; i < programIndex-1; i += 2)
|
||||
val &= ~(1<<i);
|
||||
val &= ~(1ul<<i);
|
||||
RET[programIndex] = val;
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user