New LLVM IR load instruction
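Starting with LLVM 3.7, `load` (and `getelementptr`) take the pointee type as an
explicit first operand -- `load i32, i32* %p` -- instead of deriving it from the
pointer operand as in `load i32* %p`. This commit routes every remaining load in the
builtins through the PTR_OP_ARGS m4 macro so a single source tree can emit whichever
form the targeted LLVM version expects. The macro's definition is not part of this
diff; a minimal sketch of what it presumably expands to (the ifelse version test and
the LLVM_3_6 cutoff here are illustrative placeholders, not the real definition):

;; $1 = the value type (often with a trailing space), $2 = the pointer operand plus
;; any trailing clauses such as `, align 4'
define(`PTR_OP_ARGS',
`ifelse(LLVM_VERSION, LLVM_3_6, `$1 * $2', `$1, $1 * $2')')

Under that reading, `%v = load PTR_OP_ARGS(`i32 ',` %ptr, align 4')` produces the
legacy `load i32 * %ptr, align 4` on older LLVM and `load i32, i32 * %ptr, align 4`
on newer releases; because $2 carries everything after the pointer, the same trick
covers alignment clauses and the getelementptr calls further down unchanged.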
@@ -160,7 +160,7 @@ declare void @abort() noreturn nounwind
 define void @__set_system_isa() {
 entry:
-%bi = load i32* @__system_best_isa
+%bi = load PTR_OP_ARGS(`i32 ',` @__system_best_isa')
 %unset = icmp eq i32 %bi, -1
 br i1 %unset, label %set_system_isa, label %done

@@ -159,8 +159,8 @@ define(`svml_define',`
 ;; i32 4, i32 5, i32 6, i32 7>
 ;; store <8 x float> %sin, <8 x float> * %1
 ;;
-;; %cosa = load <4 x float> * %cospa
-;; %cosb = load <4 x float> * %cospb
+;; %cosa = load PTR_OP_ARGS(`<4 x float> ',` %cospa')
+;; %cosb = load PTR_OP_ARGS(`<4 x float> ',` %cospb')
 ;; %cos = shufflevector <4 x float> %cosa, <4 x float> %cosb,
 ;; <8 x i32> <i32 0, i32 1, i32 2, i32 3,
 ;; i32 4, i32 5, i32 6, i32 7>

@@ -191,7 +191,7 @@ define void @__fastmath() nounwind alwaysinline {
 %ptr = alloca i32
 %ptr8 = bitcast i32 * %ptr to i8 *
 call void @llvm.x86.sse.stmxcsr(i8 * %ptr8)
-%oldval = load i32 *%ptr
+%oldval = load PTR_OP_ARGS(`i32 ',`%ptr')

 ; turn on DAZ (64)/FTZ (32768) -> 32832
 %update = or i32 %oldval, 32832
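The constant in the __fastmath hunk above (and in its duplicates for the other
targets below) is simply the OR of the two MXCSR control bits being enabled:

;; MXCSR bit 6  = DAZ (denormals are zero): 1 << 6  = 64    (0x0040)
;; MXCSR bit 15 = FTZ (flush to zero):      1 << 15 = 32768 (0x8000)
;; 64 | 32768 = 32832 (0x8040), the immediate OR'd into the saved control word
;; before it is presumably written back with ldmxcsr.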
@@ -559,7 +559,7 @@ declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>,
 define void @__masked_store_blend_i32(<16 x i32>* nocapture, <16 x i32>,
 <16 x i32>) nounwind alwaysinline {
 %maskAsFloat = bitcast <16 x i32> %2 to <16 x float>
-%oldValue = load <16 x i32>* %0, align 4
+%oldValue = load PTR_OP_ARGS(`<16 x i32>',` %0, align 4')
 %oldAsFloat = bitcast <16 x i32> %oldValue to <16 x float>
 %newAsFloat = bitcast <16 x i32> %1 to <16 x float>
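Every __masked_store_blend_* hunk in this commit has the same load/blend/store
shape; only the blend primitive differs by target (AVX blendv intrinsics here,
SSE4.1 blendvps/pblendvb elsewhere, a plain `select` in the generic versions).
The target-independent form, exactly as it appears in the later hunks:

 %v = load PTR_OP_ARGS(`<WIDTH x i8> ',` %0')
 %v1 = select <WIDTH x i1> %2, <WIDTH x i8> %1, <WIDTH x i8> %v
 store <WIDTH x i8> %v1, <WIDTH x i8> * %0
 ret void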
@@ -596,7 +596,7 @@ declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>,

 define void @__masked_store_blend_i64(<16 x i64>* nocapture %ptr, <16 x i64> %newi64,
 <16 x i32> %mask) nounwind alwaysinline {
-%oldValue = load <16 x i64>* %ptr, align 8
+%oldValue = load PTR_OP_ARGS(`<16 x i64>',` %ptr, align 8')
 %old = bitcast <16 x i64> %oldValue to <16 x double>
 %old0d = shufflevector <16 x double> %old, <16 x double> undef,
 <4 x i32> <i32 0, i32 1, i32 2, i32 3>

@@ -487,7 +487,7 @@ declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>,
 define void @__masked_store_blend_i32(<8 x i32>* nocapture, <8 x i32>,
 <8 x i32>) nounwind alwaysinline {
 %mask_as_float = bitcast <8 x i32> %2 to <8 x float>
-%oldValue = load <8 x i32>* %0, align 4
+%oldValue = load PTR_OP_ARGS(`<8 x i32>',` %0, align 4')
 %oldAsFloat = bitcast <8 x i32> %oldValue to <8 x float>
 %newAsFloat = bitcast <8 x i32> %1 to <8 x float>
 %blend = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %oldAsFloat,

@@ -501,7 +501,7 @@ define void @__masked_store_blend_i32(<8 x i32>* nocapture, <8 x i32>,

 define void @__masked_store_blend_i64(<8 x i64>* nocapture %ptr, <8 x i64> %new,
 <8 x i32> %i32mask) nounwind alwaysinline {
-%oldValue = load <8 x i64>* %ptr, align 8
+%oldValue = load PTR_OP_ARGS(`<8 x i64>',` %ptr, align 8')
 %mask = bitcast <8 x i32> %i32mask to <8 x float>

 ; Do 4x64-bit blends by doing two <8 x i32> blends, where the <8 x i32> values

@@ -453,7 +453,7 @@ define void @__masked_store_blend_i32(<4 x i32>* nocapture, <4 x i32>,
 <4 x i64>) nounwind alwaysinline {
 %mask = trunc <4 x i64> %2 to <4 x i32>
 %mask_as_float = bitcast <4 x i32> %mask to <4 x float>
-%oldValue = load <4 x i32>* %0, align 4
+%oldValue = load PTR_OP_ARGS(` <4 x i32>',` %0, align 4')
 %oldAsFloat = bitcast <4 x i32> %oldValue to <4 x float>
 %newAsFloat = bitcast <4 x i32> %1 to <4 x float>
 %blend = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %oldAsFloat,

@@ -471,7 +471,7 @@ declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>,
 define void @__masked_store_blend_i64(<4 x i64>* nocapture , <4 x i64>,
 <4 x i64>) nounwind alwaysinline {
 %mask_as_double = bitcast <4 x i64> %2 to <4 x double>
-%oldValue = load <4 x i64>* %0, align 4
+%oldValue = load PTR_OP_ARGS(` <4 x i64>',` %0, align 4')
 %oldAsDouble = bitcast <4 x i64> %oldValue to <4 x double>
 %newAsDouble = bitcast <4 x i64> %1 to <4 x double>
 %blend = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %oldAsDouble,

@@ -194,7 +194,7 @@ define <1 x float> @__vselect_float(<1 x float>, <1 x float>,

 define void @__masked_store_blend_i8(<1 x i8>* nocapture, <1 x i8>,
 <1 x i32> %mask) nounwind alwaysinline {
-%val = load <1 x i8> * %0, align 4
+%val = load PTR_OP_ARGS(`<1 x i8> ',` %0, align 4')
 %newval = call <1 x i8> @__vselect_i8(<1 x i8> %val, <1 x i8> %1, <1 x i32> %mask)
 store <1 x i8> %newval, <1 x i8> * %0, align 4
 ret void

@@ -202,7 +202,7 @@ define void @__masked_store_blend_i8(<1 x i8>* nocapture, <1 x i8>,

 define void @__masked_store_blend_i16(<1 x i16>* nocapture, <1 x i16>,
 <1 x i32> %mask) nounwind alwaysinline {
-%val = load <1 x i16> * %0, align 4
+%val = load PTR_OP_ARGS(`<1 x i16> ',` %0, align 4')
 %newval = call <1 x i16> @__vselect_i16(<1 x i16> %val, <1 x i16> %1, <1 x i32> %mask)
 store <1 x i16> %newval, <1 x i16> * %0, align 4
 ret void

@@ -210,7 +210,7 @@ define void @__masked_store_blend_i16(<1 x i16>* nocapture, <1 x i16>,

 define void @__masked_store_blend_i32(<1 x i32>* nocapture, <1 x i32>,
 <1 x i32> %mask) nounwind alwaysinline {
-%val = load <1 x i32> * %0, align 4
+%val = load PTR_OP_ARGS(`<1 x i32> ',` %0, align 4')
 %newval = call <1 x i32> @__vselect_i32(<1 x i32> %val, <1 x i32> %1, <1 x i32> %mask)
 store <1 x i32> %newval, <1 x i32> * %0, align 4
 ret void

@@ -218,7 +218,7 @@ define void @__masked_store_blend_i32(<1 x i32>* nocapture, <1 x i32>,

 define void @__masked_store_blend_i64(<1 x i64>* nocapture, <1 x i64>,
 <1 x i32> %mask) nounwind alwaysinline {
-%val = load <1 x i64> * %0, align 4
+%val = load PTR_OP_ARGS(`<1 x i64> ',` %0, align 4')
 %newval = call <1 x i64> @__vselect_i64(<1 x i64> %val, <1 x i64> %1, <1 x i32> %mask)
 store <1 x i64> %newval, <1 x i64> * %0, align 4
 ret void

@@ -278,7 +278,7 @@ declare void @__masked_store_double(<WIDTH x double>* nocapture, <WIDTH x double

 define void @__masked_store_blend_i8(<WIDTH x i8>* nocapture, <WIDTH x i8>,
 <WIDTH x i1>) nounwind alwaysinline {
-%v = load <WIDTH x i8> * %0
+%v = load PTR_OP_ARGS(`<WIDTH x i8> ',` %0')
 %v1 = select <WIDTH x i1> %2, <WIDTH x i8> %1, <WIDTH x i8> %v
 store <WIDTH x i8> %v1, <WIDTH x i8> * %0
 ret void

@@ -286,7 +286,7 @@ define void @__masked_store_blend_i8(<WIDTH x i8>* nocapture, <WIDTH x i8>,

 define void @__masked_store_blend_i16(<WIDTH x i16>* nocapture, <WIDTH x i16>,
 <WIDTH x i1>) nounwind alwaysinline {
-%v = load <WIDTH x i16> * %0
+%v = load PTR_OP_ARGS(`<WIDTH x i16> ',` %0')
 %v1 = select <WIDTH x i1> %2, <WIDTH x i16> %1, <WIDTH x i16> %v
 store <WIDTH x i16> %v1, <WIDTH x i16> * %0
 ret void

@@ -294,7 +294,7 @@ define void @__masked_store_blend_i16(<WIDTH x i16>* nocapture, <WIDTH x i16>,

 define void @__masked_store_blend_i32(<WIDTH x i32>* nocapture, <WIDTH x i32>,
 <WIDTH x i1>) nounwind alwaysinline {
-%v = load <WIDTH x i32> * %0
+%v = load PTR_OP_ARGS(`<WIDTH x i32> ',` %0')
 %v1 = select <WIDTH x i1> %2, <WIDTH x i32> %1, <WIDTH x i32> %v
 store <WIDTH x i32> %v1, <WIDTH x i32> * %0
 ret void

@@ -302,7 +302,7 @@ define void @__masked_store_blend_i32(<WIDTH x i32>* nocapture, <WIDTH x i32>,

 define void @__masked_store_blend_float(<WIDTH x float>* nocapture, <WIDTH x float>,
 <WIDTH x i1>) nounwind alwaysinline {
-%v = load <WIDTH x float> * %0
+%v = load PTR_OP_ARGS(`<WIDTH x float> ',` %0')
 %v1 = select <WIDTH x i1> %2, <WIDTH x float> %1, <WIDTH x float> %v
 store <WIDTH x float> %v1, <WIDTH x float> * %0
 ret void

@@ -310,7 +310,7 @@ define void @__masked_store_blend_float(<WIDTH x float>* nocapture, <WIDTH x flo

 define void @__masked_store_blend_i64(<WIDTH x i64>* nocapture,
 <WIDTH x i64>, <WIDTH x i1>) nounwind alwaysinline {
-%v = load <WIDTH x i64> * %0
+%v = load PTR_OP_ARGS(`<WIDTH x i64> ',` %0')
 %v1 = select <WIDTH x i1> %2, <WIDTH x i64> %1, <WIDTH x i64> %v
 store <WIDTH x i64> %v1, <WIDTH x i64> * %0
 ret void

@@ -318,7 +318,7 @@ define void @__masked_store_blend_i64(<WIDTH x i64>* nocapture,

 define void @__masked_store_blend_double(<WIDTH x double>* nocapture,
 <WIDTH x double>, <WIDTH x i1>) nounwind alwaysinline {
-%v = load <WIDTH x double> * %0
+%v = load PTR_OP_ARGS(`<WIDTH x double> ',` %0')
 %v1 = select <WIDTH x i1> %2, <WIDTH x double> %1, <WIDTH x double> %v
 store <WIDTH x double> %v1, <WIDTH x double> * %0
 ret void

@@ -279,7 +279,7 @@ masked_store_float_double()

 define void @__masked_store_blend_i8(<WIDTH x i8>* nocapture %ptr, <WIDTH x i8> %new,
 <WIDTH x MASK> %mask) nounwind alwaysinline {
-%old = load <WIDTH x i8> * %ptr
+%old = load PTR_OP_ARGS(`<WIDTH x i8> ',` %ptr')
 %mask1 = trunc <WIDTH x MASK> %mask to <WIDTH x i1>
 %result = select <WIDTH x i1> %mask1, <WIDTH x i8> %new, <WIDTH x i8> %old
 store <WIDTH x i8> %result, <WIDTH x i8> * %ptr

@@ -288,7 +288,7 @@ define void @__masked_store_blend_i8(<WIDTH x i8>* nocapture %ptr, <WIDTH x i8>

 define void @__masked_store_blend_i16(<WIDTH x i16>* nocapture %ptr, <WIDTH x i16> %new,
 <WIDTH x MASK> %mask) nounwind alwaysinline {
-%old = load <WIDTH x i16> * %ptr
+%old = load PTR_OP_ARGS(`<WIDTH x i16> ',` %ptr')
 %mask1 = trunc <WIDTH x MASK> %mask to <WIDTH x i1>
 %result = select <WIDTH x i1> %mask1, <WIDTH x i16> %new, <WIDTH x i16> %old
 store <WIDTH x i16> %result, <WIDTH x i16> * %ptr

@@ -297,7 +297,7 @@ define void @__masked_store_blend_i16(<WIDTH x i16>* nocapture %ptr, <WIDTH x i1

 define void @__masked_store_blend_i32(<WIDTH x i32>* nocapture %ptr, <WIDTH x i32> %new,
 <WIDTH x MASK> %mask) nounwind alwaysinline {
-%old = load <WIDTH x i32> * %ptr
+%old = load PTR_OP_ARGS(`<WIDTH x i32> ',` %ptr')
 %mask1 = trunc <WIDTH x MASK> %mask to <WIDTH x i1>
 %result = select <WIDTH x i1> %mask1, <WIDTH x i32> %new, <WIDTH x i32> %old
 store <WIDTH x i32> %result, <WIDTH x i32> * %ptr

@@ -306,7 +306,7 @@ define void @__masked_store_blend_i32(<WIDTH x i32>* nocapture %ptr, <WIDTH x i3

 define void @__masked_store_blend_i64(<WIDTH x i64>* nocapture %ptr,
 <WIDTH x i64> %new, <WIDTH x MASK> %mask) nounwind alwaysinline {
-%old = load <WIDTH x i64> * %ptr
+%old = load PTR_OP_ARGS(`<WIDTH x i64> ',` %ptr')
 %mask1 = trunc <WIDTH x MASK> %mask to <WIDTH x i1>
 %result = select <WIDTH x i1> %mask1, <WIDTH x i64> %new, <WIDTH x i64> %old
 store <WIDTH x i64> %result, <WIDTH x i64> * %ptr

@@ -1523,7 +1523,7 @@ gen_masked_store(double)

 define void @__masked_store_blend_i8(<WIDTH x i8>* nocapture, <WIDTH x i8>,
 <WIDTH x i1>) nounwind alwaysinline {
-%v = load <WIDTH x i8> * %0
+%v = load PTR_OP_ARGS(`<WIDTH x i8> ',` %0')
 %v1 = select <WIDTH x i1> %2, <WIDTH x i8> %1, <WIDTH x i8> %v
 store <WIDTH x i8> %v1, <WIDTH x i8> * %0
 ret void

@@ -1531,7 +1531,7 @@ define void @__masked_store_blend_i8(<WIDTH x i8>* nocapture, <WIDTH x i8>,

 define void @__masked_store_blend_i16(<WIDTH x i16>* nocapture, <WIDTH x i16>,
 <WIDTH x i1>) nounwind alwaysinline {
-%v = load <WIDTH x i16> * %0
+%v = load PTR_OP_ARGS(`<WIDTH x i16> ',` %0')
 %v1 = select <WIDTH x i1> %2, <WIDTH x i16> %1, <WIDTH x i16> %v
 store <WIDTH x i16> %v1, <WIDTH x i16> * %0
 ret void

@@ -1539,7 +1539,7 @@ define void @__masked_store_blend_i16(<WIDTH x i16>* nocapture, <WIDTH x i16>,

 define void @__masked_store_blend_i32(<WIDTH x i32>* nocapture, <WIDTH x i32>,
 <WIDTH x i1>) nounwind alwaysinline {
-%v = load <WIDTH x i32> * %0
+%v = load PTR_OP_ARGS(`<WIDTH x i32> ',` %0')
 %v1 = select <WIDTH x i1> %2, <WIDTH x i32> %1, <WIDTH x i32> %v
 store <WIDTH x i32> %v1, <WIDTH x i32> * %0
 ret void

@@ -1547,7 +1547,7 @@ define void @__masked_store_blend_i32(<WIDTH x i32>* nocapture, <WIDTH x i32>,

 define void @__masked_store_blend_float(<WIDTH x float>* nocapture, <WIDTH x float>,
 <WIDTH x i1>) nounwind alwaysinline {
-%v = load <WIDTH x float> * %0
+%v = load PTR_OP_ARGS(`<WIDTH x float> ',` %0')
 %v1 = select <WIDTH x i1> %2, <WIDTH x float> %1, <WIDTH x float> %v
 store <WIDTH x float> %v1, <WIDTH x float> * %0
 ret void

@@ -1555,7 +1555,7 @@ define void @__masked_store_blend_float(<WIDTH x float>* nocapture, <WIDTH x flo

 define void @__masked_store_blend_i64(<WIDTH x i64>* nocapture,
 <WIDTH x i64>, <WIDTH x i1>) nounwind alwaysinline {
-%v = load <WIDTH x i64> * %0
+%v = load PTR_OP_ARGS(`<WIDTH x i64> ',` %0')
 %v1 = select <WIDTH x i1> %2, <WIDTH x i64> %1, <WIDTH x i64> %v
 store <WIDTH x i64> %v1, <WIDTH x i64> * %0
 ret void

@@ -1563,7 +1563,7 @@ define void @__masked_store_blend_i64(<WIDTH x i64>* nocapture,

 define void @__masked_store_blend_double(<WIDTH x double>* nocapture,
 <WIDTH x double>, <WIDTH x i1>) nounwind alwaysinline {
-%v = load <WIDTH x double> * %0
+%v = load PTR_OP_ARGS(`<WIDTH x double> ',` %0')
 %v1 = select <WIDTH x i1> %2, <WIDTH x double> %1, <WIDTH x double> %v
 store <WIDTH x double> %v1, <WIDTH x double> * %0
 ret void

@@ -97,7 +97,7 @@ define void @__fastmath() nounwind alwaysinline {
 %ptr = alloca i32
 %ptr8 = bitcast i32 * %ptr to i8 *
 call void @llvm.x86.sse.stmxcsr(i8 * %ptr8)
-%oldval = load i32 *%ptr
+%oldval = load PTR_OP_ARGS(`i32 ',`%ptr')

 ; turn on DAZ (64)/FTZ (32768) -> 32832
 %update = or i32 %oldval, 32832

@@ -576,7 +576,7 @@ masked_store_blend_8_16_by_8()

 define void @__masked_store_blend_i32(<8 x i32>* nocapture, <8 x i32>,
 <8 x i32> %mask) nounwind alwaysinline {
-%val = load <8 x i32> * %0, align 4
+%val = load PTR_OP_ARGS(`<8 x i32> ',` %0, align 4')
 %newval = call <8 x i32> @__vselect_i32(<8 x i32> %val, <8 x i32> %1, <8 x i32> %mask)
 store <8 x i32> %newval, <8 x i32> * %0, align 4
 ret void

@@ -584,7 +584,7 @@ define void @__masked_store_blend_i32(<8 x i32>* nocapture, <8 x i32>,

 define void @__masked_store_blend_i64(<8 x i64>* nocapture %ptr, <8 x i64> %new,
 <8 x i32> %mask) nounwind alwaysinline {
-%oldValue = load <8 x i64>* %ptr, align 8
+%oldValue = load PTR_OP_ARGS(`<8 x i64>',` %ptr, align 8')

 ; Do 8x64-bit blends by doing two <8 x i32> blends, where the <8 x i32> values
 ; are actually bitcast <2 x i64> values

@@ -399,7 +399,7 @@ reduce_equal(4)

 define void @__masked_store_blend_i32(<4 x i32>* nocapture, <4 x i32>,
 <4 x i32> %mask) nounwind alwaysinline {
-%val = load <4 x i32> * %0, align 4
+%val = load PTR_OP_ARGS(`<4 x i32> ',` %0, align 4')
 %newval = call <4 x i32> @__vselect_i32(<4 x i32> %val, <4 x i32> %1, <4 x i32> %mask)
 store <4 x i32> %newval, <4 x i32> * %0, align 4
 ret void

@@ -407,7 +407,7 @@ define void @__masked_store_blend_i32(<4 x i32>* nocapture, <4 x i32>,

 define void @__masked_store_blend_i64(<4 x i64>* nocapture %ptr, <4 x i64> %new,
 <4 x i32> %mask) nounwind alwaysinline {
-%oldValue = load <4 x i64>* %ptr, align 8
+%oldValue = load PTR_OP_ARGS(`<4 x i64>',` %ptr, align 8')

 ; Do 4x64-bit blends by doing two <4 x i32> blends, where the <4 x i32> values
 ; are actually bitcast <2 x i64> values

@@ -390,7 +390,7 @@ define void @__masked_store_blend_i64(<8 x i64>* nocapture, <8 x i64>,
 <8 x MASK> %mask) nounwind
 alwaysinline {
 %mask_as_i1 = trunc <8 x MASK> %mask to <8 x i1>
-%old = load <8 x i64>* %0, align 4
+%old = load PTR_OP_ARGS(`<8 x i64>',` %0, align 4')
 %blend = select <8 x i1> %mask_as_i1, <8 x i64> %1, <8 x i64> %old
 store <8 x i64> %blend, <8 x i64>* %0, align 4
 ret void

@@ -399,7 +399,7 @@ define void @__masked_store_blend_i64(<8 x i64>* nocapture, <8 x i64>,
 define void @__masked_store_blend_i32(<8 x i32>* nocapture, <8 x i32>,
 <8 x MASK> %mask) nounwind alwaysinline {
 %mask_as_i1 = trunc <8 x MASK> %mask to <8 x i1>
-%old = load <8 x i32>* %0, align 4
+%old = load PTR_OP_ARGS(`<8 x i32>',` %0, align 4')
 %blend = select <8 x i1> %mask_as_i1, <8 x i32> %1, <8 x i32> %old
 store <8 x i32> %blend, <8 x i32>* %0, align 4
 ret void

@@ -408,7 +408,7 @@ define void @__masked_store_blend_i32(<8 x i32>* nocapture, <8 x i32>,
 define void @__masked_store_blend_i16(<8 x i16>* nocapture, <8 x i16>,
 <8 x MASK> %mask) nounwind alwaysinline {
 %mask_as_i1 = trunc <8 x MASK> %mask to <8 x i1>
-%old = load <8 x i16>* %0, align 4
+%old = load PTR_OP_ARGS(`<8 x i16>',` %0, align 4')
 %blend = select <8 x i1> %mask_as_i1, <8 x i16> %1, <8 x i16> %old
 store <8 x i16> %blend, <8 x i16>* %0, align 4
 ret void

@@ -417,7 +417,7 @@ define void @__masked_store_blend_i16(<8 x i16>* nocapture, <8 x i16>,
 define void @__masked_store_blend_i8(<8 x i8>* nocapture, <8 x i8>,
 <8 x MASK> %mask) nounwind alwaysinline {
 %mask_as_i1 = trunc <8 x MASK> %mask to <8 x i1>
-%old = load <8 x i8>* %0, align 4
+%old = load PTR_OP_ARGS(`<8 x i8>',` %0, align 4')
 %blend = select <8 x i1> %mask_as_i1, <8 x i8> %1, <8 x i8> %old
 store <8 x i8> %blend, <8 x i8>* %0, align 4
 ret void

@@ -396,7 +396,7 @@ define void @__masked_store_blend_i64(<16 x i64>* nocapture, <16 x i64>,
 <16 x i8> %mask) nounwind
 alwaysinline {
 %mask_as_i1 = trunc <16 x MASK> %mask to <16 x i1>
-%old = load <16 x i64>* %0, align 4
+%old = load PTR_OP_ARGS(`<16 x i64>',` %0, align 4')
 %blend = select <16 x i1> %mask_as_i1, <16 x i64> %1, <16 x i64> %old
 store <16 x i64> %blend, <16 x i64>* %0, align 4
 ret void

@@ -405,7 +405,7 @@ define void @__masked_store_blend_i64(<16 x i64>* nocapture, <16 x i64>,
 define void @__masked_store_blend_i32(<16 x i32>* nocapture, <16 x i32>,
 <16 x MASK> %mask) nounwind alwaysinline {
 %mask_as_i1 = trunc <16 x MASK> %mask to <16 x i1>
-%old = load <16 x i32>* %0, align 4
+%old = load PTR_OP_ARGS(`<16 x i32>',` %0, align 4')
 %blend = select <16 x i1> %mask_as_i1, <16 x i32> %1, <16 x i32> %old
 store <16 x i32> %blend, <16 x i32>* %0, align 4
 ret void

@@ -414,7 +414,7 @@ define void @__masked_store_blend_i32(<16 x i32>* nocapture, <16 x i32>,
 define void @__masked_store_blend_i16(<16 x i16>* nocapture, <16 x i16>,
 <16 x MASK> %mask) nounwind alwaysinline {
 %mask_as_i1 = trunc <16 x MASK> %mask to <16 x i1>
-%old = load <16 x i16>* %0, align 4
+%old = load PTR_OP_ARGS(`<16 x i16>',` %0, align 4')
 %blend = select <16 x i1> %mask_as_i1, <16 x i16> %1, <16 x i16> %old
 store <16 x i16> %blend, <16 x i16>* %0, align 4
 ret void

@@ -424,7 +424,7 @@ declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>) noun

 define void @__masked_store_blend_i8(<16 x i8>* nocapture, <16 x i8>,
 <16 x MASK> %mask) nounwind alwaysinline {
-%old = load <16 x i8>* %0, align 4
+%old = load PTR_OP_ARGS(`<16 x i8>',` %0, align 4')
 %blend = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %old, <16 x i8> %1,
 <16 x i8> %mask)
 store <16 x i8> %blend, <16 x i8>* %0, align 4

@@ -187,7 +187,7 @@ define void @__fastmath() nounwind alwaysinline {
 %ptr = alloca i32
 %ptr8 = bitcast i32 * %ptr to i8 *
 call void @llvm.x86.sse.stmxcsr(i8 * %ptr8)
-%oldval = load i32 *%ptr
+%oldval = load PTR_OP_ARGS(`i32 ',`%ptr')

 ; turn on DAZ (64)/FTZ (32768) -> 32832
 %update = or i32 %oldval, 32832

@@ -471,7 +471,7 @@ define void @__masked_store_blend_i32(<8 x i32>* nocapture, <8 x i32>,
 <4 x i32> <i32 0, i32 1, i32 2, i32 3>
 %mask_b = shufflevector <8 x float> %mask_as_float, <8 x float> undef,
 <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-%oldValue = load <8 x i32>* %0, align 4
+%oldValue = load PTR_OP_ARGS(`<8 x i32>',` %0, align 4')
 %oldAsFloat = bitcast <8 x i32> %oldValue to <8 x float>
 %newAsFloat = bitcast <8 x i32> %1 to <8 x float>
 %old_a = shufflevector <8 x float> %oldAsFloat, <8 x float> undef,

@@ -500,7 +500,7 @@ define void @__masked_store_blend_i64(<8 x i64>* nocapture %ptr, <8 x i64> %new,

 %mask_as_float = bitcast <8 x i32> %mask to <8 x float>

-%old = load <8 x i64>* %ptr, align 8
+%old = load PTR_OP_ARGS(`<8 x i64>',` %ptr, align 8')

 ; set up the first two 64-bit values
 %old01 = shufflevector <8 x i64> %old, <8 x i64> undef, <2 x i32> <i32 0, i32 1>

@@ -433,7 +433,7 @@ declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>,
 define void @__masked_store_blend_i32(<4 x i32>* nocapture, <4 x i32>,
 <4 x i32> %mask) nounwind alwaysinline {
 %mask_as_float = bitcast <4 x i32> %mask to <4 x float>
-%oldValue = load <4 x i32>* %0, align 4
+%oldValue = load PTR_OP_ARGS(`<4 x i32>',` %0, align 4')
 %oldAsFloat = bitcast <4 x i32> %oldValue to <4 x float>
 %newAsFloat = bitcast <4 x i32> %1 to <4 x float>
 %blend = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %oldAsFloat,

@@ -447,7 +447,7 @@ define void @__masked_store_blend_i32(<4 x i32>* nocapture, <4 x i32>,

 define void @__masked_store_blend_i64(<4 x i64>* nocapture %ptr, <4 x i64> %new,
 <4 x i32> %i32mask) nounwind alwaysinline {
-%oldValue = load <4 x i64>* %ptr, align 8
+%oldValue = load PTR_OP_ARGS(`<4 x i64>',` %ptr, align 8')
 %mask = bitcast <4 x i32> %i32mask to <4 x float>

 ; Do 4x64-bit blends by doing two <4 x i32> blends, where the <4 x i32> values

@@ -2142,7 +2142,7 @@ declare void @_aligned_free(i8 *)

 define noalias i8 * @__new_uniform_32rt(i64 %size) {
 %conv = trunc i64 %size to i32
-%alignment = load i32* @memory_alignment
+%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
 %ptr = tail call i8* @_aligned_malloc(i32 %conv, i32 %alignment)
 ret i8* %ptr
 }

@@ -2151,7 +2151,7 @@ define <WIDTH x i64> @__new_varying32_32rt(<WIDTH x i32> %size, <WIDTH x MASK> %
 %ret = alloca <WIDTH x i64>
 store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %ret
 %ret64 = bitcast <WIDTH x i64> * %ret to i64 *
-%alignment = load i32* @memory_alignment
+%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')

 per_lane(WIDTH, <WIDTH x MASK> %mask, `
 %sz_LANE_ID = extractelement <WIDTH x i32> %size, i32 LANE

@@ -2160,7 +2160,7 @@ define <WIDTH x i64> @__new_varying32_32rt(<WIDTH x i32> %size, <WIDTH x MASK> %
 %store_LANE_ID = getelementptr PTR_OP_ARGS(`i64', `%ret64, i32 LANE')
 store i64 %ptr_int_LANE_ID, i64 * %store_LANE_ID')

-%r = load <WIDTH x i64> * %ret
+%r = load PTR_OP_ARGS(`<WIDTH x i64> ',` %ret')
 ret <WIDTH x i64> %r
 }

@@ -2195,7 +2195,7 @@ declare i8* @_aligned_malloc(i64, i64)
 declare void @_aligned_free(i8 *)

 define noalias i8 * @__new_uniform_64rt(i64 %size) {
-%alignment = load i32* @memory_alignment
+%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
 %alignment64 = sext i32 %alignment to i64
 %ptr = tail call i8* @_aligned_malloc(i64 %size, i64 %alignment64)
 ret i8* %ptr

@@ -2205,7 +2205,7 @@ define <WIDTH x i64> @__new_varying32_64rt(<WIDTH x i32> %size, <WIDTH x MASK> %
 %ret = alloca <WIDTH x i64>
 store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %ret
 %ret64 = bitcast <WIDTH x i64> * %ret to i64 *
-%alignment = load i32* @memory_alignment
+%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
 %alignment64 = sext i32 %alignment to i64

 per_lane(WIDTH, <WIDTH x MASK> %mask, `

@@ -2216,7 +2216,7 @@ define <WIDTH x i64> @__new_varying32_64rt(<WIDTH x i32> %size, <WIDTH x MASK> %
 %store_LANE_ID = getelementptr PTR_OP_ARGS(`i64', `%ret64, i32 LANE')
 store i64 %ptr_int_LANE_ID, i64 * %store_LANE_ID')

-%r = load <WIDTH x i64> * %ret
+%r = load PTR_OP_ARGS(`<WIDTH x i64> ',` %ret')
 ret <WIDTH x i64> %r
 }

@@ -2224,7 +2224,7 @@ define <WIDTH x i64> @__new_varying64_64rt(<WIDTH x i64> %size, <WIDTH x MASK> %
 %ret = alloca <WIDTH x i64>
 store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %ret
 %ret64 = bitcast <WIDTH x i64> * %ret to i64 *
-%alignment = load i32* @memory_alignment
+%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
 %alignment64 = sext i32 %alignment to i64

 per_lane(WIDTH, <WIDTH x MASK> %mask, `

@@ -2234,7 +2234,7 @@ define <WIDTH x i64> @__new_varying64_64rt(<WIDTH x i64> %size, <WIDTH x MASK> %
 %store_LANE_ID = getelementptr PTR_OP_ARGS(`i64', `%ret64, i32 LANE')
 store i64 %ptr_int_LANE_ID, i64 * %store_LANE_ID')

-%r = load <WIDTH x i64> * %ret
+%r = load PTR_OP_ARGS(`<WIDTH x i64> ',` %ret')
 ret <WIDTH x i64> %r
 }

@@ -2437,7 +2437,7 @@ define <$1 x i64> @__$2_varying_$3(<$1 x i64>, <$1 x i64>) nounwind alwaysinline
 store i64 %v_`'i, i64 * %ptr_`'i
 ')

-%ret = load <$1 x i64> * %rptr
+%ret = load PTR_OP_ARGS(`<$1 x i64> ',` %rptr')
 ret <$1 x i64> %ret
 }
 ')

@@ -2483,7 +2483,7 @@ entry:

 load:
 %ptr = bitcast i8 * %0 to <WIDTH x $1> *
-%valall = load <WIDTH x $1> * %ptr, align $2
+%valall = load PTR_OP_ARGS(`<WIDTH x $1> ',` %ptr, align $2')
 ret <WIDTH x $1> %valall

 loop:

@@ -2500,7 +2500,7 @@ load_lane:
 ; allocaed memory above
 %ptr32 = bitcast i8 * %0 to $1 *
 %lane_ptr = getelementptr PTR_OP_ARGS(`$1', `%ptr32, i32 %lane')
-%val = load $1 * %lane_ptr
+%val = load PTR_OP_ARGS(`$1 ',` %lane_ptr')
 %store_ptr = getelementptr PTR_OP_ARGS(`$1', `%retptr32, i32 %lane')
 store $1 %val, $1 * %store_ptr
 br label %lane_done

@@ -2511,7 +2511,7 @@ lane_done:
 br i1 %done, label %return, label %loop

 return:
-%r = load <WIDTH x $1> * %retptr
+%r = load PTR_OP_ARGS(`<WIDTH x $1> ',` %retptr')
 ret <WIDTH x $1> %r
 }
 ')

@@ -2535,7 +2535,7 @@ define void @__masked_store_$1(<WIDTH x $1>* nocapture, <WIDTH x $1>, <WIDTH x M
 define(`masked_store_blend_8_16_by_4', `
 define void @__masked_store_blend_i8(<4 x i8>* nocapture, <4 x i8>,
 <4 x i32>) nounwind alwaysinline {
-%old = load <4 x i8> * %0, align 1
+%old = load PTR_OP_ARGS(`<4 x i8> ',` %0, align 1')
 ifelse(LLVM_VERSION,LLVM_3_0,`
 %old32 = bitcast <4 x i8> %old to i32
 %new32 = bitcast <4 x i8> %1 to i32

@@ -2559,7 +2559,7 @@ define void @__masked_store_blend_i8(<4 x i8>* nocapture, <4 x i8>,

 define void @__masked_store_blend_i16(<4 x i16>* nocapture, <4 x i16>,
 <4 x i32>) nounwind alwaysinline {
-%old = load <4 x i16> * %0, align 2
+%old = load PTR_OP_ARGS(`<4 x i16> ',` %0, align 2')
 ifelse(LLVM_VERSION,LLVM_3_0,`
 %old64 = bitcast <4 x i16> %old to i64
 %new64 = bitcast <4 x i16> %1 to i64

@@ -2585,7 +2585,7 @@ define void @__masked_store_blend_i16(<4 x i16>* nocapture, <4 x i16>,
 define(`masked_store_blend_8_16_by_4_mask64', `
 define void @__masked_store_blend_i8(<4 x i8>* nocapture, <4 x i8>,
 <4 x i64>) nounwind alwaysinline {
-%old = load <4 x i8> * %0, align 1
+%old = load PTR_OP_ARGS(`<4 x i8> ',` %0, align 1')
 ifelse(LLVM_VERSION,LLVM_3_0,`
 %old32 = bitcast <4 x i8> %old to i32
 %new32 = bitcast <4 x i8> %1 to i32

@@ -2609,7 +2609,7 @@ define void @__masked_store_blend_i8(<4 x i8>* nocapture, <4 x i8>,

 define void @__masked_store_blend_i16(<4 x i16>* nocapture, <4 x i16>,
 <4 x i64>) nounwind alwaysinline {
-%old = load <4 x i16> * %0, align 2
+%old = load PTR_OP_ARGS(`<4 x i16> ',` %0, align 2')
 ifelse(LLVM_VERSION,LLVM_3_0,`
 %old64 = bitcast <4 x i16> %old to i64
 %new64 = bitcast <4 x i16> %1 to i64

@@ -2635,7 +2635,7 @@ define void @__masked_store_blend_i16(<4 x i16>* nocapture, <4 x i16>,
 define(`masked_store_blend_8_16_by_8', `
 define void @__masked_store_blend_i8(<8 x i8>* nocapture, <8 x i8>,
 <8 x i32>) nounwind alwaysinline {
-%old = load <8 x i8> * %0, align 1
+%old = load PTR_OP_ARGS(`<8 x i8> ',` %0, align 1')
 ifelse(LLVM_VERSION,LLVM_3_0,`
 %old64 = bitcast <8 x i8> %old to i64
 %new64 = bitcast <8 x i8> %1 to i64

@@ -2659,7 +2659,7 @@ define void @__masked_store_blend_i8(<8 x i8>* nocapture, <8 x i8>,

 define void @__masked_store_blend_i16(<8 x i16>* nocapture, <8 x i16>,
 <8 x i32>) nounwind alwaysinline {
-%old = load <8 x i16> * %0, align 2
+%old = load PTR_OP_ARGS(`<8 x i16> ',` %0, align 2')
 ifelse(LLVM_VERSION,LLVM_3_0,`
 %old128 = bitcast <8 x i16> %old to i128
 %new128 = bitcast <8 x i16> %1 to i128

@@ -2686,7 +2686,7 @@ define void @__masked_store_blend_i16(<8 x i16>* nocapture, <8 x i16>,
 define(`masked_store_blend_8_16_by_16', `
 define void @__masked_store_blend_i8(<16 x i8>* nocapture, <16 x i8>,
 <16 x i32>) nounwind alwaysinline {
-%old = load <16 x i8> * %0, align 1
+%old = load PTR_OP_ARGS(`<16 x i8> ',` %0, align 1')
 ifelse(LLVM_VERSION,LLVM_3_0,`
 %old128 = bitcast <16 x i8> %old to i128
 %new128 = bitcast <16 x i8> %1 to i128

@@ -2710,7 +2710,7 @@ define void @__masked_store_blend_i8(<16 x i8>* nocapture, <16 x i8>,

 define void @__masked_store_blend_i16(<16 x i16>* nocapture, <16 x i16>,
 <16 x i32>) nounwind alwaysinline {
-%old = load <16 x i16> * %0, align 2
+%old = load PTR_OP_ARGS(`<16 x i16> ',` %0, align 2')
 ifelse(LLVM_VERSION,LLVM_3_0,`
 %old256 = bitcast <16 x i16> %old to i256
 %new256 = bitcast <16 x i16> %1 to i256

@@ -2760,7 +2760,7 @@ entry:
 if.then: ; preds = %entry
 %idxprom = ashr i64 %call, 32
 %arrayidx = getelementptr inbounds PTR_OP_ARGS(`i32', `startptr, i64 %idxprom')
-%val = load i32* %arrayidx, align 4
+%val = load PTR_OP_ARGS(`i32',` %arrayidx, align 4')
 %valvec = insertelement <1 x i32> undef, i32 %val, i32 0
 store <1 x i32> %valvec, <1 x i32>* %val_ptr, align 4
 br label %if.end

@@ -2849,7 +2849,7 @@ domixed:
 %castptr = bitcast <$1 x $2> * %ptr to <$1 x $4> *
 %castv = bitcast <$1 x $2> %v to <$1 x $4>
 call void @__masked_store_blend_i$6(<$1 x $4> * %castptr, <$1 x $4> %castv, <$1 x MASK> %mask)
-%blendvec = load <$1 x $2> * %ptr
+%blendvec = load PTR_OP_ARGS(`<$1 x $2> ',` %ptr')
 br label %check_neighbors

 check_neighbors:

@@ -2985,12 +2985,12 @@ define <WIDTH x $1> @__gather32_$1(<WIDTH x i32> %ptrs,
 per_lane(WIDTH, <WIDTH x MASK> %vecmask, `
 %iptr_LANE_ID = extractelement <WIDTH x i32> %ptrs, i32 LANE
 %ptr_LANE_ID = inttoptr i32 %iptr_LANE_ID to $1 *
-%val_LANE_ID = load $1 * %ptr_LANE_ID
+%val_LANE_ID = load PTR_OP_ARGS(`$1 ',` %ptr_LANE_ID')
 %store_ptr_LANE_ID = getelementptr PTR_OP_ARGS(`<WIDTH x $1>', `%ret_ptr, i32 0, i32 LANE')
 store $1 %val_LANE_ID, $1 * %store_ptr_LANE_ID
 ')

-%ret = load <WIDTH x $1> * %ret_ptr
+%ret = load PTR_OP_ARGS(`<WIDTH x $1> ',` %ret_ptr')
 ret <WIDTH x $1> %ret
 }

@@ -3001,12 +3001,12 @@ define <WIDTH x $1> @__gather64_$1(<WIDTH x i64> %ptrs,
 per_lane(WIDTH, <WIDTH x MASK> %vecmask, `
 %iptr_LANE_ID = extractelement <WIDTH x i64> %ptrs, i32 LANE
 %ptr_LANE_ID = inttoptr i64 %iptr_LANE_ID to $1 *
-%val_LANE_ID = load $1 * %ptr_LANE_ID
+%val_LANE_ID = load PTR_OP_ARGS(`$1 ',` %ptr_LANE_ID')
 %store_ptr_LANE_ID = getelementptr PTR_OP_ARGS(`<WIDTH x $1>', `%ret_ptr, i32 0, i32 LANE')
 store $1 %val_LANE_ID, $1 * %store_ptr_LANE_ID
 ')

-%ret = load <WIDTH x $1> * %ret_ptr
+%ret = load PTR_OP_ARGS(`<WIDTH x $1> ',` %ret_ptr')
 ret <WIDTH x $1> %ret
 }
 ')
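The gather hunks above lean on the per_lane m4 macro, which is defined elsewhere in
util.m4. Judging from its uses in this diff, it stamps out its quoted body once per
lane with LANE replaced by the lane index, executing each copy only when that lane's
mask bit is set; a hedged, illustrative expansion of the __gather32_$1 body for
lane 0 (not the macro's actual output) would be:

 %iptr_0 = extractelement <WIDTH x i32> %ptrs, i32 0
 %ptr_0 = inttoptr i32 %iptr_0 to $1 *
 %val_0 = load PTR_OP_ARGS(`$1 ',` %ptr_0')
 %store_ptr_0 = getelementptr PTR_OP_ARGS(`<WIDTH x $1>', `%ret_ptr, i32 0, i32 0')
 store $1 %val_0, $1 * %store_ptr_0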
@@ -3033,7 +3033,7 @@ define <WIDTH x $1> @__gather_elt32_$1(i8 * %ptr, <WIDTH x i32> %offsets, i32 %o

 ; load value and insert into returned value
 %ptrcast = bitcast i8 * %finalptr to $1 *
-%val = load $1 *%ptrcast
+%val = load PTR_OP_ARGS(`$1 ',`%ptrcast')
 %updatedret = insertelement <WIDTH x $1> %ret, $1 %val, i32 %lane
 ret <WIDTH x $1> %updatedret
 }

@@ -3054,7 +3054,7 @@ define <WIDTH x $1> @__gather_elt64_$1(i8 * %ptr, <WIDTH x i64> %offsets, i32 %o

 ; load value and insert into returned value
 %ptrcast = bitcast i8 * %finalptr to $1 *
-%val = load $1 *%ptrcast
+%val = load PTR_OP_ARGS(`$1 ',`%ptrcast')
 %updatedret = insertelement <WIDTH x $1> %ret, $1 %val, i32 %lane
 ret <WIDTH x $1> %updatedret
 }

@@ -3072,13 +3072,13 @@ define <WIDTH x $1> @__gather_factored_base_offsets32_$1(i8 * %ptr, <WIDTH x i32
 store <WIDTH x i32> zeroinitializer, <WIDTH x i32> * %offsetsPtr
 call void @__masked_store_blend_i32(<WIDTH x i32> * %offsetsPtr, <WIDTH x i32> %offsets,
 <WIDTH x MASK> %vecmask)
-%newOffsets = load <WIDTH x i32> * %offsetsPtr
+%newOffsets = load PTR_OP_ARGS(`<WIDTH x i32> ',` %offsetsPtr')

 %deltaPtr = alloca <WIDTH x i32>
 store <WIDTH x i32> zeroinitializer, <WIDTH x i32> * %deltaPtr
 call void @__masked_store_blend_i32(<WIDTH x i32> * %deltaPtr, <WIDTH x i32> %offset_delta,
 <WIDTH x MASK> %vecmask)
-%newDelta = load <WIDTH x i32> * %deltaPtr
+%newDelta = load PTR_OP_ARGS(`<WIDTH x i32> ',` %deltaPtr')

 %ret0 = call <WIDTH x $1> @__gather_elt32_$1(i8 * %ptr, <WIDTH x i32> %newOffsets,
 i32 %offset_scale, <WIDTH x i32> %newDelta,

@@ -3103,13 +3103,13 @@ define <WIDTH x $1> @__gather_factored_base_offsets64_$1(i8 * %ptr, <WIDTH x i64
 store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %offsetsPtr
 call void @__masked_store_blend_i64(<WIDTH x i64> * %offsetsPtr, <WIDTH x i64> %offsets,
 <WIDTH x MASK> %vecmask)
-%newOffsets = load <WIDTH x i64> * %offsetsPtr
+%newOffsets = load PTR_OP_ARGS(`<WIDTH x i64> ',` %offsetsPtr')

 %deltaPtr = alloca <WIDTH x i64>
 store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %deltaPtr
 call void @__masked_store_blend_i64(<WIDTH x i64> * %deltaPtr, <WIDTH x i64> %offset_delta,
 <WIDTH x MASK> %vecmask)
-%newDelta = load <WIDTH x i64> * %deltaPtr
+%newDelta = load PTR_OP_ARGS(`<WIDTH x i64> ',` %deltaPtr')

 %ret0 = call <WIDTH x $1> @__gather_elt64_$1(i8 * %ptr, <WIDTH x i64> %newOffsets,
 i32 %offset_scale, <WIDTH x i64> %newDelta,

builtins/util.m4
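The util.m4 hunks below do double duty: besides converting loads, they repair a batch
of getelementptr calls where an earlier automated rewrite had evidently split the
`<4 x float>' type across the macro arguments, leaving fragments like `float>' and
`t> *'. The fix passes the whole type as $1, matching the explicit-pointee-type
getelementptr syntax that changed alongside load:

;; old form:  %out0b = getelementptr <4 x float>* %out0a, i32 1
;; new form:  %out0b = getelementptr <4 x float>, <4 x float>* %out0a, i32 1
%out0b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 1')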
@@ -1214,7 +1214,7 @@ not_const:
 %ptr_as_elt_array = bitcast <WIDTH x $1> * %ptr to [eval(2*WIDTH) x $1] *
 %load_ptr = getelementptr PTR_OP_ARGS(`[eval(2*WIDTH) x $1]', `%ptr_as_elt_array, i32 0, i32 %offset')
 %load_ptr_vec = bitcast $1 * %load_ptr to <WIDTH x $1> *
-%result = load <WIDTH x $1> * %load_ptr_vec, align $2
+%result = load PTR_OP_ARGS(`<WIDTH x $1> ',` %load_ptr_vec, align $2')
 ret <WIDTH x $1> %result
 }

@@ -1231,7 +1231,7 @@ define <WIDTH x $1> @__shift_$1(<WIDTH x $1>, i32) nounwind readnone alwaysinlin
 %ptr_as_elt_array = bitcast <WIDTH x $1> * %ptr to [eval(3*WIDTH) x $1] *
 %load_ptr = getelementptr PTR_OP_ARGS(`[eval(3*WIDTH) x $1]', `%ptr_as_elt_array, i32 0, i32 %offset')
 %load_ptr_vec = bitcast $1 * %load_ptr to <WIDTH x $1> *
-%result = load <WIDTH x $1> * %load_ptr_vec, align $2
+%result = load PTR_OP_ARGS(`<WIDTH x $1> ',` %load_ptr_vec, align $2')
 ret <WIDTH x $1> %result
 }

@@ -1277,12 +1277,12 @@ not_const:
 %baseptr = bitcast <eval(2*WIDTH) x $1> * %ptr to $1 *

 %ptr_0 = getelementptr PTR_OP_ARGS(`$1', `%baseptr, i32 %index_0')
-%val_0 = load $1 * %ptr_0
+%val_0 = load PTR_OP_ARGS(`$1 ',` %ptr_0')
 %result_0 = insertelement <WIDTH x $1> undef, $1 %val_0, i32 0

 forloop(i, 1, eval(WIDTH-1), `
 %ptr_`'i = getelementptr PTR_OP_ARGS(`$1', `%baseptr, i32 %index_`'i')
-%val_`'i = load $1 * %ptr_`'i
+%val_`'i = load PTR_OP_ARGS(`$1 ',` %ptr_`'i')
 %result_`'i = insertelement <WIDTH x $1> %result_`'eval(i-1), $1 %val_`'i, i32 i
 ')

@@ -1525,7 +1525,7 @@ define <$1 x $2> @__atomic_compare_exchange_$3_global($2* %ptr, <$1 x $2> %cmp,
 %rp_LANE_ID = getelementptr PTR_OP_ARGS(`$2', `%rptr32, i32 LANE')
 store $2 %r_LANE_ID, $2 * %rp_LANE_ID')

-%r = load <$1 x $2> * %rptr
+%r = load PTR_OP_ARGS(`<$1 x $2> ',` %rptr')
 ret <$1 x $2> %r
 }

@@ -1790,13 +1790,13 @@ define void

 ;; Similarly for the output pointers
 %out0a = bitcast <8 x float> * %out0 to <4 x float> *
-%out0b = getelementptr PTR_OP_ARGS(`float>', `t> * %out0a, i32 1')
+%out0b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 1')
 %out1a = bitcast <8 x float> * %out1 to <4 x float> *
-%out1b = getelementptr PTR_OP_ARGS(`float>', `t> * %out1a, i32 1')
+%out1b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out1a, i32 1')
 %out2a = bitcast <8 x float> * %out2 to <4 x float> *
-%out2b = getelementptr PTR_OP_ARGS(`float>', `t> * %out2a, i32 1')
+%out2b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out2a, i32 1')
 %out3a = bitcast <8 x float> * %out3 to <4 x float> *
-%out3b = getelementptr PTR_OP_ARGS(`float>', `t> * %out3a, i32 1')
+%out3b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out3a, i32 1')

 ;; Do the first part--given input vectors like
 ;; <x0 y0 z0 x1 y1 z1 x2 y2> <z2 x3 y3 z3 x4 y4 z4 x5> <y5 z5 x6 y6 z6 x7 y7 z7>,

@@ -1839,13 +1839,13 @@ define void
 <4 x i32> <i32 4, i32 5, i32 6, i32 7>

 %out0a = bitcast <8 x float> * %out0 to <4 x float> *
-%out0b = getelementptr PTR_OP_ARGS(`float>', `t> * %out0a, i32 1')
+%out0b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 1')
 %out1a = bitcast <8 x float> * %out1 to <4 x float> *
-%out1b = getelementptr PTR_OP_ARGS(`float>', `t> * %out1a, i32 1')
+%out1b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out1a, i32 1')
 %out2a = bitcast <8 x float> * %out2 to <4 x float> *
-%out2b = getelementptr PTR_OP_ARGS(`float>', `t> * %out2a, i32 1')
+%out2b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out2a, i32 1')
 %out3a = bitcast <8 x float> * %out3 to <4 x float> *
-%out3b = getelementptr PTR_OP_ARGS(`float>', `t> * %out3a, i32 1')
+%out3b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out3a, i32 1')

 ;; First part--given input vectors
 ;; <x0 x1 x2 x3 x4 x5 x6 x7> <y0 y1 y2 y3 y4 y5 y6 y7> <z0 z1 z2 z3 z4 z5 z6 z7>

@@ -1882,11 +1882,11 @@ define void
 <4 x i32> <i32 4, i32 5, i32 6, i32 7>

 %out0a = bitcast <8 x float> * %out0 to <4 x float> *
-%out0b = getelementptr PTR_OP_ARGS(`float>', `t> * %out0a, i32 1')
+%out0b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 1')
 %out1a = bitcast <8 x float> * %out1 to <4 x float> *
-%out1b = getelementptr PTR_OP_ARGS(`float>', `t> * %out1a, i32 1')
+%out1b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out1a, i32 1')
 %out2a = bitcast <8 x float> * %out2 to <4 x float> *
-%out2b = getelementptr PTR_OP_ARGS(`float>', `t> * %out2a, i32 1')
+%out2b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out2a, i32 1')

 call void @__aos_to_soa3_float4(<4 x float> %v0a, <4 x float> %v0b,
 <4 x float> %v1a, <4 x float> * %out0a, <4 x float> * %out1a,

@@ -1916,11 +1916,11 @@ define void
 <4 x i32> <i32 4, i32 5, i32 6, i32 7>

 %out0a = bitcast <8 x float> * %out0 to <4 x float> *
-%out0b = getelementptr PTR_OP_ARGS(`float>', `t> * %out0a, i32 1')
+%out0b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 1')
 %out1a = bitcast <8 x float> * %out1 to <4 x float> *
-%out1b = getelementptr PTR_OP_ARGS(`float>', `t> * %out1a, i32 1')
+%out1b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out1a, i32 1')
 %out2a = bitcast <8 x float> * %out2 to <4 x float> *
-%out2b = getelementptr PTR_OP_ARGS(`float>', `t> * %out2a, i32 1')
+%out2b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out2a, i32 1')

 call void @__soa_to_aos3_float4(<4 x float> %v0a, <4 x float> %v1a,
 <4 x float> %v2a, <4 x float> * %out0a, <4 x float> * %out0b,

@@ -1972,21 +1972,21 @@ define void
 <4 x i32> <i32 12, i32 13, i32 14, i32 15>

 %out0a = bitcast <16 x float> * %out0 to <4 x float> *
-%out0b = getelementptr PTR_OP_ARGS(`float>', `t> * %out0a, i32 1')
-%out0c = getelementptr PTR_OP_ARGS(`float>', `t> * %out0a, i32 2')
-%out0d = getelementptr PTR_OP_ARGS(`float>', `t> * %out0a, i32 3')
+%out0b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 1')
+%out0c = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 2')
+%out0d = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 3')
 %out1a = bitcast <16 x float> * %out1 to <4 x float> *
-%out1b = getelementptr PTR_OP_ARGS(`float>', `t> * %out1a, i32 1')
-%out1c = getelementptr PTR_OP_ARGS(`float>', `t> * %out1a, i32 2')
-%out1d = getelementptr PTR_OP_ARGS(`float>', `t> * %out1a, i32 3')
+%out1b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out1a, i32 1')
+%out1c = getelementptr PTR_OP_ARGS(`<4 x float>', `%out1a, i32 2')
+%out1d = getelementptr PTR_OP_ARGS(`<4 x float>', `%out1a, i32 3')
 %out2a = bitcast <16 x float> * %out2 to <4 x float> *
-%out2b = getelementptr PTR_OP_ARGS(`float>', `t> * %out2a, i32 1')
-%out2c = getelementptr PTR_OP_ARGS(`float>', `t> * %out2a, i32 2')
-%out2d = getelementptr PTR_OP_ARGS(`float>', `t> * %out2a, i32 3')
+%out2b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out2a, i32 1')
+%out2c = getelementptr PTR_OP_ARGS(`<4 x float>', `%out2a, i32 2')
+%out2d = getelementptr PTR_OP_ARGS(`<4 x float>', `%out2a, i32 3')
 %out3a = bitcast <16 x float> * %out3 to <4 x float> *
-%out3b = getelementptr PTR_OP_ARGS(`float>', `t> * %out3a, i32 1')
-%out3c = getelementptr PTR_OP_ARGS(`float>', `t> * %out3a, i32 2')
-%out3d = getelementptr PTR_OP_ARGS(`float>', `t> * %out3a, i32 3')
+%out3b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out3a, i32 1')
+%out3c = getelementptr PTR_OP_ARGS(`<4 x float>', `%out3a, i32 2')
+%out3d = getelementptr PTR_OP_ARGS(`<4 x float>', `%out3a, i32 3')

 call void @__aos_to_soa4_float4(<4 x float> %v0a, <4 x float> %v0b,
 <4 x float> %v0c, <4 x float> %v0d, <4 x float> * %out0a,

@@ -2043,21 +2043,21 @@ define void
 <4 x i32> <i32 12, i32 13, i32 14, i32 15>

 %out0a = bitcast <16 x float> * %out0 to <4 x float> *
-%out0b = getelementptr PTR_OP_ARGS(`float>', `t> * %out0a, i32 1')
-%out0c = getelementptr PTR_OP_ARGS(`float>', `t> * %out0a, i32 2')
-%out0d = getelementptr PTR_OP_ARGS(`float>', `t> * %out0a, i32 3')
+%out0b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 1')
+%out0c = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 2')
+%out0d = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 3')
 %out1a = bitcast <16 x float> * %out1 to <4 x float> *
-%out1b = getelementptr PTR_OP_ARGS(`float>', `t> * %out1a, i32 1')
-%out1c = getelementptr PTR_OP_ARGS(`float>', `t> * %out1a, i32 2')
-%out1d = getelementptr PTR_OP_ARGS(`float>', `t> * %out1a, i32 3')
+%out1b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out1a, i32 1')
+%out1c = getelementptr PTR_OP_ARGS(`<4 x float>', `%out1a, i32 2')
+%out1d = getelementptr PTR_OP_ARGS(`<4 x float>', `%out1a, i32 3')
 %out2a = bitcast <16 x float> * %out2 to <4 x float> *
-%out2b = getelementptr PTR_OP_ARGS(`float>', `t> * %out2a, i32 1')
-%out2c = getelementptr PTR_OP_ARGS(`float>', `t> * %out2a, i32 2')
-%out2d = getelementptr PTR_OP_ARGS(`float>', `t> * %out2a, i32 3')
+%out2b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out2a, i32 1')
+%out2c = getelementptr PTR_OP_ARGS(`<4 x float>', `%out2a, i32 2')
+%out2d = getelementptr PTR_OP_ARGS(`<4 x float>', `%out2a, i32 3')
 %out3a = bitcast <16 x float> * %out3 to <4 x float> *
-%out3b = getelementptr PTR_OP_ARGS(`float>', `t> * %out3a, i32 1')
-%out3c = getelementptr PTR_OP_ARGS(`float>', `t> * %out3a, i32 2')
-%out3d = getelementptr PTR_OP_ARGS(`float>', `t> * %out3a, i32 3')
+%out3b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out3a, i32 1')
+%out3c = getelementptr PTR_OP_ARGS(`<4 x float>', `%out3a, i32 2')
+%out3d = getelementptr PTR_OP_ARGS(`<4 x float>', `%out3a, i32 3')

 call void @__soa_to_aos4_float4(<4 x float> %v0a, <4 x float> %v1a,
 <4 x float> %v2a, <4 x float> %v3a, <4 x float> * %out0a,

@@ -2105,17 +2105,17 @@ define void
 <4 x i32> <i32 12, i32 13, i32 14, i32 15>

 %out0a = bitcast <16 x float> * %out0 to <4 x float> *
-%out0b = getelementptr PTR_OP_ARGS(`float>', `t> * %out0a, i32 1')
-%out0c = getelementptr PTR_OP_ARGS(`float>', `t> * %out0a, i32 2')
-%out0d = getelementptr PTR_OP_ARGS(`float>', `t> * %out0a, i32 3')
+%out0b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 1')
+%out0c = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 2')
+%out0d = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 3')
 %out1a = bitcast <16 x float> * %out1 to <4 x float> *
-%out1b = getelementptr PTR_OP_ARGS(`float>', `t> * %out1a, i32 1')
-%out1c = getelementptr PTR_OP_ARGS(`float>', `t> * %out1a, i32 2')
-%out1d = getelementptr PTR_OP_ARGS(`float>', `t> * %out1a, i32 3')
+%out1b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out1a, i32 1')
+%out1c = getelementptr PTR_OP_ARGS(`<4 x float>', `%out1a, i32 2')
+%out1d = getelementptr PTR_OP_ARGS(`<4 x float>', `%out1a, i32 3')
 %out2a = bitcast <16 x float> * %out2 to <4 x float> *
-%out2b = getelementptr PTR_OP_ARGS(`float>', `t> * %out2a, i32 1')
-%out2c = getelementptr PTR_OP_ARGS(`float>', `t> * %out2a, i32 2')
-%out2d = getelementptr PTR_OP_ARGS(`float>', `t> * %out2a, i32 3')
+%out2b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out2a, i32 1')
+%out2c = getelementptr PTR_OP_ARGS(`<4 x float>', `%out2a, i32 2')
+%out2d = getelementptr PTR_OP_ARGS(`<4 x float>', `%out2a, i32 3')

 call void @__aos_to_soa3_float4(<4 x float> %v0a, <4 x float> %v0b,
 <4 x float> %v0c, <4 x float> * %out0a, <4 x float> * %out1a,

@@ -2163,17 +2163,17 @@ define void
 <4 x i32> <i32 12, i32 13, i32 14, i32 15>

 %out0a = bitcast <16 x float> * %out0 to <4 x float> *
-%out0b = getelementptr PTR_OP_ARGS(`float>', `t> * %out0a, i32 1')
-%out0c = getelementptr PTR_OP_ARGS(`float>', `t> * %out0a, i32 2')
-%out0d = getelementptr PTR_OP_ARGS(`float>', `t> * %out0a, i32 3')
+%out0b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 1')
+%out0c = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 2')
+%out0d = getelementptr PTR_OP_ARGS(`<4 x float>', `%out0a, i32 3')
 %out1a = bitcast <16 x float> * %out1 to <4 x float> *
-%out1b = getelementptr PTR_OP_ARGS(`float>', `t> * %out1a, i32 1')
-%out1c = getelementptr PTR_OP_ARGS(`float>', `t> * %out1a, i32 2')
-%out1d = getelementptr PTR_OP_ARGS(`float>', `t> * %out1a, i32 3')
+%out1b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out1a, i32 1')
+%out1c = getelementptr PTR_OP_ARGS(`<4 x float>', `%out1a, i32 2')
+%out1d = getelementptr PTR_OP_ARGS(`<4 x float>', `%out1a, i32 3')
 %out2a = bitcast <16 x float> * %out2 to <4 x float> *
-%out2b = getelementptr PTR_OP_ARGS(`float>', `t> * %out2a, i32 1')
-%out2c = getelementptr PTR_OP_ARGS(`float>', `t> * %out2a, i32 2')
-%out2d = getelementptr PTR_OP_ARGS(`float>', `t> * %out2a, i32 3')
+%out2b = getelementptr PTR_OP_ARGS(`<4 x float>', `%out2a, i32 1')
+%out2c = getelementptr PTR_OP_ARGS(`<4 x float>', `%out2a, i32 2')
+%out2d = getelementptr PTR_OP_ARGS(`<4 x float>', `%out2a, i32 3')

 call void @__soa_to_aos3_float4(<4 x float> %v0a, <4 x float> %v1a,
 <4 x float> %v2a, <4 x float> * %out0a, <4 x float> * %out0b,

@@ -2198,13 +2198,13 @@ define void
|
||||
<WIDTH x float> * noalias %out2, <WIDTH x float> * noalias %out3)
|
||||
nounwind alwaysinline {
|
||||
%p0 = bitcast float * %p to <WIDTH x float> *
|
||||
%v0 = load <WIDTH x float> * %p0, align 4
|
||||
%v0 = load PTR_OP_ARGS(`<WIDTH x float> ',` %p0, align 4')
|
||||
%p1 = getelementptr PTR_OP_ARGS(`<WIDTH x float>', `%p0, i32 1')
|
||||
%v1 = load <WIDTH x float> * %p1, align 4
|
||||
%v1 = load PTR_OP_ARGS(`<WIDTH x float> ',` %p1, align 4')
|
||||
%p2 = getelementptr PTR_OP_ARGS(`<WIDTH x float>', `%p0, i32 2')
|
||||
%v2 = load <WIDTH x float> * %p2, align 4
|
||||
%v2 = load PTR_OP_ARGS(`<WIDTH x float> ',` %p2, align 4')
|
||||
%p3 = getelementptr PTR_OP_ARGS(`<WIDTH x float>', `%p0, i32 3')
|
||||
%v3 = load <WIDTH x float> * %p3, align 4
|
||||
%v3 = load PTR_OP_ARGS(`<WIDTH x float> ',` %p3, align 4')
|
||||
call void @__aos_to_soa4_float`'WIDTH (<WIDTH x float> %v0, <WIDTH x float> %v1,
|
||||
<WIDTH x float> %v2, <WIDTH x float> %v3, <WIDTH x float> * %out0,
|
||||
<WIDTH x float> * %out1, <WIDTH x float> * %out2, <WIDTH x float> * %out3)
|
||||
@@ -2231,11 +2231,11 @@ define void
|
||||
<WIDTH x float> * %out0, <WIDTH x float> * %out1,
|
||||
<WIDTH x float> * %out2) nounwind alwaysinline {
|
||||
%p0 = bitcast float * %p to <WIDTH x float> *
|
||||
%v0 = load <WIDTH x float> * %p0, align 4
|
||||
%v0 = load PTR_OP_ARGS(`<WIDTH x float> ',` %p0, align 4')
|
||||
%p1 = getelementptr PTR_OP_ARGS(`<WIDTH x float>', `%p0, i32 1')
|
||||
%v1 = load <WIDTH x float> * %p1, align 4
|
||||
%v1 = load PTR_OP_ARGS(`<WIDTH x float> ',` %p1, align 4')
|
||||
%p2 = getelementptr PTR_OP_ARGS(`<WIDTH x float>', `%p0, i32 2')
|
||||
%v2 = load <WIDTH x float> * %p2, align 4
|
||||
%v2 = load PTR_OP_ARGS(`<WIDTH x float> ',` %p2, align 4')
|
||||
call void @__aos_to_soa3_float`'WIDTH (<WIDTH x float> %v0, <WIDTH x float> %v1,
|
||||
<WIDTH x float> %v2, <WIDTH x float> * %out0, <WIDTH x float> * %out1,
|
||||
<WIDTH x float> * %out2)
|
||||
@@ -3397,9 +3397,9 @@ declare void @free(i8 *)
|
||||
define noalias i8 * @__new_uniform_32rt(i64 %size) {
|
||||
%ptr = alloca i8*
|
||||
%conv = trunc i64 %size to i32
|
||||
%alignment = load i32* @memory_alignment
|
||||
%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
|
||||
%call1 = call i32 @posix_memalign(i8** %ptr, i32 %alignment, i32 %conv)
|
||||
%ptr_val = load i8** %ptr
|
||||
%ptr_val = load PTR_OP_ARGS(`i8*',` %ptr')
|
||||
ret i8* %ptr_val
|
||||
}
|
||||
|
||||
@@ -3407,7 +3407,7 @@ define <WIDTH x i64> @__new_varying32_32rt(<WIDTH x i32> %size, <WIDTH x MASK> %
|
||||
%ret = alloca <WIDTH x i64>
|
||||
store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %ret
|
||||
%ret64 = bitcast <WIDTH x i64> * %ret to i64 *
|
||||
%alignment = load i32* @memory_alignment
|
||||
%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
|
||||
|
||||
per_lane(WIDTH, <WIDTH x MASK> %mask, `
|
||||
%sz_LANE_ID = extractelement <WIDTH x i32> %size, i32 LANE
|
||||
@@ -3415,7 +3415,7 @@ define <WIDTH x i64> @__new_varying32_32rt(<WIDTH x i32> %size, <WIDTH x MASK> %
|
||||
%ptr_LANE_ID = bitcast i64* %store_LANE_ID to i8**
|
||||
%call_LANE_ID = call i32 @posix_memalign(i8** %ptr_LANE_ID, i32 %alignment, i32 %sz_LANE_ID)')
|
||||
|
||||
%r = load <WIDTH x i64> * %ret
|
||||
%r = load PTR_OP_ARGS(`<WIDTH x i64> ',` %ret')
|
||||
ret <WIDTH x i64> %r
|
||||
}
|
||||
|
||||
@@ -3451,10 +3451,10 @@ declare void @free(i8 *)
|
||||
|
||||
define noalias i8 * @__new_uniform_64rt(i64 %size) {
|
||||
%ptr = alloca i8*
|
||||
%alignment = load i32* @memory_alignment
|
||||
%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
|
||||
%alignment64 = sext i32 %alignment to i64
|
||||
%call1 = call i32 @posix_memalign(i8** %ptr, i64 %alignment64, i64 %size)
|
||||
%ptr_val = load i8** %ptr
|
||||
%ptr_val = load PTR_OP_ARGS(`i8*',`%ptr')
|
||||
ret i8* %ptr_val
|
||||
}
|
||||
|
||||
@@ -3462,7 +3462,7 @@ define <WIDTH x i64> @__new_varying32_64rt(<WIDTH x i32> %size, <WIDTH x MASK> %
|
||||
%ret = alloca <WIDTH x i64>
|
||||
store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %ret
|
||||
%ret64 = bitcast <WIDTH x i64> * %ret to i64 *
|
||||
%alignment = load i32* @memory_alignment
|
||||
%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
|
||||
%alignment64 = sext i32 %alignment to i64
|
||||
|
||||
per_lane(WIDTH, <WIDTH x MASK> %mask, `
|
||||
@@ -3472,7 +3472,7 @@ define <WIDTH x i64> @__new_varying32_64rt(<WIDTH x i32> %size, <WIDTH x MASK> %
|
||||
%ptr_LANE_ID = bitcast i64* %store_LANE_ID to i8**
|
||||
%call_LANE_ID = call i32 @posix_memalign(i8** %ptr_LANE_ID, i64 %alignment64, i64 %sz64_LANE_ID)')
|
||||
|
||||
%r = load <WIDTH x i64> * %ret
|
||||
%r = load PTR_OP_ARGS(`<WIDTH x i64> ',` %ret')
|
||||
ret <WIDTH x i64> %r
|
||||
}
|
||||
|
||||
@@ -3480,7 +3480,7 @@ define <WIDTH x i64> @__new_varying64_64rt(<WIDTH x i64> %size, <WIDTH x MASK> %
|
||||
%ret = alloca <WIDTH x i64>
|
||||
store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %ret
|
||||
%ret64 = bitcast <WIDTH x i64> * %ret to i64 *
|
||||
%alignment = load i32* @memory_alignment
|
||||
%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
|
||||
%alignment64 = sext i32 %alignment to i64
|
||||
|
||||
per_lane(WIDTH, <WIDTH x MASK> %mask, `
|
||||
@@ -3489,7 +3489,7 @@ define <WIDTH x i64> @__new_varying64_64rt(<WIDTH x i64> %size, <WIDTH x MASK> %
|
||||
%ptr_LANE_ID = bitcast i64* %store_LANE_ID to i8**
|
||||
%call_LANE_ID = call i32 @posix_memalign(i8** %ptr_LANE_ID, i64 %alignment64, i64 %sz64_LANE_ID)')
|
||||
|
||||
%r = load <WIDTH x i64> * %ret
|
||||
%r = load PTR_OP_ARGS(`<WIDTH x i64> ',` %ret')
|
||||
ret <WIDTH x i64> %r
|
||||
}
|
||||
|
||||
@@ -3533,7 +3533,7 @@ declare void @_aligned_free(i8 *)

define noalias i8 * @__new_uniform_32rt(i64 %size) {
%conv = trunc i64 %size to i32
%alignment = load i32* @memory_alignment
%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
%ptr = tail call i8* @_aligned_malloc(i32 %conv, i32 %alignment)
ret i8* %ptr
}
@@ -3542,7 +3542,7 @@ define <WIDTH x i64> @__new_varying32_32rt(<WIDTH x i32> %size, <WIDTH x MASK> %
%ret = alloca <WIDTH x i64>
store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %ret
%ret64 = bitcast <WIDTH x i64> * %ret to i64 *
%alignment = load i32* @memory_alignment
%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')

per_lane(WIDTH, <WIDTH x MASK> %mask, `
%sz_LANE_ID = extractelement <WIDTH x i32> %size, i32 LANE
@@ -3551,7 +3551,7 @@ define <WIDTH x i64> @__new_varying32_32rt(<WIDTH x i32> %size, <WIDTH x MASK> %
%store_LANE_ID = getelementptr PTR_OP_ARGS(`i64', `%ret64, i32 LANE')
store i64 %ptr_int_LANE_ID, i64 * %store_LANE_ID')

%r = load <WIDTH x i64> * %ret
%r = load PTR_OP_ARGS(`<WIDTH x i64> ',` %ret')
ret <WIDTH x i64> %r
}

@@ -3586,7 +3586,7 @@ declare i8* @_aligned_malloc(i64, i64)
declare void @_aligned_free(i8 *)

define noalias i8 * @__new_uniform_64rt(i64 %size) {
%alignment = load i32* @memory_alignment
%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
%alignment64 = sext i32 %alignment to i64
%ptr = tail call i8* @_aligned_malloc(i64 %size, i64 %alignment64)
ret i8* %ptr
@@ -3596,7 +3596,7 @@ define <WIDTH x i64> @__new_varying32_64rt(<WIDTH x i32> %size, <WIDTH x MASK> %
%ret = alloca <WIDTH x i64>
store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %ret
%ret64 = bitcast <WIDTH x i64> * %ret to i64 *
%alignment = load i32* @memory_alignment
%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
%alignment64 = sext i32 %alignment to i64

per_lane(WIDTH, <WIDTH x MASK> %mask, `
@@ -3607,7 +3607,7 @@ define <WIDTH x i64> @__new_varying32_64rt(<WIDTH x i32> %size, <WIDTH x MASK> %
%store_LANE_ID = getelementptr PTR_OP_ARGS(`i64', `%ret64, i32 LANE')
store i64 %ptr_int_LANE_ID, i64 * %store_LANE_ID')

%r = load <WIDTH x i64> * %ret
%r = load PTR_OP_ARGS(`<WIDTH x i64> ',` %ret')
ret <WIDTH x i64> %r
}

@@ -3615,7 +3615,7 @@ define <WIDTH x i64> @__new_varying64_64rt(<WIDTH x i64> %size, <WIDTH x MASK> %
%ret = alloca <WIDTH x i64>
store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %ret
%ret64 = bitcast <WIDTH x i64> * %ret to i64 *
%alignment = load i32* @memory_alignment
%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
%alignment64 = sext i32 %alignment to i64

per_lane(WIDTH, <WIDTH x MASK> %mask, `
@@ -3625,7 +3625,7 @@ define <WIDTH x i64> @__new_varying64_64rt(<WIDTH x i64> %size, <WIDTH x MASK> %
%store_LANE_ID = getelementptr PTR_OP_ARGS(`i64', `%ret64, i32 LANE')
store i64 %ptr_int_LANE_ID, i64 * %store_LANE_ID')

%r = load <WIDTH x i64> * %ret
%r = load PTR_OP_ARGS(`<WIDTH x i64> ',` %ret')
ret <WIDTH x i64> %r
}

@@ -3944,7 +3944,7 @@ define <$1 x i64> @__$2_varying_$3(<$1 x i64>, <$1 x i64>) nounwind alwaysinline
store i64 %v_`'i, i64 * %ptr_`'i
')

%ret = load <$1 x i64> * %rptr
%ret = load PTR_OP_ARGS(`<$1 x i64> ',` %rptr')
ret <$1 x i64> %ret
}
')
@@ -3990,7 +3990,7 @@ entry:

load:
%ptr = bitcast i8 * %0 to <WIDTH x $1> *
%valall = load <WIDTH x $1> * %ptr, align $2
%valall = load PTR_OP_ARGS(`<WIDTH x $1> ',` %ptr, align $2')
ret <WIDTH x $1> %valall

loop:
@@ -4007,7 +4007,7 @@ load_lane:
; allocaed memory above
%ptr32 = bitcast i8 * %0 to $1 *
%lane_ptr = getelementptr PTR_OP_ARGS(`$1', `%ptr32, i32 %lane')
%val = load $1 * %lane_ptr
%val = load PTR_OP_ARGS(`$1 ',` %lane_ptr')
%store_ptr = getelementptr PTR_OP_ARGS(`$1', `%retptr32, i32 %lane')
store $1 %val, $1 * %store_ptr
br label %lane_done
@@ -4018,7 +4018,7 @@ lane_done:
br i1 %done, label %return, label %loop

return:
%r = load <WIDTH x $1> * %retptr
%r = load PTR_OP_ARGS(`<WIDTH x $1> ',` %retptr')
ret <WIDTH x $1> %r
}
')
@@ -4042,7 +4042,7 @@ define void @__masked_store_$1(<WIDTH x $1>* nocapture, <WIDTH x $1>, <WIDTH x M
define(`masked_store_blend_8_16_by_4', `
define void @__masked_store_blend_i8(<4 x i8>* nocapture, <4 x i8>,
<4 x i32>) nounwind alwaysinline {
%old = load <4 x i8> * %0, align 1
%old = load PTR_OP_ARGS(`<4 x i8> ',` %0, align 1')

%m = trunc <4 x i32> %2 to <4 x i1>
%resultvec = select <4 x i1> %m, <4 x i8> %1, <4 x i8> %old
@@ -4053,7 +4053,7 @@ define void @__masked_store_blend_i8(<4 x i8>* nocapture, <4 x i8>,

define void @__masked_store_blend_i16(<4 x i16>* nocapture, <4 x i16>,
<4 x i32>) nounwind alwaysinline {
%old = load <4 x i16> * %0, align 2
%old = load PTR_OP_ARGS(`<4 x i16> ',` %0, align 2')

%m = trunc <4 x i32> %2 to <4 x i1>
%resultvec = select <4 x i1> %m, <4 x i16> %1, <4 x i16> %old
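
;; Editor's note: these blend helpers implement a masked store without any
;; hardware blend instruction: load the old contents, truncate each mask
;; lane to its low bit, and let select pick the new lane where the mask is
;; on. The store back falls outside the hunk; the complete i8 case reads
;; roughly (sketch, new load syntax):
;;
;;   %old = load <4 x i8>, <4 x i8>* %0, align 1
;;   %m = trunc <4 x i32> %2 to <4 x i1>
;;   %resultvec = select <4 x i1> %m, <4 x i8> %1, <4 x i8> %old
;;   store <4 x i8> %resultvec, <4 x i8>* %0, align 1
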
@@ -4066,7 +4066,7 @@ define void @__masked_store_blend_i16(<4 x i16>* nocapture, <4 x i16>,
define(`masked_store_blend_8_16_by_4_mask64', `
define void @__masked_store_blend_i8(<4 x i8>* nocapture, <4 x i8>,
<4 x i64>) nounwind alwaysinline {
%old = load <4 x i8> * %0, align 1
%old = load PTR_OP_ARGS(`<4 x i8> ',` %0, align 1')

%m = trunc <4 x i64> %2 to <4 x i1>
%resultvec = select <4 x i1> %m, <4 x i8> %1, <4 x i8> %old
@@ -4077,7 +4077,7 @@ define void @__masked_store_blend_i8(<4 x i8>* nocapture, <4 x i8>,

define void @__masked_store_blend_i16(<4 x i16>* nocapture, <4 x i16>,
<4 x i64>) nounwind alwaysinline {
%old = load <4 x i16> * %0, align 2
%old = load PTR_OP_ARGS(`<4 x i16> ',` %0, align 2')

%m = trunc <4 x i64> %2 to <4 x i1>
%resultvec = select <4 x i1> %m, <4 x i16> %1, <4 x i16> %old
@@ -4090,7 +4090,7 @@ define void @__masked_store_blend_i16(<4 x i16>* nocapture, <4 x i16>,
define(`masked_store_blend_8_16_by_8', `
define void @__masked_store_blend_i8(<8 x i8>* nocapture, <8 x i8>,
<8 x i32>) nounwind alwaysinline {
%old = load <8 x i8> * %0, align 1
%old = load PTR_OP_ARGS(`<8 x i8> ',` %0, align 1')

%m = trunc <8 x i32> %2 to <8 x i1>
%resultvec = select <8 x i1> %m, <8 x i8> %1, <8 x i8> %old
@@ -4101,7 +4101,7 @@ define void @__masked_store_blend_i8(<8 x i8>* nocapture, <8 x i8>,

define void @__masked_store_blend_i16(<8 x i16>* nocapture, <8 x i16>,
<8 x i32>) nounwind alwaysinline {
%old = load <8 x i16> * %0, align 2
%old = load PTR_OP_ARGS(`<8 x i16> ',` %0, align 2')

%m = trunc <8 x i32> %2 to <8 x i1>
%resultvec = select <8 x i1> %m, <8 x i16> %1, <8 x i16> %old
@@ -4115,7 +4115,7 @@ define void @__masked_store_blend_i16(<8 x i16>* nocapture, <8 x i16>,
define(`masked_store_blend_8_16_by_16', `
define void @__masked_store_blend_i8(<16 x i8>* nocapture, <16 x i8>,
<16 x i32>) nounwind alwaysinline {
%old = load <16 x i8> * %0, align 1
%old = load PTR_OP_ARGS(`<16 x i8> ',` %0, align 1')

%m = trunc <16 x i32> %2 to <16 x i1>
%resultvec = select <16 x i1> %m, <16 x i8> %1, <16 x i8> %old
@@ -4126,7 +4126,7 @@ define void @__masked_store_blend_i8(<16 x i8>* nocapture, <16 x i8>,

define void @__masked_store_blend_i16(<16 x i16>* nocapture, <16 x i16>,
<16 x i32>) nounwind alwaysinline {
%old = load <16 x i16> * %0, align 2
%old = load PTR_OP_ARGS(`<16 x i16> ',` %0, align 2')

%m = trunc <16 x i32> %2 to <16 x i1>
%resultvec = select <16 x i1> %m, <16 x i16> %1, <16 x i16> %old
@@ -4167,7 +4167,7 @@ all_on:
;; everyone wants to load, so just load an entire vector width in a single
;; vector load
%vecptr = bitcast i32 *%startptr to <WIDTH x i32> *
%vec_load = load <WIDTH x i32> *%vecptr, align 4
%vec_load = load PTR_OP_ARGS(`<WIDTH x i32> ',`%vecptr, align 4')
store <WIDTH x i32> %vec_load, <WIDTH x i32> * %val_ptr, align 4
ret i32 WIDTH

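;; Editor's note: the all_on block above is the fast path: when every lane is
;; active the values are consecutive in memory, so one vector load replaces
;; the lane-by-lane loop that follows. Under the new syntax the pair above
;; expands to (sketch):
;;
;;   %vecptr = bitcast i32* %startptr to <WIDTH x i32>*
;;   %vec_load = load <WIDTH x i32>, <WIDTH x i32>* %vecptr, align 4
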
@@ -4185,10 +4185,10 @@ loop:
br i1 %do_load, label %load, label %loopend

load:
%loadptr = getelementptr PTR_OP_ARGS(`i32', `startptr, i32 %offset')
%loadval = load i32 *%loadptr
%loadptr = getelementptr PTR_OP_ARGS(`i32', `%startptr, i32 %offset')
%loadval = load PTR_OP_ARGS(`i32 ',`%loadptr')
%val_ptr_i32 = bitcast <WIDTH x i32> * %val_ptr to i32 *
%storeptr = getelementptr PTR_OP_ARGS(`i32', `val_ptr_i32, i32 %lane')
%storeptr = getelementptr PTR_OP_ARGS(`i32', `%val_ptr_i32, i32 %lane')
store i32 %loadval, i32 *%storeptr
%offset1 = add i32 %offset, 1
br label %loopend
@@ -4237,7 +4237,7 @@ loop:

store:
%storeval = extractelement <WIDTH x i32> %vals, i32 %lane
%storeptr = getelementptr PTR_OP_ARGS(`i32', `startptr, i32 %offset')
%storeptr = getelementptr PTR_OP_ARGS(`i32', `%startptr, i32 %offset')
store i32 %storeval, i32 *%storeptr
%offset1 = add i32 %offset, 1
br label %loopend
@@ -4283,9 +4283,9 @@ loop:
;; zero or sign extending it, while zero extend is free. Also do nothing for
;; i64 MASK, as we need i64 value.
ifelse(MASK, `i64',
` %storeptr = getelementptr PTR_OP_ARGS(`i32', `startptr, MASK %offset',')
` %storeptr = getelementptr PTR_OP_ARGS(`i32', `%startptr, MASK %offset')',
` %offset1 = zext MASK %offset to i64
%storeptr = getelementptr PTR_OP_ARGS(`i32', `startptr, i64 %offset1')')
%storeptr = getelementptr PTR_OP_ARGS(`i32', `%startptr, i64 %offset1')')
store i32 %storeval, i32 *%storeptr

%mull_mask = extractelement <WIDTH x MASK> %full_mask, i32 %i
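
;; Editor's note: besides the PTR_OP_ARGS conversion, the hunk above repairs
;; two pre-existing problems: the misplaced m4 close-quote (the `',')' tail
;; left the i64 branch of the ifelse unterminated) and the missing `%' on
;; %startptr. For a 32-bit MASK the fixed expansion computes the store
;; address as (sketch, new getelementptr syntax):
;;
;;   %offset1 = zext i32 %offset to i64
;;   %storeptr = getelementptr i32, i32* %startptr, i64 %offset1
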
@@ -4351,7 +4351,7 @@ domixed:
%castptr = bitcast <$1 x $2> * %ptr to <$1 x $4> *
%castv = bitcast <$1 x $2> %v to <$1 x $4>
call void @__masked_store_blend_i$6(<$1 x $4> * %castptr, <$1 x $4> %castv, <$1 x MASK> %mask)
%blendvec = load <$1 x $2> * %ptr
%blendvec = load PTR_OP_ARGS(`<$1 x $2> ',` %ptr')
br label %check_neighbors

check_neighbors:
@@ -4427,7 +4427,7 @@ define <$1 x $2> @__exclusive_scan_$6(<$1 x $2> %v,
%vi = bitcast <$1 x $2> %v to <$1 x i`'$3>
call void @__masked_store_blend_i$3(<$1 x i`'$3> * %ptr`'$3, <$1 x i`'$3> %vi,
<$1 x MASK> %mask)
%v_id = load <$1 x $2> * %ptr
%v_id = load PTR_OP_ARGS(`<$1 x $2> ',` %ptr')

; extract elements of the vector to use in computing the scan
forloop(i, 0, eval($1-1), `
@@ -4547,12 +4547,12 @@ define <WIDTH x $1> @__gather32_$1(<WIDTH x i32> %ptrs,
per_lane(WIDTH, <WIDTH x MASK> %vecmask, `
%iptr_LANE_ID = extractelement <WIDTH x i32> %ptrs, i32 LANE
%ptr_LANE_ID = inttoptr i32 %iptr_LANE_ID to $1 *
%val_LANE_ID = load $1 * %ptr_LANE_ID
%val_LANE_ID = load PTR_OP_ARGS(`$1 ',` %ptr_LANE_ID')
%store_ptr_LANE_ID = getelementptr PTR_OP_ARGS(`<WIDTH x $1>', `%ret_ptr, i32 0, i32 LANE')
store $1 %val_LANE_ID, $1 * %store_ptr_LANE_ID
')

%ret = load <WIDTH x $1> * %ret_ptr
%ret = load PTR_OP_ARGS(`<WIDTH x $1> ',` %ret_ptr')
ret <WIDTH x $1> %ret
}

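;; Editor's note: this generic gather is fully scalar: each active lane's
;; integer address is turned back into a pointer, loaded, and written into a
;; stack temporary, and the single vector load of %ret_ptr at the end
;; assembles the result. Instantiated for $1 = float, lane 0 of __gather32
;; reads roughly (sketch, new syntax):
;;
;;   %iptr_0 = extractelement <WIDTH x i32> %ptrs, i32 0
;;   %ptr_0 = inttoptr i32 %iptr_0 to float*
;;   %val_0 = load float, float* %ptr_0
;;   %store_ptr_0 = getelementptr <WIDTH x float>, <WIDTH x float>* %ret_ptr, i32 0, i32 0
;;   store float %val_0, float* %store_ptr_0
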
@@ -4563,12 +4563,12 @@ define <WIDTH x $1> @__gather64_$1(<WIDTH x i64> %ptrs,
per_lane(WIDTH, <WIDTH x MASK> %vecmask, `
%iptr_LANE_ID = extractelement <WIDTH x i64> %ptrs, i32 LANE
%ptr_LANE_ID = inttoptr i64 %iptr_LANE_ID to $1 *
%val_LANE_ID = load $1 * %ptr_LANE_ID
%val_LANE_ID = load PTR_OP_ARGS(`$1 ',` %ptr_LANE_ID')
%store_ptr_LANE_ID = getelementptr PTR_OP_ARGS(`<WIDTH x $1>', `%ret_ptr, i32 0, i32 LANE')
store $1 %val_LANE_ID, $1 * %store_ptr_LANE_ID
')

%ret = load <WIDTH x $1> * %ret_ptr
%ret = load PTR_OP_ARGS(`<WIDTH x $1> ',` %ret_ptr')
ret <WIDTH x $1> %ret
}
')
@@ -4595,7 +4595,7 @@ define <WIDTH x $1> @__gather_elt32_$1(i8 * %ptr, <WIDTH x i32> %offsets, i32 %o

; load value and insert into returned value
%ptrcast = bitcast i8 * %finalptr to $1 *
%val = load $1 *%ptrcast
%val = load PTR_OP_ARGS(`$1 ',`%ptrcast')
%updatedret = insertelement <WIDTH x $1> %ret, $1 %val, i32 %lane
ret <WIDTH x $1> %updatedret
}
@@ -4616,7 +4616,7 @@ define <WIDTH x $1> @__gather_elt64_$1(i8 * %ptr, <WIDTH x i64> %offsets, i32 %o

; load value and insert into returned value
%ptrcast = bitcast i8 * %finalptr to $1 *
%val = load $1 *%ptrcast
%val = load PTR_OP_ARGS(`$1 ',`%ptrcast')
%updatedret = insertelement <WIDTH x $1> %ret, $1 %val, i32 %lane
ret <WIDTH x $1> %updatedret
}
@@ -4634,13 +4634,13 @@ define <WIDTH x $1> @__gather_factored_base_offsets32_$1(i8 * %ptr, <WIDTH x i32
store <WIDTH x i32> zeroinitializer, <WIDTH x i32> * %offsetsPtr
call void @__masked_store_blend_i32(<WIDTH x i32> * %offsetsPtr, <WIDTH x i32> %offsets,
<WIDTH x MASK> %vecmask)
%newOffsets = load <WIDTH x i32> * %offsetsPtr
%newOffsets = load PTR_OP_ARGS(`<WIDTH x i32> ',` %offsetsPtr')

%deltaPtr = alloca <WIDTH x i32>
store <WIDTH x i32> zeroinitializer, <WIDTH x i32> * %deltaPtr
call void @__masked_store_blend_i32(<WIDTH x i32> * %deltaPtr, <WIDTH x i32> %offset_delta,
<WIDTH x MASK> %vecmask)
%newDelta = load <WIDTH x i32> * %deltaPtr
%newDelta = load PTR_OP_ARGS(`<WIDTH x i32> ',` %deltaPtr')

%ret0 = call <WIDTH x $1> @__gather_elt32_$1(i8 * %ptr, <WIDTH x i32> %newOffsets,
i32 %offset_scale, <WIDTH x i32> %newDelta,
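
;; Editor's note: the two blend/load round-trips above zero the offset and
;; delta vectors in inactive lanes before the unconditional per-element
;; gathers, which presumably keeps every computed address within the base
;; allocation even for lanes that are off. Written directly in the new load
;; syntax, the offsets round-trip is (sketch):
;;
;;   store <WIDTH x i32> zeroinitializer, <WIDTH x i32>* %offsetsPtr
;;   call void @__masked_store_blend_i32(<WIDTH x i32>* %offsetsPtr,
;;                                       <WIDTH x i32> %offsets, <WIDTH x MASK> %vecmask)
;;   %newOffsets = load <WIDTH x i32>, <WIDTH x i32>* %offsetsPtr
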
@@ -4665,13 +4665,13 @@ define <WIDTH x $1> @__gather_factored_base_offsets64_$1(i8 * %ptr, <WIDTH x i64
store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %offsetsPtr
call void @__masked_store_blend_i64(<WIDTH x i64> * %offsetsPtr, <WIDTH x i64> %offsets,
<WIDTH x MASK> %vecmask)
%newOffsets = load <WIDTH x i64> * %offsetsPtr
%newOffsets = load PTR_OP_ARGS(`<WIDTH x i64> ',` %offsetsPtr')

%deltaPtr = alloca <WIDTH x i64>
store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %deltaPtr
call void @__masked_store_blend_i64(<WIDTH x i64> * %deltaPtr, <WIDTH x i64> %offset_delta,
<WIDTH x MASK> %vecmask)
%newDelta = load <WIDTH x i64> * %deltaPtr
%newDelta = load PTR_OP_ARGS(`<WIDTH x i64> ',` %deltaPtr')

%ret0 = call <WIDTH x $1> @__gather_elt64_$1(i8 * %ptr, <WIDTH x i64> %newOffsets,
i32 %offset_scale, <WIDTH x i64> %newDelta,