Some code style changes

This commit is contained in:
Vsevolod Livinskiy
2015-03-05 18:04:39 +03:00
parent f0aa481a2a
commit f92d351cf0
20 changed files with 272 additions and 272 deletions

View File

@@ -2142,7 +2142,7 @@ declare void @_aligned_free(i8 *)
define noalias i8 * @__new_uniform_32rt(i64 %size) {
%conv = trunc i64 %size to i32
%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
%alignment = load PTR_OP_ARGS(`i32') @memory_alignment
%ptr = tail call i8* @_aligned_malloc(i32 %conv, i32 %alignment)
ret i8* %ptr
}
@@ -2151,16 +2151,16 @@ define <WIDTH x i64> @__new_varying32_32rt(<WIDTH x i32> %size, <WIDTH x MASK> %
%ret = alloca <WIDTH x i64>
store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %ret
%ret64 = bitcast <WIDTH x i64> * %ret to i64 *
%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
%alignment = load PTR_OP_ARGS(`i32') @memory_alignment
per_lane(WIDTH, <WIDTH x MASK> %mask, `
%sz_LANE_ID = extractelement <WIDTH x i32> %size, i32 LANE
%ptr_LANE_ID = call noalias i8 * @_aligned_malloc(i32 %sz_LANE_ID, i32 %alignment)
%ptr_int_LANE_ID = ptrtoint i8 * %ptr_LANE_ID to i64
%store_LANE_ID = getelementptr PTR_OP_ARGS(`i64', `%ret64, i32 LANE')
%store_LANE_ID = getelementptr PTR_OP_ARGS(`i64') %ret64, i32 LANE
store i64 %ptr_int_LANE_ID, i64 * %store_LANE_ID')
%r = load PTR_OP_ARGS(`<WIDTH x i64> ',` %ret')
%r = load PTR_OP_ARGS(`<WIDTH x i64> ') %ret
ret <WIDTH x i64> %r
}
@@ -2195,7 +2195,7 @@ declare i8* @_aligned_malloc(i64, i64)
declare void @_aligned_free(i8 *)
define noalias i8 * @__new_uniform_64rt(i64 %size) {
%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
%alignment = load PTR_OP_ARGS(`i32') @memory_alignment
%alignment64 = sext i32 %alignment to i64
%ptr = tail call i8* @_aligned_malloc(i64 %size, i64 %alignment64)
ret i8* %ptr
@@ -2205,7 +2205,7 @@ define <WIDTH x i64> @__new_varying32_64rt(<WIDTH x i32> %size, <WIDTH x MASK> %
%ret = alloca <WIDTH x i64>
store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %ret
%ret64 = bitcast <WIDTH x i64> * %ret to i64 *
%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
%alignment = load PTR_OP_ARGS(`i32') @memory_alignment
%alignment64 = sext i32 %alignment to i64
per_lane(WIDTH, <WIDTH x MASK> %mask, `
@@ -2213,10 +2213,10 @@ define <WIDTH x i64> @__new_varying32_64rt(<WIDTH x i32> %size, <WIDTH x MASK> %
%sz64_LANE_ID = zext i32 %sz_LANE_ID to i64
%ptr_LANE_ID = call noalias i8 * @_aligned_malloc(i64 %sz64_LANE_ID, i64 %alignment64)
%ptr_int_LANE_ID = ptrtoint i8 * %ptr_LANE_ID to i64
%store_LANE_ID = getelementptr PTR_OP_ARGS(`i64', `%ret64, i32 LANE')
%store_LANE_ID = getelementptr PTR_OP_ARGS(`i64') %ret64, i32 LANE
store i64 %ptr_int_LANE_ID, i64 * %store_LANE_ID')
%r = load PTR_OP_ARGS(`<WIDTH x i64> ',` %ret')
%r = load PTR_OP_ARGS(`<WIDTH x i64> ') %ret
ret <WIDTH x i64> %r
}
@@ -2224,17 +2224,17 @@ define <WIDTH x i64> @__new_varying64_64rt(<WIDTH x i64> %size, <WIDTH x MASK> %
%ret = alloca <WIDTH x i64>
store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %ret
%ret64 = bitcast <WIDTH x i64> * %ret to i64 *
%alignment = load PTR_OP_ARGS(`i32',` @memory_alignment')
%alignment = load PTR_OP_ARGS(`i32') @memory_alignment
%alignment64 = sext i32 %alignment to i64
per_lane(WIDTH, <WIDTH x MASK> %mask, `
%sz64_LANE_ID = extractelement <WIDTH x i64> %size, i32 LANE
%ptr_LANE_ID = call noalias i8 * @_aligned_malloc(i64 %sz64_LANE_ID, i64 %alignment64)
%ptr_int_LANE_ID = ptrtoint i8 * %ptr_LANE_ID to i64
%store_LANE_ID = getelementptr PTR_OP_ARGS(`i64', `%ret64, i32 LANE')
%store_LANE_ID = getelementptr PTR_OP_ARGS(`i64') %ret64, i32 LANE
store i64 %ptr_int_LANE_ID, i64 * %store_LANE_ID')
%r = load PTR_OP_ARGS(`<WIDTH x i64> ',` %ret')
%r = load PTR_OP_ARGS(`<WIDTH x i64> ') %ret
ret <WIDTH x i64> %r
}
@@ -2433,11 +2433,11 @@ define <$1 x i64> @__$2_varying_$3(<$1 x i64>, <$1 x i64>) nounwind alwaysinline
%v1_`'i = extractelement <$1 x i64> %1, i32 i
%c_`'i = icmp $4 i64 %v0_`'i, %v1_`'i
%v_`'i = select i1 %c_`'i, i64 %v0_`'i, i64 %v1_`'i
%ptr_`'i = getelementptr PTR_OP_ARGS(`i64', `%r64ptr, i32 i')
%ptr_`'i = getelementptr PTR_OP_ARGS(`i64') %r64ptr, i32 i
store i64 %v_`'i, i64 * %ptr_`'i
')
%ret = load PTR_OP_ARGS(`<$1 x i64> ',` %rptr')
%ret = load PTR_OP_ARGS(`<$1 x i64> ') %rptr
ret <$1 x i64> %ret
}
')
@@ -2483,7 +2483,7 @@ entry:
load:
%ptr = bitcast i8 * %0 to <WIDTH x $1> *
%valall = load PTR_OP_ARGS(`<WIDTH x $1> ',` %ptr, align $2')
%valall = load PTR_OP_ARGS(`<WIDTH x $1> ') %ptr, align $2
ret <WIDTH x $1> %valall
loop:
@@ -2499,9 +2499,9 @@ load_lane:
; yes! do the load and store the result into the appropriate place in the
; allocaed memory above
%ptr32 = bitcast i8 * %0 to $1 *
%lane_ptr = getelementptr PTR_OP_ARGS(`$1', `%ptr32, i32 %lane')
%val = load PTR_OP_ARGS(`$1 ',` %lane_ptr')
%store_ptr = getelementptr PTR_OP_ARGS(`$1', `%retptr32, i32 %lane')
%lane_ptr = getelementptr PTR_OP_ARGS(`$1') %ptr32, i32 %lane
%val = load PTR_OP_ARGS(`$1 ') %lane_ptr
%store_ptr = getelementptr PTR_OP_ARGS(`$1') %retptr32, i32 %lane
store $1 %val, $1 * %store_ptr
br label %lane_done
@@ -2511,7 +2511,7 @@ lane_done:
br i1 %done, label %return, label %loop
return:
%r = load PTR_OP_ARGS(`<WIDTH x $1> ',` %retptr')
%r = load PTR_OP_ARGS(`<WIDTH x $1> ') %retptr
ret <WIDTH x $1> %r
}
')
@@ -2525,7 +2525,7 @@ return:
define(`gen_masked_store', `
define void @__masked_store_$1(<WIDTH x $1>* nocapture, <WIDTH x $1>, <WIDTH x MASK>) nounwind alwaysinline {
per_lane(WIDTH, <WIDTH x MASK> %2, `
%ptr_LANE_ID = getelementptr PTR_OP_ARGS(`<WIDTH x $1>', `%0, i32 0, i32 LANE')
%ptr_LANE_ID = getelementptr PTR_OP_ARGS(`<WIDTH x $1>') %0, i32 0, i32 LANE
%storeval_LANE_ID = extractelement <WIDTH x $1> %1, i32 LANE
store $1 %storeval_LANE_ID, $1 * %ptr_LANE_ID')
ret void
@@ -2535,7 +2535,7 @@ define void @__masked_store_$1(<WIDTH x $1>* nocapture, <WIDTH x $1>, <WIDTH x M
define(`masked_store_blend_8_16_by_4', `
define void @__masked_store_blend_i8(<4 x i8>* nocapture, <4 x i8>,
<4 x i32>) nounwind alwaysinline {
%old = load PTR_OP_ARGS(`<4 x i8> ',` %0, align 1')
%old = load PTR_OP_ARGS(`<4 x i8> ') %0, align 1
ifelse(LLVM_VERSION,LLVM_3_0,`
%old32 = bitcast <4 x i8> %old to i32
%new32 = bitcast <4 x i8> %1 to i32
@@ -2559,7 +2559,7 @@ define void @__masked_store_blend_i8(<4 x i8>* nocapture, <4 x i8>,
define void @__masked_store_blend_i16(<4 x i16>* nocapture, <4 x i16>,
<4 x i32>) nounwind alwaysinline {
%old = load PTR_OP_ARGS(`<4 x i16> ',` %0, align 2')
%old = load PTR_OP_ARGS(`<4 x i16> ') %0, align 2
ifelse(LLVM_VERSION,LLVM_3_0,`
%old64 = bitcast <4 x i16> %old to i64
%new64 = bitcast <4 x i16> %1 to i64
@@ -2585,7 +2585,7 @@ define void @__masked_store_blend_i16(<4 x i16>* nocapture, <4 x i16>,
define(`masked_store_blend_8_16_by_4_mask64', `
define void @__masked_store_blend_i8(<4 x i8>* nocapture, <4 x i8>,
<4 x i64>) nounwind alwaysinline {
%old = load PTR_OP_ARGS(`<4 x i8> ',` %0, align 1')
%old = load PTR_OP_ARGS(`<4 x i8> ') %0, align 1
ifelse(LLVM_VERSION,LLVM_3_0,`
%old32 = bitcast <4 x i8> %old to i32
%new32 = bitcast <4 x i8> %1 to i32
@@ -2609,7 +2609,7 @@ define void @__masked_store_blend_i8(<4 x i8>* nocapture, <4 x i8>,
define void @__masked_store_blend_i16(<4 x i16>* nocapture, <4 x i16>,
<4 x i64>) nounwind alwaysinline {
%old = load PTR_OP_ARGS(`<4 x i16> ',` %0, align 2')
%old = load PTR_OP_ARGS(`<4 x i16> ') %0, align 2
ifelse(LLVM_VERSION,LLVM_3_0,`
%old64 = bitcast <4 x i16> %old to i64
%new64 = bitcast <4 x i16> %1 to i64
@@ -2635,7 +2635,7 @@ define void @__masked_store_blend_i16(<4 x i16>* nocapture, <4 x i16>,
define(`masked_store_blend_8_16_by_8', `
define void @__masked_store_blend_i8(<8 x i8>* nocapture, <8 x i8>,
<8 x i32>) nounwind alwaysinline {
%old = load PTR_OP_ARGS(`<8 x i8> ',` %0, align 1')
%old = load PTR_OP_ARGS(`<8 x i8> ') %0, align 1
ifelse(LLVM_VERSION,LLVM_3_0,`
%old64 = bitcast <8 x i8> %old to i64
%new64 = bitcast <8 x i8> %1 to i64
@@ -2659,7 +2659,7 @@ define void @__masked_store_blend_i8(<8 x i8>* nocapture, <8 x i8>,
define void @__masked_store_blend_i16(<8 x i16>* nocapture, <8 x i16>,
<8 x i32>) nounwind alwaysinline {
%old = load PTR_OP_ARGS(`<8 x i16> ',` %0, align 2')
%old = load PTR_OP_ARGS(`<8 x i16> ') %0, align 2
ifelse(LLVM_VERSION,LLVM_3_0,`
%old128 = bitcast <8 x i16> %old to i128
%new128 = bitcast <8 x i16> %1 to i128
@@ -2686,7 +2686,7 @@ define void @__masked_store_blend_i16(<8 x i16>* nocapture, <8 x i16>,
define(`masked_store_blend_8_16_by_16', `
define void @__masked_store_blend_i8(<16 x i8>* nocapture, <16 x i8>,
<16 x i32>) nounwind alwaysinline {
%old = load PTR_OP_ARGS(`<16 x i8> ',` %0, align 1')
%old = load PTR_OP_ARGS(`<16 x i8> ') %0, align 1
ifelse(LLVM_VERSION,LLVM_3_0,`
%old128 = bitcast <16 x i8> %old to i128
%new128 = bitcast <16 x i8> %1 to i128
@@ -2710,7 +2710,7 @@ define void @__masked_store_blend_i8(<16 x i8>* nocapture, <16 x i8>,
define void @__masked_store_blend_i16(<16 x i16>* nocapture, <16 x i16>,
<16 x i32>) nounwind alwaysinline {
%old = load PTR_OP_ARGS(`<16 x i16> ',` %0, align 2')
%old = load PTR_OP_ARGS(`<16 x i16> ') %0, align 2
ifelse(LLVM_VERSION,LLVM_3_0,`
%old256 = bitcast <16 x i16> %old to i256
%new256 = bitcast <16 x i16> %1 to i256
@@ -2759,8 +2759,8 @@ entry:
if.then: ; preds = %entry
%idxprom = ashr i64 %call, 32
%arrayidx = getelementptr inbounds PTR_OP_ARGS(`i32', `startptr, i64 %idxprom')
%val = load PTR_OP_ARGS(`i32',` %arrayidx, align 4')
%arrayidx = getelementptr inbounds PTR_OP_ARGS(`i32') startptr, i64 %idxprom
%val = load PTR_OP_ARGS(`i32') %arrayidx, align 4
%valvec = insertelement <1 x i32> undef, i32 %val, i32 0
store <1 x i32> %valvec, <1 x i32>* %val_ptr, align 4
br label %if.end
@@ -2780,7 +2780,7 @@ entry:
if.then: ; preds = %entry
%idxprom = ashr i64 %call, 32
%arrayidx = getelementptr inbounds PTR_OP_ARGS(`i32', `startptr, i64 %idxprom')
%arrayidx = getelementptr inbounds PTR_OP_ARGS(`i32') startptr, i64 %idxprom
%val = extractelement <1 x i32> %vals, i32 0
store i32 %val, i32* %arrayidx, align 4
br label %if.end
@@ -2849,7 +2849,7 @@ domixed:
%castptr = bitcast <$1 x $2> * %ptr to <$1 x $4> *
%castv = bitcast <$1 x $2> %v to <$1 x $4>
call void @__masked_store_blend_i$6(<$1 x $4> * %castptr, <$1 x $4> %castv, <$1 x MASK> %mask)
%blendvec = load PTR_OP_ARGS(`<$1 x $2> ',` %ptr')
%blendvec = load PTR_OP_ARGS(`<$1 x $2> ') %ptr
br label %check_neighbors
check_neighbors:
@@ -2985,12 +2985,12 @@ define <WIDTH x $1> @__gather32_$1(<WIDTH x i32> %ptrs,
per_lane(WIDTH, <WIDTH x MASK> %vecmask, `
%iptr_LANE_ID = extractelement <WIDTH x i32> %ptrs, i32 LANE
%ptr_LANE_ID = inttoptr i32 %iptr_LANE_ID to $1 *
%val_LANE_ID = load PTR_OP_ARGS(`$1 ',` %ptr_LANE_ID')
%store_ptr_LANE_ID = getelementptr PTR_OP_ARGS(`<WIDTH x $1>', `%ret_ptr, i32 0, i32 LANE')
%val_LANE_ID = load PTR_OP_ARGS(`$1 ') %ptr_LANE_ID
%store_ptr_LANE_ID = getelementptr PTR_OP_ARGS(`<WIDTH x $1>') %ret_ptr, i32 0, i32 LANE
store $1 %val_LANE_ID, $1 * %store_ptr_LANE_ID
')
%ret = load PTR_OP_ARGS(`<WIDTH x $1> ',` %ret_ptr')
%ret = load PTR_OP_ARGS(`<WIDTH x $1> ') %ret_ptr
ret <WIDTH x $1> %ret
}
@@ -3001,12 +3001,12 @@ define <WIDTH x $1> @__gather64_$1(<WIDTH x i64> %ptrs,
per_lane(WIDTH, <WIDTH x MASK> %vecmask, `
%iptr_LANE_ID = extractelement <WIDTH x i64> %ptrs, i32 LANE
%ptr_LANE_ID = inttoptr i64 %iptr_LANE_ID to $1 *
%val_LANE_ID = load PTR_OP_ARGS(`$1 ',` %ptr_LANE_ID')
%store_ptr_LANE_ID = getelementptr PTR_OP_ARGS(`<WIDTH x $1>', `%ret_ptr, i32 0, i32 LANE')
%val_LANE_ID = load PTR_OP_ARGS(`$1 ') %ptr_LANE_ID
%store_ptr_LANE_ID = getelementptr PTR_OP_ARGS(`<WIDTH x $1>') %ret_ptr, i32 0, i32 LANE
store $1 %val_LANE_ID, $1 * %store_ptr_LANE_ID
')
%ret = load PTR_OP_ARGS(`<WIDTH x $1> ',` %ret_ptr')
%ret = load PTR_OP_ARGS(`<WIDTH x $1> ') %ret_ptr
ret <WIDTH x $1> %ret
}
')
@@ -3025,15 +3025,15 @@ define <WIDTH x $1> @__gather_elt32_$1(i8 * %ptr, <WIDTH x i32> %offsets, i32 %o
%offset64 = sext i32 %offset32 to i64
%scale64 = sext i32 %offset_scale to i64
%offset = mul i64 %offset64, %scale64
%ptroffset = getelementptr PTR_OP_ARGS(`i8', `%ptr, i64 %offset')
%ptroffset = getelementptr PTR_OP_ARGS(`i8') %ptr, i64 %offset
%delta = extractelement <WIDTH x i32> %offset_delta, i32 %lane
%delta64 = sext i32 %delta to i64
%finalptr = getelementptr PTR_OP_ARGS(`i8', `%ptroffset, i64 %delta64')
%finalptr = getelementptr PTR_OP_ARGS(`i8') %ptroffset, i64 %delta64
; load value and insert into returned value
%ptrcast = bitcast i8 * %finalptr to $1 *
%val = load PTR_OP_ARGS(`$1 ',`%ptrcast')
%val = load PTR_OP_ARGS(`$1 ') %ptrcast
%updatedret = insertelement <WIDTH x $1> %ret, $1 %val, i32 %lane
ret <WIDTH x $1> %updatedret
}
@@ -3047,14 +3047,14 @@ define <WIDTH x $1> @__gather_elt64_$1(i8 * %ptr, <WIDTH x i64> %offsets, i32 %o
; patterns that apply the free x86 2x/4x/8x scaling in addressing calculations
%offset_scale64 = sext i32 %offset_scale to i64
%offset = mul i64 %offset64, %offset_scale64
%ptroffset = getelementptr PTR_OP_ARGS(`i8', `%ptr, i64 %offset')
%ptroffset = getelementptr PTR_OP_ARGS(`i8') %ptr, i64 %offset
%delta64 = extractelement <WIDTH x i64> %offset_delta, i32 %lane
%finalptr = getelementptr PTR_OP_ARGS(`i8', `%ptroffset, i64 %delta64')
%finalptr = getelementptr PTR_OP_ARGS(`i8') %ptroffset, i64 %delta64
; load value and insert into returned value
%ptrcast = bitcast i8 * %finalptr to $1 *
%val = load PTR_OP_ARGS(`$1 ',`%ptrcast')
%val = load PTR_OP_ARGS(`$1 ') %ptrcast
%updatedret = insertelement <WIDTH x $1> %ret, $1 %val, i32 %lane
ret <WIDTH x $1> %updatedret
}
@@ -3072,13 +3072,13 @@ define <WIDTH x $1> @__gather_factored_base_offsets32_$1(i8 * %ptr, <WIDTH x i32
store <WIDTH x i32> zeroinitializer, <WIDTH x i32> * %offsetsPtr
call void @__masked_store_blend_i32(<WIDTH x i32> * %offsetsPtr, <WIDTH x i32> %offsets,
<WIDTH x MASK> %vecmask)
%newOffsets = load PTR_OP_ARGS(`<WIDTH x i32> ',` %offsetsPtr')
%newOffsets = load PTR_OP_ARGS(`<WIDTH x i32> ') %offsetsPtr
%deltaPtr = alloca <WIDTH x i32>
store <WIDTH x i32> zeroinitializer, <WIDTH x i32> * %deltaPtr
call void @__masked_store_blend_i32(<WIDTH x i32> * %deltaPtr, <WIDTH x i32> %offset_delta,
<WIDTH x MASK> %vecmask)
%newDelta = load PTR_OP_ARGS(`<WIDTH x i32> ',` %deltaPtr')
%newDelta = load PTR_OP_ARGS(`<WIDTH x i32> ') %deltaPtr
%ret0 = call <WIDTH x $1> @__gather_elt32_$1(i8 * %ptr, <WIDTH x i32> %newOffsets,
i32 %offset_scale, <WIDTH x i32> %newDelta,
@@ -3103,13 +3103,13 @@ define <WIDTH x $1> @__gather_factored_base_offsets64_$1(i8 * %ptr, <WIDTH x i64
store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %offsetsPtr
call void @__masked_store_blend_i64(<WIDTH x i64> * %offsetsPtr, <WIDTH x i64> %offsets,
<WIDTH x MASK> %vecmask)
%newOffsets = load PTR_OP_ARGS(`<WIDTH x i64> ',` %offsetsPtr')
%newOffsets = load PTR_OP_ARGS(`<WIDTH x i64> ') %offsetsPtr
%deltaPtr = alloca <WIDTH x i64>
store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %deltaPtr
call void @__masked_store_blend_i64(<WIDTH x i64> * %deltaPtr, <WIDTH x i64> %offset_delta,
<WIDTH x MASK> %vecmask)
%newDelta = load PTR_OP_ARGS(`<WIDTH x i64> ',` %deltaPtr')
%newDelta = load PTR_OP_ARGS(`<WIDTH x i64> ') %deltaPtr
%ret0 = call <WIDTH x $1> @__gather_elt64_$1(i8 * %ptr, <WIDTH x i64> %newOffsets,
i32 %offset_scale, <WIDTH x i64> %newDelta,
@@ -3180,11 +3180,11 @@ define void @__scatter_elt32_$1(i8 * %ptr, <WIDTH x i32> %offsets, i32 %offset_s
%offset64 = sext i32 %offset32 to i64
%scale64 = sext i32 %offset_scale to i64
%offset = mul i64 %offset64, %scale64
%ptroffset = getelementptr PTR_OP_ARGS(`i8', `%ptr, i64 %offset')
%ptroffset = getelementptr PTR_OP_ARGS(`i8') %ptr, i64 %offset
%delta = extractelement <WIDTH x i32> %offset_delta, i32 %lane
%delta64 = sext i32 %delta to i64
%finalptr = getelementptr PTR_OP_ARGS(`i8', `%ptroffset, i64 %delta64')
%finalptr = getelementptr PTR_OP_ARGS(`i8') %ptroffset, i64 %delta64
%ptrcast = bitcast i8 * %finalptr to $1 *
%storeval = extractelement <WIDTH x $1> %values, i32 %lane
@@ -3200,10 +3200,10 @@ define void @__scatter_elt64_$1(i8 * %ptr, <WIDTH x i64> %offsets, i32 %offset_s
; patterns that apply the free x86 2x/4x/8x scaling in addressing calculations
%scale64 = sext i32 %offset_scale to i64
%offset = mul i64 %offset64, %scale64
%ptroffset = getelementptr PTR_OP_ARGS(`i8', `%ptr, i64 %offset')
%ptroffset = getelementptr PTR_OP_ARGS(`i8') %ptr, i64 %offset
%delta64 = extractelement <WIDTH x i64> %offset_delta, i32 %lane
%finalptr = getelementptr PTR_OP_ARGS(`i8', `%ptroffset, i64 %delta64')
%finalptr = getelementptr PTR_OP_ARGS(`i8') %ptroffset, i64 %delta64
%ptrcast = bitcast i8 * %finalptr to $1 *
%storeval = extractelement <WIDTH x $1> %values, i32 %lane