From 4f486333ed06016632e85c69a92837bd4d2f3909 Mon Sep 17 00:00:00 2001 From: Evghenii Date: Mon, 28 Oct 2013 16:47:40 +0100 Subject: [PATCH 01/12] now nvptx allows extern "C" task void, which is emits a kernel that should (?) be callable by driver API from external code --- builtins/target-nvptx64.ll | 1256 ++++++++++++------------------------ ispc.cpp | 4 +- 2 files changed, 407 insertions(+), 853 deletions(-) diff --git a/builtins/target-nvptx64.ll b/builtins/target-nvptx64.ll index 3da3f747..79437ac8 100644 --- a/builtins/target-nvptx64.ll +++ b/builtins/target-nvptx64.ll @@ -1,60 +1,9 @@ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Define the standard library builtins for the NOVEC target -define(`MASK',`i32') +define(`MASK',`i1') define(`WIDTH',`1') -include(`util.m4') - -include(`svml.m4') -svml_stubs(float,f,WIDTH) -svml_stubs(double,d,WIDTH) - -; Define some basics for a 1-wide target -stdlib_core() -packed_load_and_store() -scans() -int64minmax() -aossoa() -rdrand_decls() - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; masked store - -gen_masked_store(i8) -gen_masked_store(i16) -gen_masked_store(i32) -gen_masked_store(i64) - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; unaligned loads/loads+broadcasts - - -masked_load(i8, 1) -masked_load(i16, 2) -masked_load(i32, 4) -masked_load(float, 4) -masked_load(i64, 8) -masked_load(double, 8) - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; gather/scatter - -; define these with the macros from stdlib.m4 - -gen_gather_factored(i8) -gen_gather_factored(i16) -gen_gather_factored(i32) -gen_gather_factored(float) -gen_gather_factored(i64) -gen_gather_factored(double) - -gen_scatter(i8) -gen_scatter(i16) -gen_scatter(i32) -gen_scatter(float) -gen_scatter(i64) -gen_scatter(double) - +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64" ;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;; @@ -112,813 +61,80 @@ define i32 @__nctaid_z() nounwind readnone alwaysinline ret i32 %nb } -;;;;;;;;;;;;;;;;;;;;; -;;;;;;;;;;;;;;;;;;;;; -;;;;;;;;;;;;;;;;;;;;; - -define <1 x i8> @__vselect_i8(<1 x i8>, <1 x i8> , - <1 x i32> %mask) nounwind readnone alwaysinline { -; %mv = trunc <1 x i32> %mask to <1 x i8> -; %notmask = xor <1 x i8> %mv, -; %cleared_old = and <1 x i8> %0, %notmask -; %masked_new = and <1 x i8> %1, %mv -; %new = or <1 x i8> %cleared_old, %masked_new -; ret <1 x i8> %new - - ; not doing this the easy way because of problems with LLVM's scalarizer -; %cmp = icmp eq <1 x i32> %mask, -; %sel = select <1 x i1> %cmp, <1 x i8> %0, <1 x i8> %1 - %m = extractelement <1 x i32> %mask, i32 0 - %cmp = icmp eq i32 %m, 0 - %d0 = extractelement <1 x i8> %0, i32 0 - %d1 = extractelement <1 x i8> %1, i32 0 - %sel = select i1 %cmp, i8 %d0, i8 %d1 - %r = insertelement <1 x i8> undef, i8 %sel, i32 0 - ret <1 x i8> %r -} - -define <1 x i16> @__vselect_i16(<1 x i16>, <1 x i16> , - <1 x i32> %mask) nounwind readnone alwaysinline { -; %mv = trunc <1 x i32> %mask to <1 x i16> -; %notmask = xor <1 x i16> %mv, -; %cleared_old = and <1 x i16> %0, %notmask -; %masked_new = and <1 x i16> %1, %mv -; %new = or <1 x i16> %cleared_old, %masked_new -; ret <1 x i16> %new -; %cmp = icmp eq <1 x i32> %mask, -; %sel = select <1 x i1> %cmp, <1 x i16> %0, <1 x i16> %1 - %m = extractelement <1 x i32> %mask, i32 0 - %cmp = icmp eq i32 %m, 0 - %d0 = extractelement 
<1 x i16> %0, i32 0 - %d1 = extractelement <1 x i16> %1, i32 0 - %sel = select i1 %cmp, i16 %d0, i16 %d1 - %r = insertelement <1 x i16> undef, i16 %sel, i32 0 - ret <1 x i16> %r - -; ret <1 x i16> %sel -} +;;;;;;;;;;;;;; -define <1 x i32> @__vselect_i32(<1 x i32>, <1 x i32> , - <1 x i32> %mask) nounwind readnone alwaysinline { -; %notmask = xor <1 x i32> %mask, -; %cleared_old = and <1 x i32> %0, %notmask -; %masked_new = and <1 x i32> %1, %mask -; %new = or <1 x i32> %cleared_old, %masked_new -; ret <1 x i32> %new -; %cmp = icmp eq <1 x i32> %mask, -; %sel = select <1 x i1> %cmp, <1 x i32> %0, <1 x i32> %1 -; ret <1 x i32> %sel - %m = extractelement <1 x i32> %mask, i32 0 - %cmp = icmp eq i32 %m, 0 - %d0 = extractelement <1 x i32> %0, i32 0 - %d1 = extractelement <1 x i32> %1, i32 0 - %sel = select i1 %cmp, i32 %d0, i32 %d1 - %r = insertelement <1 x i32> undef, i32 %sel, i32 0 - ret <1 x i32> %r - -} - -define <1 x i64> @__vselect_i64(<1 x i64>, <1 x i64> , - <1 x i32> %mask) nounwind readnone alwaysinline { -; %newmask = zext <1 x i32> %mask to <1 x i64> -; %notmask = xor <1 x i64> %newmask, -; %cleared_old = and <1 x i64> %0, %notmask -; %masked_new = and <1 x i64> %1, %newmask -; %new = or <1 x i64> %cleared_old, %masked_new -; ret <1 x i64> %new -; %cmp = icmp eq <1 x i32> %mask, -; %sel = select <1 x i1> %cmp, <1 x i64> %0, <1 x i64> %1 -; ret <1 x i64> %sel - %m = extractelement <1 x i32> %mask, i32 0 - %cmp = icmp eq i32 %m, 0 - %d0 = extractelement <1 x i64> %0, i32 0 - %d1 = extractelement <1 x i64> %1, i32 0 - %sel = select i1 %cmp, i64 %d0, i64 %d1 - %r = insertelement <1 x i64> undef, i64 %sel, i32 0 - ret <1 x i64> %r - -} - -define <1 x float> @__vselect_float(<1 x float>, <1 x float>, - <1 x i32> %mask) nounwind readnone alwaysinline { -; %v0 = bitcast <1 x float> %0 to <1 x i32> -; %v1 = bitcast <1 x float> %1 to <1 x i32> -; %r = call <1 x i32> @__vselect_i32(<1 x i32> %v0, <1 x i32> %v1, <1 x i32> %mask) -; %rf = bitcast <1 x i32> %r to <1 x float> -; ret <1 x float> %rf -; %cmp = icmp eq <1 x i32> %mask, -; %sel = select <1 x i1> %cmp, <1 x float> %0, <1 x float> %1 -; ret <1 x float> %sel - %m = extractelement <1 x i32> %mask, i32 0 - %cmp = icmp eq i32 %m, 0 - %d0 = extractelement <1 x float> %0, i32 0 - %d1 = extractelement <1 x float> %1, i32 0 - %sel = select i1 %cmp, float %d0, float %d1 - %r = insertelement <1 x float> undef, float %sel, i32 0 - ret <1 x float> %r - -} +include(`util.m4') +stdlib_core() +packed_load_and_store() +scans() +rdrand_decls() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; masked store - -define void @__masked_store_blend_i8(<1 x i8>* nocapture, <1 x i8>, - <1 x i32> %mask) nounwind alwaysinline { - %val = load <1 x i8> * %0, align 4 - %newval = call <1 x i8> @__vselect_i8(<1 x i8> %val, <1 x i8> %1, <1 x i32> %mask) - store <1 x i8> %newval, <1 x i8> * %0, align 4 - ret void -} - -define void @__masked_store_blend_i16(<1 x i16>* nocapture, <1 x i16>, - <1 x i32> %mask) nounwind alwaysinline { - %val = load <1 x i16> * %0, align 4 - %newval = call <1 x i16> @__vselect_i16(<1 x i16> %val, <1 x i16> %1, <1 x i32> %mask) - store <1 x i16> %newval, <1 x i16> * %0, align 4 - ret void -} - -define void @__masked_store_blend_i32(<1 x i32>* nocapture, <1 x i32>, - <1 x i32> %mask) nounwind alwaysinline { - %val = load <1 x i32> * %0, align 4 - %newval = call <1 x i32> @__vselect_i32(<1 x i32> %val, <1 x i32> %1, <1 x i32> %mask) - store <1 x i32> %newval, <1 x i32> * %0, align 4 - ret void -} - -define void 
@__masked_store_blend_i64(<1 x i64>* nocapture, <1 x i64>, - <1 x i32> %mask) nounwind alwaysinline { - %val = load <1 x i64> * %0, align 4 - %newval = call <1 x i64> @__vselect_i64(<1 x i64> %val, <1 x i64> %1, <1 x i32> %mask) - store <1 x i64> %newval, <1 x i64> * %0, align 4 - ret void -} - -masked_store_float_double() - -define i64 @__movmsk(<1 x i32>) nounwind readnone alwaysinline { - %item = extractelement <1 x i32> %0, i32 0 - %v = lshr i32 %item, 31 - %v64 = zext i32 %v to i64 - ret i64 %v64 -} - -define i1 @__any(<1 x i32>) nounwind readnone alwaysinline { - %item = extractelement <1 x i32> %0, i32 0 - %v = lshr i32 %item, 31 - %cmp = icmp ne i32 %v, 0 - ret i1 %cmp -} - -define i1 @__all(<1 x i32>) nounwind readnone alwaysinline { - %item = extractelement <1 x i32> %0, i32 0 - %v = lshr i32 %item, 31 - %cmp = icmp eq i32 %v, 1 - ret i1 %cmp -} - -define i1 @__none(<1 x i32>) nounwind readnone alwaysinline { - %item = extractelement <1 x i32> %0, i32 0 - %v = lshr i32 %item, 31 - %cmp = icmp eq i32 %v, 0 - ret i1 %cmp -} - - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; rounding -;; -;; There are not any rounding instructions in SSE2, so we have to emulate -;; the functionality with multiple instructions... - -; The code for __round_* is the result of compiling the following source -; code. -; -; export float Round(float x) { -; unsigned int sign = signbits(x); -; unsigned int ix = intbits(x); -; ix ^= sign; -; x = floatbits(ix); -; x += 0x1.0p23f; -; x -= 0x1.0p23f; -; ix = intbits(x); -; ix ^= sign; -; x = floatbits(ix); -; return x; -;} - -define <1 x float> @__round_varying_float(<1 x float>) nounwind readonly alwaysinline { - %float_to_int_bitcast.i.i.i.i = bitcast <1 x float> %0 to <1 x i32> - %bitop.i.i = and <1 x i32> %float_to_int_bitcast.i.i.i.i, - %bitop.i = xor <1 x i32> %float_to_int_bitcast.i.i.i.i, %bitop.i.i - %int_to_float_bitcast.i.i40.i = bitcast <1 x i32> %bitop.i to <1 x float> - %binop.i = fadd <1 x float> %int_to_float_bitcast.i.i40.i, - %binop21.i = fadd <1 x float> %binop.i, - %float_to_int_bitcast.i.i.i = bitcast <1 x float> %binop21.i to <1 x i32> - %bitop31.i = xor <1 x i32> %float_to_int_bitcast.i.i.i, %bitop.i.i - %int_to_float_bitcast.i.i.i = bitcast <1 x i32> %bitop31.i to <1 x float> - ret <1 x float> %int_to_float_bitcast.i.i.i -} - -;; Similarly, for implementations of the __floor* functions below, we have the -;; bitcode from compiling the following source code... - -;export float Floor(float x) { -; float y = Round(x); -; unsigned int cmp = y > x ? 0xffffffff : 0; -; float delta = -1.f; -; unsigned int idelta = intbits(delta); -; idelta &= cmp; -; delta = floatbits(idelta); -; return y + delta; -;} - -define <1 x float> @__floor_varying_float(<1 x float>) nounwind readonly alwaysinline { - %calltmp.i = tail call <1 x float> @__round_varying_float(<1 x float> %0) nounwind - %bincmp.i = fcmp ogt <1 x float> %calltmp.i, %0 - %val_to_boolvec32.i = sext <1 x i1> %bincmp.i to <1 x i32> - %bitop.i = and <1 x i32> %val_to_boolvec32.i, - %int_to_float_bitcast.i.i.i = bitcast <1 x i32> %bitop.i to <1 x float> - %binop.i = fadd <1 x float> %calltmp.i, %int_to_float_bitcast.i.i.i - ret <1 x float> %binop.i -} - -;; And here is the code we compiled to get the __ceil* functions below -; -;export uniform float Ceil(uniform float x) { -; uniform float y = Round(x); -; uniform int yltx = y < x ? 
0xffffffff : 0; -; uniform float delta = 1.f; -; uniform int idelta = intbits(delta); -; idelta &= yltx; -; delta = floatbits(idelta); -; return y + delta; -;} - -define <1 x float> @__ceil_varying_float(<1 x float>) nounwind readonly alwaysinline { - %calltmp.i = tail call <1 x float> @__round_varying_float(<1 x float> %0) nounwind - %bincmp.i = fcmp olt <1 x float> %calltmp.i, %0 - %val_to_boolvec32.i = sext <1 x i1> %bincmp.i to <1 x i32> - %bitop.i = and <1 x i32> %val_to_boolvec32.i, - %int_to_float_bitcast.i.i.i = bitcast <1 x i32> %bitop.i to <1 x float> - %binop.i = fadd <1 x float> %calltmp.i, %int_to_float_bitcast.i.i.i - ret <1 x float> %binop.i -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; rounding doubles - -; expecting math lib to provide this -declare double @ceil (double) nounwind readnone -declare double @floor (double) nounwind readnone -declare double @round (double) nounwind readnone -;declare float @llvm.sqrt.f32(float %Val) -declare double @llvm.sqrt.f64(double %Val) -declare float @llvm.sin.f32(float %Val) -declare float @llvm.cos.f32(float %Val) -declare float @llvm.sqrt.f32(float %Val) -declare float @llvm.exp.f32(float %Val) -declare float @llvm.log.f32(float %Val) -declare float @llvm.pow.f32(float %f, float %e) - -declare float @llvm.nvvm.rsqrt.approx.f(float %f) nounwind readonly alwaysinline -declare float @llvm.nvvm.sqrt.f(float %f) nounwind readonly alwaysinline -declare double @llvm.nvvm.rsqrt.approx.d(double %f) nounwind readonly alwaysinline -declare double @llvm.nvvm.sqrt.d(double %f) nounwind readonly alwaysinline - - - - -;; stuff that could be in builtins ... - -define(`unary1to1', ` - %v_0 = extractelement <1 x $1> %0, i32 0 - %r_0 = call $1 $2($1 %v_0) - %ret_0 = insertelement <1 x $1> undef, $1 %r_0, i32 0 - ret <1 x $1> %ret_0 -') - - - -;; dummy 1 wide vector ops -define void -@__aos_to_soa4_float1(<1 x float> %v0, <1 x float> %v1, <1 x float> %v2, - <1 x float> %v3, <1 x float> * noalias %out0, - <1 x float> * noalias %out1, <1 x float> * noalias %out2, - <1 x float> * noalias %out3) nounwind alwaysinline { - - store <1 x float> %v0, <1 x float > * %out0 - store <1 x float> %v1, <1 x float > * %out1 - store <1 x float> %v2, <1 x float > * %out2 - store <1 x float> %v3, <1 x float > * %out3 - - ret void -} - -define void -@__soa_to_aos4_float1(<1 x float> %v0, <1 x float> %v1, <1 x float> %v2, - <1 x float> %v3, <1 x float> * noalias %out0, - <1 x float> * noalias %out1, <1 x float> * noalias %out2, - <1 x float> * noalias %out3) nounwind alwaysinline { - call void @__aos_to_soa4_float1(<1 x float> %v0, <1 x float> %v1, - <1 x float> %v2, <1 x float> %v3, <1 x float> * %out0, - <1 x float> * %out1, <1 x float> * %out2, <1 x float> * %out3) - ret void -} - -define void -@__aos_to_soa3_float1(<1 x float> %v0, <1 x float> %v1, - <1 x float> %v2, <1 x float> * %out0, <1 x float> * %out1, - <1 x float> * %out2) { - store <1 x float> %v0, <1 x float > * %out0 - store <1 x float> %v1, <1 x float > * %out1 - store <1 x float> %v2, <1 x float > * %out2 - - ret void -} - -define void -@__soa_to_aos3_float1(<1 x float> %v0, <1 x float> %v1, - <1 x float> %v2, <1 x float> * %out0, <1 x float> * %out1, - <1 x float> * %out2) { - call void @__aos_to_soa3_float1(<1 x float> %v0, <1 x float> %v1, - <1 x float> %v2, <1 x float> * %out0, <1 x float> * %out1, - <1 x float> * %out2) - ret void -} - - -;; end builtins - - -define <1 x double> @__round_varying_double(<1 x double>) nounwind readonly alwaysinline { - 
unary1to1(double, @round) -} - -define <1 x double> @__floor_varying_double(<1 x double>) nounwind readonly alwaysinline { - unary1to1(double, @floor) -} - - -define <1 x double> @__ceil_varying_double(<1 x double>) nounwind readonly alwaysinline { - unary1to1(double, @ceil) -} - -; To do vector integer min and max, we do the vector compare and then sign -; extend the i1 vector result to an i32 mask. The __vselect does the -; rest... - -define <1 x i32> @__min_varying_int32(<1 x i32>, <1 x i32>) nounwind readonly alwaysinline { - %c = icmp slt <1 x i32> %0, %1 - %mask = sext <1 x i1> %c to <1 x i32> - %v = call <1 x i32> @__vselect_i32(<1 x i32> %1, <1 x i32> %0, <1 x i32> %mask) - ret <1 x i32> %v -} - -define i32 @__min_uniform_int32(i32, i32) nounwind readonly alwaysinline { - %c = icmp slt i32 %0, %1 - %r = select i1 %c, i32 %0, i32 %1 - ret i32 %r -} - -define <1 x i32> @__max_varying_int32(<1 x i32>, <1 x i32>) nounwind readonly alwaysinline { - %c = icmp sgt <1 x i32> %0, %1 - %mask = sext <1 x i1> %c to <1 x i32> - %v = call <1 x i32> @__vselect_i32(<1 x i32> %1, <1 x i32> %0, <1 x i32> %mask) - ret <1 x i32> %v -} - -define i32 @__max_uniform_int32(i32, i32) nounwind readonly alwaysinline { - %c = icmp sgt i32 %0, %1 - %r = select i1 %c, i32 %0, i32 %1 - ret i32 %r -} - -; The functions for unsigned ints are similar, just with unsigned -; comparison functions... - -define <1 x i32> @__min_varying_uint32(<1 x i32>, <1 x i32>) nounwind readonly alwaysinline { - %c = icmp ult <1 x i32> %0, %1 - %mask = sext <1 x i1> %c to <1 x i32> - %v = call <1 x i32> @__vselect_i32(<1 x i32> %1, <1 x i32> %0, <1 x i32> %mask) - ret <1 x i32> %v -} - -define i32 @__min_uniform_uint32(i32, i32) nounwind readonly alwaysinline { - %c = icmp ult i32 %0, %1 - %r = select i1 %c, i32 %0, i32 %1 - ret i32 %r -} - -define <1 x i32> @__max_varying_uint32(<1 x i32>, <1 x i32>) nounwind readonly alwaysinline { - %c = icmp ugt <1 x i32> %0, %1 - %mask = sext <1 x i1> %c to <1 x i32> - %v = call <1 x i32> @__vselect_i32(<1 x i32> %1, <1 x i32> %0, <1 x i32> %mask) - ret <1 x i32> %v -} - -define i32 @__max_uniform_uint32(i32, i32) nounwind readonly alwaysinline { - %c = icmp ugt i32 %0, %1 - %r = select i1 %c, i32 %0, i32 %1 - ret i32 %r -} - - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -; horizontal ops / reductions - -declare i32 @llvm.ctpop.i32(i32) nounwind readnone - -declare i16 @__reduce_add_int8() nounwind readnone -declare i32 @__reduce_add_int16() nounwind readnone - -define i32 @__popcnt_int32(i32) nounwind readonly alwaysinline { - %call = call i32 @llvm.ctpop.i32(i32 %0) - ret i32 %call -} - -declare i64 @llvm.ctpop.i64(i64) nounwind readnone - -define i64 @__popcnt_int64(i64) nounwind readonly alwaysinline { - %call = call i64 @llvm.ctpop.i64(i64 %0) - ret i64 %call -} - - -define float @__reduce_add_float(<1 x float> %v) nounwind readonly alwaysinline { - %r = extractelement <1 x float> %v, i32 0 - ret float %r -} - -define float @__reduce_min_float(<1 x float>) nounwind readnone { - %r = extractelement <1 x float> %0, i32 0 - ret float %r -} - -define float @__reduce_max_float(<1 x float>) nounwind readnone { - %r = extractelement <1 x float> %0, i32 0 - ret float %r -} - -define i32 @__reduce_add_int32(<1 x i32> %v) nounwind readnone { - %r = extractelement <1 x i32> %v, i32 0 - ret i32 %r -} - -define i32 @__reduce_min_int32(<1 x i32>) nounwind readnone { - %r = extractelement <1 x i32> %0, i32 0 - ret i32 %r -} - -define i32 @__reduce_max_int32(<1 x i32>) 
nounwind readnone { - %r = extractelement <1 x i32> %0, i32 0 - ret i32 %r -} - -define i32 @__reduce_min_uint32(<1 x i32>) nounwind readnone { - %r = extractelement <1 x i32> %0, i32 0 - ret i32 %r -} - -define i32 @__reduce_max_uint32(<1 x i32>) nounwind readnone { - %r = extractelement <1 x i32> %0, i32 0 - ret i32 %r - } - - -define double @__reduce_add_double(<1 x double>) nounwind readnone { - %m = extractelement <1 x double> %0, i32 0 - ret double %m -} - -define double @__reduce_min_double(<1 x double>) nounwind readnone { - %m = extractelement <1 x double> %0, i32 0 - ret double %m -} - -define double @__reduce_max_double(<1 x double>) nounwind readnone { - %m = extractelement <1 x double> %0, i32 0 - ret double %m -} - -define i64 @__reduce_add_int64(<1 x i64>) nounwind readnone { - %m = extractelement <1 x i64> %0, i32 0 - ret i64 %m -} - -define i64 @__reduce_min_int64(<1 x i64>) nounwind readnone { - %m = extractelement <1 x i64> %0, i32 0 - ret i64 %m -} - -define i64 @__reduce_max_int64(<1 x i64>) nounwind readnone { - %m = extractelement <1 x i64> %0, i32 0 - ret i64 %m -} - -define i64 @__reduce_min_uint64(<1 x i64>) nounwind readnone { - %m = extractelement <1 x i64> %0, i32 0 - ret i64 %m -} - -define i64 @__reduce_max_uint64(<1 x i64>) nounwind readnone { - %m = extractelement <1 x i64> %0, i32 0 - ret i64 %m -} - -define i1 @__reduce_equal_int32(<1 x i32> %vv, i32 * %samevalue, - <1 x i32> %mask) nounwind alwaysinline { - %v=extractelement <1 x i32> %vv, i32 0 - store i32 %v, i32 * %samevalue - ret i1 true - -} - -define i1 @__reduce_equal_float(<1 x float> %vv, float * %samevalue, - <1 x i32> %mask) nounwind alwaysinline { - %v=extractelement <1 x float> %vv, i32 0 - store float %v, float * %samevalue - ret i1 true - -} - -define i1 @__reduce_equal_int64(<1 x i64> %vv, i64 * %samevalue, - <1 x i32> %mask) nounwind alwaysinline { - %v=extractelement <1 x i64> %vv, i32 0 - store i64 %v, i64 * %samevalue - ret i1 true - -} - -define i1 @__reduce_equal_double(<1 x double> %vv, double * %samevalue, - <1 x i32> %mask) nounwind alwaysinline { - %v=extractelement <1 x double> %vv, i32 0 - store double %v, double * %samevalue - ret i1 true - -} - -; extracting/reinserting elements because I want to be able to remove vectors later on - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; rcp - -define <1 x float> @__rcp_varying_float(<1 x float>) nounwind readonly alwaysinline { - ;%call = call <1 x float> @llvm.x86.sse.rcp.ps(<1 x float> %0) - ; do one N-R iteration to improve precision - ; float iv = __rcp_v(v); - ; return iv * (2. 
- v * iv); - ;%v_iv = fmul <1 x float> %0, %call - ;%two_minus = fsub <1 x float> , %v_iv - ;%iv_mul = fmul <1 x float> %call, %two_minus - ;ret <1 x float> %iv_mul - %d = extractelement <1 x float> %0, i32 0 - %r = fdiv float 1.,%d - %rv = insertelement <1 x float> undef, float %r, i32 0 - ret <1 x float> %rv -} - - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -; sqrt - -define <1 x float> @__sqrt_varying_float(<1 x float> %v) nounwind readonly alwaysinline { - %vs = extractelement <1 x float> %v, i32 0 - %rs = call float @llvm.nvvm.sqrt.f(float %vs) - %rv = insertelement <1 x float> undef , float %rs, i32 0 - ret <1 x float> %rv -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -; rsqrt - - -define <1 x float> @__rsqrt_varying_float(<1 x float> %v) nounwind readonly alwaysinline { - %vs = extractelement <1 x float> %v, i32 0 - %rs = call float @llvm.nvvm.rsqrt.approx.f(float %vs) -; %rs = call float asm "rsqrt.approx.f32 $0,$0", "=f,f"(float %vs) ; example of inline ptx - %rv = insertelement <1 x float> undef , float %rs, i32 0 - ret <1 x float> %rv -} -define <1 x double> @__rsqrt_varying_double(<1 x double> %v) nounwind readonly alwaysinline { - %vs = extractelement <1 x double> %v, i32 0 - %rs = call double @llvm.nvvm.rsqrt.approx.d(double %vs) - %rv = insertelement <1 x double> undef , double %rs, i32 0 - ret <1 x double> %rv -} - - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; float min/max - -define <1 x float> @__max_varying_float(<1 x float>, <1 x float>) nounwind readonly alwaysinline { -; %call = call <1 x float> @llvm.x86.sse.max.ps(<1 x float> %0, <1 x float> %1) -; ret <1 x float> %call - %a = extractelement <1 x float> %0, i32 0 - %b = extractelement <1 x float> %1, i32 0 - %d = fcmp ogt float %a, %b - %r = select i1 %d, float %a, float %b - %rv = insertelement <1 x float> undef, float %r, i32 0 - ret <1 x float> %rv -} - -define <1 x float> @__min_varying_float(<1 x float>, <1 x float>) nounwind readonly alwaysinline { -; %call = call <1 x float> @llvm.x86.sse.min.ps(<1 x float> %0, <1 x float> %1) -; ret <1 x float> %call - %a = extractelement <1 x float> %0, i32 0 - %b = extractelement <1 x float> %1, i32 0 - %d = fcmp olt float %a, %b - %r = select i1 %d, float %a, float %b - %rv = insertelement <1 x float> undef, float %r, i32 0 - ret <1 x float> %rv - -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; double precision sqrt - -;declare <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double>) nounwind readnone - -define <1 x double> @__sqrt_varying_double(<1 x double>) nounwind alwaysinline { - ;unarya2to4(ret, double, @llvm.x86.sse2.sqrt.pd, %0) - ;ret <1 x double> %ret - unary1to1(double, @llvm.sqrt.f64) -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; double precision min/max - -;declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind readnone -;declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone - -define <1 x double> @__min_varying_double(<1 x double>, <1 x double>) nounwind readnone { - ;binarsy2to4(ret, double, @llvm.x86.sse2.min.pd, %0, %1) - ;ret <1 x double> %ret - %a = extractelement <1 x double> %0, i32 0 - %b = extractelement <1 x double> %1, i32 0 - %d = fcmp olt double %a, %b - %r = select i1 %d, double %a, double %b - %rv = insertelement <1 x double> undef, double %r, i32 0 - ret <1 x double> %rv - -} - -define <1 x double> 
@__max_varying_double(<1 x double>, <1 x double>) nounwind readnone { - ;binary2sto4(ret, double, @llvm.x86.sse2.max.pd, %0, %1) - ;ret <1 x double> %ret - %a = extractelement <1 x double> %0, i32 0 - %b = extractelement <1 x double> %1, i32 0 - %d = fcmp ogt double %a, %b - %r = select i1 %d, double %a, double %b - %rv = insertelement <1 x double> undef, double %r, i32 0 - ret <1 x double> %rv - -} - - -define float @__rcp_uniform_float(float) nounwind readonly alwaysinline { -; uniform float iv = extract(__rcp_u(v), 0); -; return iv * (2. - v * iv); - %r = fdiv float 1.,%0 - ret float %r -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; rounding floats - -define float @__round_uniform_float(float) nounwind readonly alwaysinline { - ; roundss, round mode nearest 0b00 | don't signal precision exceptions 0b1000 = 8 - ; the roundss intrinsic is a total mess--docs say: - ; - ; __m128 _mm_round_ss (__m128 a, __m128 b, const int c) - ; - ; b is a 128-bit parameter. The lowest 32 bits are the result of the rounding function - ; on b0. The higher order 96 bits are copied directly from input parameter a. The - ; return value is described by the following equations: - ; - ; r0 = RND(b0) - ; r1 = a1 - ; r2 = a2 - ; r3 = a3 - ; - ; It doesn't matter what we pass as a, since we only need the r0 value - ; here. So we pass the same register for both. - %v = insertelement<1 x float> undef, float %0, i32 0 - %rv = call <1 x float> @__round_varying_float(<1 x float> %v) - %r=extractelement <1 x float> %rv, i32 0 - ret float %r - -} - -define float @__floor_uniform_float(float) nounwind readonly alwaysinline { - %v = insertelement<1 x float> undef, float %0, i32 0 - %rv = call <1 x float> @__floor_varying_float(<1 x float> %v) - %r=extractelement <1 x float> %rv, i32 0 - ret float %r - -} - -define float @__ceil_uniform_float(float) nounwind readonly alwaysinline { - %v = insertelement<1 x float> undef, float %0, i32 0 - %rv = call <1 x float> @__ceil_varying_float(<1 x float> %v) - %r=extractelement <1 x float> %rv, i32 0 - ret float %r -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; rounding doubles - - -define double @__round_uniform_double(double) nounwind readonly alwaysinline { - %rs=call double @round(double %0) - ret double %rs -} - -define double @__floor_uniform_double(double) nounwind readonly alwaysinline { - %rs = call double @floor(double %0) - ret double %rs -} - -define double @__ceil_uniform_double(double) nounwind readonly alwaysinline { - %rs = call double @ceil(double %0) - ret double %rs -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; sqrt - - -define float @__sqrt_uniform_float(float) nounwind readonly alwaysinline { - %ret = call float @llvm.sqrt.f32(float %0) - ret float %ret -} - -define double @__sqrt_uniform_double(double) nounwind readonly alwaysinline { - %ret = call double @llvm.sqrt.f64(double %0) - ret double %ret -} - - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; rsqrt - - -define float @__rsqrt_uniform_float(float) nounwind readonly alwaysinline { - %s = call float @__sqrt_uniform_float(float %0) - %r = call float @__rcp_uniform_float(float %s) - ret float %r -} - - - - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; fastmath - - -define void @__fastmath() nounwind alwaysinline { - ; no-op - ret void -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; float 
min/max - - -define float @__max_uniform_float(float, float) nounwind readonly alwaysinline { - %d = fcmp ogt float %0, %1 - %r = select i1 %d, float %0, float %1 - ret float %r - -} - -define float @__min_uniform_float(float, float) nounwind readonly alwaysinline { - %d = fcmp olt float %0, %1 - %r = select i1 %d, float %0, float %1 - ret float %r - -} -define double @__max_uniform_double(double, double) nounwind readonly alwaysinline { - %d = fcmp ogt double %0, %1 - %r = select i1 %d, double %0, double %1 - ret double %r - -} - -define double @__min_uniform_double(double, double) nounwind readonly alwaysinline { - %d = fcmp olt double %0, %1 - %r = select i1 %d, double %0, double %1 - ret double %r - -} +;; broadcast/rotate/shuffle define_shuffles() - ctlztz() -define_prefetches() +declare @__smear_float(float) nounwind readnone +declare @__smear_double(double) nounwind readnone +declare @__smear_i8(i8) nounwind readnone +declare @__smear_i16(i16) nounwind readnone +declare @__smear_i32(i32) nounwind readnone +declare @__smear_i64(i64) nounwind readnone + +declare @__setzero_float() nounwind readnone +declare @__setzero_double() nounwind readnone +declare @__setzero_i8() nounwind readnone +declare @__setzero_i16() nounwind readnone +declare @__setzero_i32() nounwind readnone +declare @__setzero_i64() nounwind readnone + +declare @__undef_float() nounwind readnone +declare @__undef_double() nounwind readnone +declare @__undef_i8() nounwind readnone +declare @__undef_i16() nounwind readnone +declare @__undef_i32() nounwind readnone +declare @__undef_i64() nounwind readnone + +declare @__shuffle_i8(, ) nounwind readnone +declare @__shuffle2_i8(, , + ) nounwind readnone +declare @__shuffle_i16(, ) nounwind readnone +declare @__shuffle2_i16(, , + ) nounwind readnone +declare @__shuffle_float(, + ) nounwind readnone +declare @__shuffle2_float(, , + ) nounwind readnone +declare @__shuffle_i32(, + ) nounwind readnone +declare @__shuffle2_i32(, , + ) nounwind readnone +declare @__shuffle_double(, + ) nounwind readnone +declare @__shuffle2_double(, + , ) nounwind readnone +declare @__shuffle_i64(, + ) nounwind readnone +declare @__shuffle2_i64(, , + ) nounwind readnone + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; aos/soa + +declare void @__soa_to_aos3_float( %v0, %v1, + %v2, float * noalias %p) nounwind +declare void @__aos_to_soa3_float(float * noalias %p, * %out0, + * %out1, * %out2) nounwind +declare void @__soa_to_aos4_float( %v0, %v1, + %v2, %v3, + float * noalias %p) nounwind +declare void @__aos_to_soa4_float(float * noalias %p, * noalias %out0, + * noalias %out1, + * noalias %out2, + * noalias %out3) nounwind ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; half conversion routines @@ -928,7 +144,345 @@ declare @__half_to_float_varying( %v) nounwind read declare i16 @__float_to_half_uniform(float %v) nounwind readnone declare @__float_to_half_varying( %v) nounwind readnone +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; math + +declare void @__fastmath() nounwind + +;; round/floor/ceil + +declare float @__round_uniform_float(float) nounwind readnone +declare float @__floor_uniform_float(float) nounwind readnone +declare float @__ceil_uniform_float(float) nounwind readnone + +declare double @__round_uniform_double(double) nounwind readnone +declare double @__floor_uniform_double(double) nounwind readnone +declare double @__ceil_uniform_double(double) nounwind readnone + +declare 
@__round_varying_float() nounwind readnone +declare @__floor_varying_float() nounwind readnone +declare @__ceil_varying_float() nounwind readnone + +declare @__round_varying_double() nounwind readnone +declare @__floor_varying_double() nounwind readnone +declare @__ceil_varying_double() nounwind readnone + +;; min/max uniform + +;; declare float @__max_uniform_float(float, float) nounwind readnone +;; declare float @__min_uniform_float(float, float) nounwind readnone +define float @__max_uniform_float(float, float) nounwind readonly alwaysinline { + %d = fcmp ogt float %0, %1 + %r = select i1 %d, float %0, float %1 + ret float %r + +} +define float @__min_uniform_float(float, float) nounwind readonly alwaysinline { + %d = fcmp olt float %0, %1 + %r = select i1 %d, float %0, float %1 + ret float %r + +} + +;; declare i32 @__min_uniform_int32(i32, i32) nounwind readnone +;; declare i32 @__max_uniform_int32(i32, i32) nounwind readnone +define i32 @__min_uniform_int32(i32, i32) nounwind readonly alwaysinline { + %c = icmp slt i32 %0, %1 + %r = select i1 %c, i32 %0, i32 %1 + ret i32 %r +} +define i32 @__max_uniform_int32(i32, i32) nounwind readonly alwaysinline { + %c = icmp sgt i32 %0, %1 + %r = select i1 %c, i32 %0, i32 %1 + ret i32 %r +} + +;; declare i32 @__min_uniform_uint32(i32, i32) nounwind readnone +;; declare i32 @__max_uniform_uint32(i32, i32) nounwind readnone +define i32 @__min_uniform_uint32(i32, i32) nounwind readonly alwaysinline { + %c = icmp ult i32 %0, %1 + %r = select i1 %c, i32 %0, i32 %1 + ret i32 %r +} +define i32 @__max_uniform_uint32(i32, i32) nounwind readonly alwaysinline { + %c = icmp ugt i32 %0, %1 + %r = select i1 %c, i32 %0, i32 %1 + ret i32 %r +} + +declare i64 @__min_uniform_int64(i64, i64) nounwind readnone +declare i64 @__max_uniform_int64(i64, i64) nounwind readnone +declare i64 @__min_uniform_uint64(i64, i64) nounwind readnone +declare i64 @__max_uniform_uint64(i64, i64) nounwind readnone + +;; declare double @__min_uniform_double(double, double) nounwind readnone +;; declare double @__max_uniform_double(double, double) nounwind readnone +define double @__max_uniform_double(double, double) nounwind readonly alwaysinline { + %d = fcmp ogt double %0, %1 + %r = select i1 %d, double %0, double %1 + ret double %r +} +define double @__min_uniform_double(double, double) nounwind readonly alwaysinline { + %d = fcmp olt double %0, %1 + %r = select i1 %d, double %0, double %1 + ret double %r +} + +;; min/max uniform + +declare @__max_varying_float(, ) nounwind readnone +declare @__min_varying_float(, ) nounwind readnone +declare @__min_varying_int32(, ) nounwind readnone +declare @__max_varying_int32(, ) nounwind readnone +declare @__min_varying_uint32(, ) nounwind readnone +declare @__max_varying_uint32(, ) nounwind readnone +declare @__min_varying_int64(, ) nounwind readnone +declare @__max_varying_int64(, ) nounwind readnone +declare @__min_varying_uint64(, ) nounwind readnone +declare @__max_varying_uint64(, ) nounwind readnone +declare @__min_varying_double(, + ) nounwind readnone +declare @__max_varying_double(, + ) nounwind readnone + +;; sqrt/rsqrt/rcp + +declare float @llvm.nvvm.rsqrt.approx.f(float %f) nounwind readonly alwaysinline +declare float @llvm.nvvm.sqrt.f(float %f) nounwind readonly alwaysinline +declare double @llvm.nvvm.rsqrt.approx.d(double %f) nounwind readonly alwaysinline +declare double @llvm.nvvm.sqrt.d(double %f) nounwind readonly alwaysinline + +;; declare float @__rcp_uniform_float(float) nounwind readnone +define float 
@__rcp_uniform_float(float) nounwind readonly alwaysinline { +; uniform float iv = extract(__rcp_u(v), 0); +; return iv * (2. - v * iv); + %r = fdiv float 1.,%0 + ret float %r +} +;; declare float @__sqrt_uniform_float(float) nounwind readnone +define float @__sqrt_uniform_float(float) nounwind readonly alwaysinline { + %ret = call float @llvm.nvvm.sqrt.f(float %0) + ret float %ret +} +;; declare float @__rsqrt_uniform_float(float) nounwind readnone +define float @__rsqrt_uniform_float(float) nounwind readonly alwaysinline +{ + %ret = call float @llvm.nvvm.rsqrt.approx.f(float %0) + ret float %ret +} + +declare @__rcp_varying_float() nounwind readnone +declare @__rsqrt_varying_float() nounwind readnone +declare @__sqrt_varying_float() nounwind readnone + +;; declare double @__sqrt_uniform_double(double) nounwind readnone +define double @__sqrt_uniform_double(double) nounwind readonly alwaysinline { + %ret = call double @llvm.nvvm.sqrt.d(double %0) + ret double %ret +} +declare @__sqrt_varying_double() nounwind readnone + +;; bit ops + +declare i32 @__popcnt_int32(i32) nounwind readnone +declare i64 @__popcnt_int64(i64) nounwind readnone + +declare i32 @__count_trailing_zeros_i32(i32) nounwind readnone +declare i64 @__count_trailing_zeros_i64(i64) nounwind readnone +declare i32 @__count_leading_zeros_i32(i32) nounwind readnone +declare i64 @__count_leading_zeros_i64(i64) nounwind readnone + +; FIXME: need either to wire these up to the 8-wide SVML entrypoints, +; or, use the macro to call the 4-wide ones twice with our 8-wide +; vectors... + +;; svml + +include(`svml.m4') +svml_stubs(float,f,WIDTH) +svml_stubs(double,d,WIDTH) + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; reductions + +;; declare i64 @__movmsk() nounwind readnone +;; declare i1 @__any() nounwind readnone +;; declare i1 @__all() nounwind readnone +;; declare i1 @__none() nounwind readnone + +define i64 @__movmsk(<1 x i1>) nounwind readnone alwaysinline { + %v = extractelement <1 x i1> %0, i32 0 + %v64 = zext i1 %v to i64 + ret i64 %v64 +} + +define i1 @__any(<1 x i1>) nounwind readnone alwaysinline { + %v = extractelement <1 x i1> %0, i32 0 + %cmp = icmp ne i1 %v, 0 + ret i1 %cmp +} + +define i1 @__all(<1 x i1>) nounwind readnone alwaysinline { + %v = extractelement <1 x i1> %0, i32 0 + %cmp = icmp eq i1 %v, 1 + ret i1 %cmp +} + +define i1 @__none(<1 x i1>) nounwind readnone alwaysinline { + %v = extractelement <1 x i1> %0, i32 0 + %cmp = icmp eq i1 %v, 0 + ret i1 %cmp +} + +declare i16 @__reduce_add_int8() nounwind readnone +declare i32 @__reduce_add_int16() nounwind readnone + +declare float @__reduce_add_float() nounwind readnone +declare float @__reduce_min_float() nounwind readnone +declare float @__reduce_max_float() nounwind readnone + +declare i64 @__reduce_add_int32() nounwind readnone +declare i32 @__reduce_min_int32() nounwind readnone +declare i32 @__reduce_max_int32() nounwind readnone +declare i32 @__reduce_min_uint32() nounwind readnone +declare i32 @__reduce_max_uint32() nounwind readnone + +declare double @__reduce_add_double() nounwind readnone +declare double @__reduce_min_double() nounwind readnone +declare double @__reduce_max_double() nounwind readnone + +declare i64 @__reduce_add_int64() nounwind readnone +declare i64 @__reduce_min_int64() nounwind readnone +declare i64 @__reduce_max_int64() nounwind readnone +declare i64 @__reduce_min_uint64() nounwind readnone +declare i64 @__reduce_max_uint64() nounwind readnone + 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; unaligned loads/loads+broadcasts + + +masked_load(i8, 1) +masked_load(i16, 2) +masked_load(i32, 4) +masked_load(float, 4) +masked_load(i64, 8) +masked_load(double, 8) + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; masked store + +gen_masked_store(i8) +gen_masked_store(i16) +gen_masked_store(i32) +gen_masked_store(float) +gen_masked_store(i64) +gen_masked_store(double) + +define void @__masked_store_blend_i8(* nocapture, , + ) nounwind alwaysinline { + %v = load * %0 + %v1 = select %2, %1, %v + store %v1, * %0 + ret void +} + +define void @__masked_store_blend_i16(* nocapture, , + ) nounwind alwaysinline { + %v = load * %0 + %v1 = select %2, %1, %v + store %v1, * %0 + ret void +} + +define void @__masked_store_blend_i32(* nocapture, , + ) nounwind alwaysinline { + %v = load * %0 + %v1 = select %2, %1, %v + store %v1, * %0 + ret void +} + +define void @__masked_store_blend_float(* nocapture, , + ) nounwind alwaysinline { + %v = load * %0 + %v1 = select %2, %1, %v + store %v1, * %0 + ret void +} + +define void @__masked_store_blend_i64(* nocapture, + , ) nounwind alwaysinline { + %v = load * %0 + %v1 = select %2, %1, %v + store %v1, * %0 + ret void +} + +define void @__masked_store_blend_double(* nocapture, + , ) nounwind alwaysinline { + %v = load * %0 + %v1 = select %2, %1, %v + store %v1, * %0 + ret void +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; gather/scatter + +; define these with the macros from stdlib.m4 + +gen_gather_factored(i8) +gen_gather_factored(i16) +gen_gather_factored(i32) +gen_gather_factored(float) +gen_gather_factored(i64) +gen_gather_factored(double) + +gen_scatter(i8) +gen_scatter(i16) +gen_scatter(i32) +gen_scatter(float) +gen_scatter(i64) +gen_scatter(double) + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; prefetch + +define void @__prefetch_read_uniform_1(i8 * nocapture) nounwind alwaysinline { } +;; define void @__prefetch_read_uniform_2(i8 * nocapture) nounwind alwaysinline { } +;; define void @__prefetch_read_uniform_3(i8 * nocapture) nounwind alwaysinline { } +;; define void @__prefetch_read_uniform_nt(i8 * nocapture) nounwind alwaysinline { } + +define_prefetches() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; int8/int16 builtins define_avgs() + +define i1 @__reduce_equal_int32(<1 x i32> %vv, i32 * %samevalue, + <1 x i1> %mask) nounwind alwaysinline { + %v=extractelement <1 x i32> %vv, i32 0 + store i32 %v, i32 * %samevalue + ret i1 true +} +define i1 @__reduce_equal_float(<1 x float> %vv, float * %samevalue, + <1 x i1> %mask) nounwind alwaysinline { + %v=extractelement <1 x float> %vv, i32 0 + store float %v, float * %samevalue + ret i1 true +} +define i1 @__reduce_equal_int64(<1 x i64> %vv, i64 * %samevalue, + <1 x i1> %mask) nounwind alwaysinline { + %v=extractelement <1 x i64> %vv, i32 0 + store i64 %v, i64 * %samevalue + ret i1 true + +} +define i1 @__reduce_equal_double(<1 x double> %vv, double * %samevalue, + <1 x i1> %mask) nounwind alwaysinline { + %v=extractelement <1 x double> %vv, i32 0 + store double %v, double * %samevalue + ret i1 true + +} diff --git a/ispc.cpp b/ispc.cpp index 96cf4447..7f8ea80d 100644 --- a/ispc.cpp +++ b/ispc.cpp @@ -642,12 +642,12 @@ Target::Target(const char *arch, const char *cpu, const char *isa, bool pic) : this->m_nativeVectorWidth = 1; this->m_vectorWidth = 1; this->m_attributes = "+sm_35"; -#if 
0 +#if 1 this->m_hasHalf = false; this->m_maskingIsFree = true; this->m_maskBitCount = 1; this->m_hasTranscendentals = true; - this->m_hasGather = this->m_hasScatter = true; + this->m_hasGather = this->m_hasScatter = false; #else this->m_maskingIsFree = false; this->m_maskBitCount = 32; From 500ad7fb51e8fe76d328f512d9ba3b0fe1d5f6a2 Mon Sep 17 00:00:00 2001 From: Evghenii Date: Mon, 28 Oct 2013 17:01:03 +0100 Subject: [PATCH 02/12] using mask i1 for nvptx64 --- builtins/target-nvptx64.ll | 318 ++++++++++++++++++++++++------------- 1 file changed, 204 insertions(+), 114 deletions(-) diff --git a/builtins/target-nvptx64.ll b/builtins/target-nvptx64.ll index 79437ac8..fab5ff1d 100644 --- a/builtins/target-nvptx64.ll +++ b/builtins/target-nvptx64.ll @@ -68,6 +68,7 @@ include(`util.m4') stdlib_core() packed_load_and_store() +int64minmax() scans() rdrand_decls() @@ -75,66 +76,81 @@ rdrand_decls() ;; broadcast/rotate/shuffle define_shuffles() -ctlztz() -declare @__smear_float(float) nounwind readnone -declare @__smear_double(double) nounwind readnone -declare @__smear_i8(i8) nounwind readnone -declare @__smear_i16(i16) nounwind readnone -declare @__smear_i32(i32) nounwind readnone -declare @__smear_i64(i64) nounwind readnone +;; declare @__smear_float(float) nounwind readnone +;; declare @__smear_double(double) nounwind readnone +;; declare @__smear_i8(i8) nounwind readnone +;; declare @__smear_i16(i16) nounwind readnone +;; declare @__smear_i32(i32) nounwind readnone +;; declare @__smear_i64(i64) nounwind readnone -declare @__setzero_float() nounwind readnone -declare @__setzero_double() nounwind readnone -declare @__setzero_i8() nounwind readnone -declare @__setzero_i16() nounwind readnone -declare @__setzero_i32() nounwind readnone -declare @__setzero_i64() nounwind readnone +;; declare @__setzero_float() nounwind readnone +;; declare @__setzero_double() nounwind readnone +;; declare @__setzero_i8() nounwind readnone +;; declare @__setzero_i16() nounwind readnone +;; declare @__setzero_i32() nounwind readnone +;; declare @__setzero_i64() nounwind readnone -declare @__undef_float() nounwind readnone -declare @__undef_double() nounwind readnone -declare @__undef_i8() nounwind readnone -declare @__undef_i16() nounwind readnone -declare @__undef_i32() nounwind readnone -declare @__undef_i64() nounwind readnone +;; declare @__undef_float() nounwind readnone +;; declare @__undef_double() nounwind readnone +;; declare @__undef_i8() nounwind readnone +;; declare @__undef_i16() nounwind readnone +;; declare @__undef_i32() nounwind readnone +;; declare @__undef_i64() nounwind readnone -declare @__shuffle_i8(, ) nounwind readnone -declare @__shuffle2_i8(, , - ) nounwind readnone -declare @__shuffle_i16(, ) nounwind readnone -declare @__shuffle2_i16(, , - ) nounwind readnone -declare @__shuffle_float(, - ) nounwind readnone -declare @__shuffle2_float(, , - ) nounwind readnone -declare @__shuffle_i32(, - ) nounwind readnone -declare @__shuffle2_i32(, , - ) nounwind readnone -declare @__shuffle_double(, - ) nounwind readnone -declare @__shuffle2_double(, - , ) nounwind readnone -declare @__shuffle_i64(, - ) nounwind readnone -declare @__shuffle2_i64(, , - ) nounwind readnone ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; aos/soa -declare void @__soa_to_aos3_float( %v0, %v1, - %v2, float * noalias %p) nounwind -declare void @__aos_to_soa3_float(float * noalias %p, * %out0, - * %out1, * %out2) nounwind -declare void @__soa_to_aos4_float( %v0, %v1, - %v2, %v3, - float * noalias 
%p) nounwind -declare void @__aos_to_soa4_float(float * noalias %p, * noalias %out0, - * noalias %out1, - * noalias %out2, - * noalias %out3) nounwind +aossoa() + +;; dummy 1 wide vector ops +define void +@__aos_to_soa4_float1(<1 x float> %v0, <1 x float> %v1, <1 x float> %v2, + <1 x float> %v3, <1 x float> * noalias %out0, + <1 x float> * noalias %out1, <1 x float> * noalias %out2, + <1 x float> * noalias %out3) nounwind alwaysinline { + + store <1 x float> %v0, <1 x float > * %out0 + store <1 x float> %v1, <1 x float > * %out1 + store <1 x float> %v2, <1 x float > * %out2 + store <1 x float> %v3, <1 x float > * %out3 + + ret void +} + +define void +@__soa_to_aos4_float1(<1 x float> %v0, <1 x float> %v1, <1 x float> %v2, + <1 x float> %v3, <1 x float> * noalias %out0, + <1 x float> * noalias %out1, <1 x float> * noalias %out2, + <1 x float> * noalias %out3) nounwind alwaysinline { + call void @__aos_to_soa4_float1(<1 x float> %v0, <1 x float> %v1, + <1 x float> %v2, <1 x float> %v3, <1 x float> * %out0, + <1 x float> * %out1, <1 x float> * %out2, <1 x float> * %out3) + ret void +} + +define void +@__aos_to_soa3_float1(<1 x float> %v0, <1 x float> %v1, + <1 x float> %v2, <1 x float> * %out0, <1 x float> * %out1, + <1 x float> * %out2) { + store <1 x float> %v0, <1 x float > * %out0 + store <1 x float> %v1, <1 x float > * %out1 + store <1 x float> %v2, <1 x float > * %out2 + + ret void +} + +define void +@__soa_to_aos3_float1(<1 x float> %v0, <1 x float> %v1, + <1 x float> %v2, <1 x float> * %out0, <1 x float> * %out1, + <1 x float> * %out2) { + call void @__aos_to_soa3_float1(<1 x float> %v0, <1 x float> %v1, + <1 x float> %v2, <1 x float> * %out0, <1 x float> * %out1, + <1 x float> * %out2) + ret void +} + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; half conversion routines @@ -210,10 +226,10 @@ define i32 @__max_uniform_uint32(i32, i32) nounwind readonly alwaysinline { ret i32 %r } -declare i64 @__min_uniform_int64(i64, i64) nounwind readnone -declare i64 @__max_uniform_int64(i64, i64) nounwind readnone -declare i64 @__min_uniform_uint64(i64, i64) nounwind readnone -declare i64 @__max_uniform_uint64(i64, i64) nounwind readnone +;; declare i64 @__min_uniform_int64(i64, i64) nounwind readnone +;; declare i64 @__max_uniform_int64(i64, i64) nounwind readnone +;; declare i64 @__min_uniform_uint64(i64, i64) nounwind readnone +;; declare i64 @__max_uniform_uint64(i64, i64) nounwind readnone ;; declare double @__min_uniform_double(double, double) nounwind readnone ;; declare double @__max_uniform_double(double, double) nounwind readnone @@ -236,10 +252,10 @@ declare @__min_varying_int32(, ) nounwin declare @__max_varying_int32(, ) nounwind readnone declare @__min_varying_uint32(, ) nounwind readnone declare @__max_varying_uint32(, ) nounwind readnone -declare @__min_varying_int64(, ) nounwind readnone -declare @__max_varying_int64(, ) nounwind readnone -declare @__min_varying_uint64(, ) nounwind readnone -declare @__max_varying_uint64(, ) nounwind readnone +;; declare @__min_varying_int64(, ) nounwind readnone +;; declare @__max_varying_int64(, ) nounwind readnone +;; declare @__min_varying_uint64(, ) nounwind readnone +;; declare @__max_varying_uint64(, ) nounwind readnone declare @__min_varying_double(, ) nounwind readnone declare @__max_varying_double(, @@ -284,13 +300,19 @@ declare @__sqrt_varying_double() nounwind readn ;; bit ops -declare i32 @__popcnt_int32(i32) nounwind readnone -declare i64 @__popcnt_int64(i64) nounwind readnone +declare i32 
@llvm.ctpop.i32(i32) nounwind readnone +define i32 @__popcnt_int32(i32) nounwind readonly alwaysinline { + %call = call i32 @llvm.ctpop.i32(i32 %0) + ret i32 %call +} -declare i32 @__count_trailing_zeros_i32(i32) nounwind readnone -declare i64 @__count_trailing_zeros_i64(i64) nounwind readnone -declare i32 @__count_leading_zeros_i32(i32) nounwind readnone -declare i64 @__count_leading_zeros_i64(i64) nounwind readnone +declare i64 @llvm.ctpop.i64(i64) nounwind readnone +define i64 @__popcnt_int64(i64) nounwind readonly alwaysinline { + %call = call i64 @llvm.ctpop.i64(i64 %0) + ret i64 %call +} + +ctlztz() ; FIXME: need either to wire these up to the 8-wide SVML entrypoints, ; or, use the macro to call the 4-wide ones twice with our 8-wide @@ -303,12 +325,13 @@ svml_stubs(float,f,WIDTH) svml_stubs(double,d,WIDTH) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; reductions +; population count; -;; declare i64 @__movmsk() nounwind readnone -;; declare i1 @__any() nounwind readnone -;; declare i1 @__all() nounwind readnone -;; declare i1 @__none() nounwind readnone + + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; reductions define i64 @__movmsk(<1 x i1>) nounwind readnone alwaysinline { %v = extractelement <1 x i1> %0, i32 0 @@ -337,25 +360,118 @@ define i1 @__none(<1 x i1>) nounwind readnone alwaysinline { declare i16 @__reduce_add_int8() nounwind readnone declare i32 @__reduce_add_int16() nounwind readnone -declare float @__reduce_add_float() nounwind readnone -declare float @__reduce_min_float() nounwind readnone -declare float @__reduce_max_float() nounwind readnone +define float @__reduce_add_float(<1 x float> %v) nounwind readonly alwaysinline { + %r = extractelement <1 x float> %v, i32 0 + ret float %r +} -declare i64 @__reduce_add_int32() nounwind readnone -declare i32 @__reduce_min_int32() nounwind readnone -declare i32 @__reduce_max_int32() nounwind readnone -declare i32 @__reduce_min_uint32() nounwind readnone -declare i32 @__reduce_max_uint32() nounwind readnone +define float @__reduce_min_float(<1 x float>) nounwind readnone { + %r = extractelement <1 x float> %0, i32 0 + ret float %r +} -declare double @__reduce_add_double() nounwind readnone -declare double @__reduce_min_double() nounwind readnone -declare double @__reduce_max_double() nounwind readnone +define float @__reduce_max_float(<1 x float>) nounwind readnone { + %r = extractelement <1 x float> %0, i32 0 + ret float %r +} -declare i64 @__reduce_add_int64() nounwind readnone -declare i64 @__reduce_min_int64() nounwind readnone -declare i64 @__reduce_max_int64() nounwind readnone -declare i64 @__reduce_min_uint64() nounwind readnone -declare i64 @__reduce_max_uint64() nounwind readnone +define i32 @__reduce_add_int32(<1 x i32> %v) nounwind readnone { + %r = extractelement <1 x i32> %v, i32 0 + ret i32 %r +} + +define i32 @__reduce_min_int32(<1 x i32>) nounwind readnone { + %r = extractelement <1 x i32> %0, i32 0 + ret i32 %r +} + +define i32 @__reduce_max_int32(<1 x i32>) nounwind readnone { + %r = extractelement <1 x i32> %0, i32 0 + ret i32 %r +} + +define i32 @__reduce_min_uint32(<1 x i32>) nounwind readnone { + %r = extractelement <1 x i32> %0, i32 0 + ret i32 %r +} + +define i32 @__reduce_max_uint32(<1 x i32>) nounwind readnone { + %r = extractelement <1 x i32> %0, i32 0 + ret i32 %r + } + + +define double @__reduce_add_double(<1 x double>) nounwind readnone { + %m = extractelement <1 x double> %0, i32 0 + ret double %m +} + +define double 
@__reduce_min_double(<1 x double>) nounwind readnone { + %m = extractelement <1 x double> %0, i32 0 + ret double %m +} + +define double @__reduce_max_double(<1 x double>) nounwind readnone { + %m = extractelement <1 x double> %0, i32 0 + ret double %m +} + +define i64 @__reduce_add_int64(<1 x i64>) nounwind readnone { + %m = extractelement <1 x i64> %0, i32 0 + ret i64 %m +} + +define i64 @__reduce_min_int64(<1 x i64>) nounwind readnone { + %m = extractelement <1 x i64> %0, i32 0 + ret i64 %m +} + +define i64 @__reduce_max_int64(<1 x i64>) nounwind readnone { + %m = extractelement <1 x i64> %0, i32 0 + ret i64 %m +} + +define i64 @__reduce_min_uint64(<1 x i64>) nounwind readnone { + %m = extractelement <1 x i64> %0, i32 0 + ret i64 %m +} + +define i64 @__reduce_max_uint64(<1 x i64>) nounwind readnone { + %m = extractelement <1 x i64> %0, i32 0 + ret i64 %m +} + +define i1 @__reduce_equal_int32(<1 x i32> %vv, i32 * %samevalue, + <1 x i32> %mask) nounwind alwaysinline { + %v=extractelement <1 x i32> %vv, i32 0 + store i32 %v, i32 * %samevalue + ret i1 true + +} + +define i1 @__reduce_equal_float(<1 x float> %vv, float * %samevalue, + <1 x i32> %mask) nounwind alwaysinline { + %v=extractelement <1 x float> %vv, i32 0 + store float %v, float * %samevalue + ret i1 true + +} + +define i1 @__reduce_equal_int64(<1 x i64> %vv, i64 * %samevalue, + <1 x i32> %mask) nounwind alwaysinline { + %v=extractelement <1 x i64> %vv, i32 0 + store i64 %v, i64 * %samevalue + ret i1 true + +} + +define i1 @__reduce_equal_double(<1 x double> %vv, double * %samevalue, + <1 x i32> %mask) nounwind alwaysinline { + %v=extractelement <1 x double> %vv, i32 0 + store double %v, double * %samevalue + ret i1 true + +} ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; unaligned loads/loads+broadcasts @@ -449,7 +565,7 @@ gen_scatter(double) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; prefetch -define void @__prefetch_read_uniform_1(i8 * nocapture) nounwind alwaysinline { } +;; define void @__prefetch_read_uniform_1(i8 * nocapture) nounwind alwaysinline { } ;; define void @__prefetch_read_uniform_2(i8 * nocapture) nounwind alwaysinline { } ;; define void @__prefetch_read_uniform_3(i8 * nocapture) nounwind alwaysinline { } ;; define void @__prefetch_read_uniform_nt(i8 * nocapture) nounwind alwaysinline { } @@ -460,29 +576,3 @@ define_prefetches() define_avgs() -define i1 @__reduce_equal_int32(<1 x i32> %vv, i32 * %samevalue, - <1 x i1> %mask) nounwind alwaysinline { - %v=extractelement <1 x i32> %vv, i32 0 - store i32 %v, i32 * %samevalue - ret i1 true -} -define i1 @__reduce_equal_float(<1 x float> %vv, float * %samevalue, - <1 x i1> %mask) nounwind alwaysinline { - %v=extractelement <1 x float> %vv, i32 0 - store float %v, float * %samevalue - ret i1 true -} -define i1 @__reduce_equal_int64(<1 x i64> %vv, i64 * %samevalue, - <1 x i1> %mask) nounwind alwaysinline { - %v=extractelement <1 x i64> %vv, i32 0 - store i64 %v, i64 * %samevalue - ret i1 true - -} -define i1 @__reduce_equal_double(<1 x double> %vv, double * %samevalue, - <1 x i1> %mask) nounwind alwaysinline { - %v=extractelement <1 x double> %vv, i32 0 - store double %v, double * %samevalue - ret i1 true - -} From ff98271a43a34487fa092ab626ad24424ab1455e Mon Sep 17 00:00:00 2001 From: Evghenii Date: Mon, 28 Oct 2013 17:03:00 +0100 Subject: [PATCH 03/12] using mask i1 for nvptx64 --- builtins/target-nvptx64.ll | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/builtins/target-nvptx64.ll b/builtins/target-nvptx64.ll index fab5ff1d..c9815455 100644 --- a/builtins/target-nvptx64.ll +++ b/builtins/target-nvptx64.ll @@ -442,7 +442,7 @@ define i64 @__reduce_max_uint64(<1 x i64>) nounwind readnone { } define i1 @__reduce_equal_int32(<1 x i32> %vv, i32 * %samevalue, - <1 x i32> %mask) nounwind alwaysinline { + <1 x i1> %mask) nounwind alwaysinline { %v=extractelement <1 x i32> %vv, i32 0 store i32 %v, i32 * %samevalue ret i1 true @@ -450,7 +450,7 @@ define i1 @__reduce_equal_int32(<1 x i32> %vv, i32 * %samevalue, } define i1 @__reduce_equal_float(<1 x float> %vv, float * %samevalue, - <1 x i32> %mask) nounwind alwaysinline { + <1 x i1> %mask) nounwind alwaysinline { %v=extractelement <1 x float> %vv, i32 0 store float %v, float * %samevalue ret i1 true @@ -458,7 +458,7 @@ define i1 @__reduce_equal_float(<1 x float> %vv, float * %samevalue, } define i1 @__reduce_equal_int64(<1 x i64> %vv, i64 * %samevalue, - <1 x i32> %mask) nounwind alwaysinline { + <1 x i1> %mask) nounwind alwaysinline { %v=extractelement <1 x i64> %vv, i32 0 store i64 %v, i64 * %samevalue ret i1 true @@ -466,7 +466,7 @@ define i1 @__reduce_equal_int64(<1 x i64> %vv, i64 * %samevalue, } define i1 @__reduce_equal_double(<1 x double> %vv, double * %samevalue, - <1 x i32> %mask) nounwind alwaysinline { + <1 x i1> %mask) nounwind alwaysinline { %v=extractelement <1 x double> %vv, i32 0 store double %v, double * %samevalue ret i1 true From 57aefdf83089bc0cec32381153154b3281c564ba Mon Sep 17 00:00:00 2001 From: Evghenii Date: Tue, 29 Oct 2013 10:21:48 +0100 Subject: [PATCH 04/12] accepts ptx extension when target is nvptx64 --- module.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/module.cpp b/module.cpp index dce41457..02551dd8 100644 --- a/module.cpp +++ b/module.cpp @@ -953,7 +953,13 @@ Module::writeOutput(OutputType outputType, const char *outFileName, const char *fileType = NULL; switch (outputType) { case Asm: - if (strcasecmp(suffix, "s")) + if (g->target->getISA() != Target::NVPTX64) + { + if (strcasecmp(suffix, "s")) + fileType = "assembly"; + } + else + if (strcasecmp(suffix, "ptx")) fileType = "assembly"; break; case Bitcode: From f115a32073c74a3689f92cbc0b3b356251424504 Mon Sep 17 00:00:00 2001 From: Evghenii Date: Tue, 29 Oct 2013 10:21:56 +0100 Subject: [PATCH 05/12] fix llvm 3.2 compilation --- func.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/func.cpp b/func.cpp index 597c42d2..532f92e6 100644 --- a/func.cpp +++ b/func.cpp @@ -47,7 +47,7 @@ #include #if defined(LLVM_3_1) || defined(LLVM_3_2) - #include + #include #include #include #include From b50d3944eaa46e4768e6d868989cae644f015adc Mon Sep 17 00:00:00 2001 From: Evghenii Date: Tue, 29 Oct 2013 10:22:07 +0100 Subject: [PATCH 06/12] allow easy switch between llvm --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4af7aff4..3977fb4e 100644 --- a/Makefile +++ b/Makefile @@ -36,7 +36,7 @@ # If you have your own special version of llvm and/or clang, change # these variables to match. 
-LLVM_CONFIG=$(shell which /usr/local/llvm-3.3/bin/llvm-config) +LLVM_CONFIG=$(shell which /home/evghenii/usr/local/llvm/bin-3.3/bin/llvm-config) CLANG_INCLUDE=$(shell $(LLVM_CONFIG) --includedir) # Enable ARM by request From b2baa35c3dbe6849eea256c6feb12bcf838ae37f Mon Sep 17 00:00:00 2001 From: Evghenii Date: Tue, 29 Oct 2013 11:34:01 +0100 Subject: [PATCH 07/12] added correct datalayout for nvptx64 --- builtins/target-nvptx64.ll | 2 +- ispc.cpp | 3 +++ module.cpp | 5 +++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/builtins/target-nvptx64.ll b/builtins/target-nvptx64.ll index c9815455..ecd536a3 100644 --- a/builtins/target-nvptx64.ll +++ b/builtins/target-nvptx64.ll @@ -3,7 +3,7 @@ define(`MASK',`i1') define(`WIDTH',`1') -target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64" +;; target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64" ;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;; diff --git a/ispc.cpp b/ispc.cpp index 7f8ea80d..8bdcb0c4 100644 --- a/ispc.cpp +++ b/ispc.cpp @@ -710,6 +710,9 @@ Target::Target(const char *arch, const char *cpu, const char *isa, bool pic) : dl_string = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-" "i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-" "f80:128:128-n8:16:32:64-S128-v16:16:16-v32:32:32-v4:128:128"; + } else if (m_isa == Target::NVPTX64) + { + dl_string = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"; } // 3. Finally set member data diff --git a/module.cpp b/module.cpp index 02551dd8..28c922c8 100644 --- a/module.cpp +++ b/module.cpp @@ -1053,6 +1053,11 @@ Module::writeBitcode(llvm::Module *module, const char *outFileName) { } llvm::raw_fd_ostream fos(fd, (fd != 1), false); + if (g->target->getISA() == Target::NVPTX64) + { + const std::string dl_string = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"; + module->setDataLayout(dl_string); + } llvm::WriteBitcodeToFile(module, fos); return true; } From ac700d48604f9aafa6f6528ae29b4f1f3fd49c7b Mon Sep 17 00:00:00 2001 From: Evghenii Date: Tue, 29 Oct 2013 13:36:31 +0100 Subject: [PATCH 08/12] checkpoint --- ctx.cpp | 22 ++++++++++++++++++++-- func.cpp | 7 ++++++- ispc.cpp | 2 ++ ispc.h | 2 ++ module.cpp | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 85 insertions(+), 3 deletions(-) diff --git a/ctx.cpp b/ctx.cpp index 3aee776a..c97da129 100644 --- a/ctx.cpp +++ b/ctx.cpp @@ -1404,15 +1404,33 @@ FunctionEmitContext::MasksAllEqual(llvm::Value *v1, llvm::Value *v2) { llvm::Value * FunctionEmitContext::ProgramIndexVector(bool is32bits) { + if (1) //g->target->getISA() != Target::NVPTX64) + { llvm::SmallVector array; for (int i = 0; i < g->target->getVectorWidth() ; ++i) { - llvm::Constant *C = is32bits ? LLVMInt32(i) : LLVMInt64(i); - array.push_back(C); + llvm::Constant *C = is32bits ? LLVMInt32(i) : LLVMInt64(i); + array.push_back(C); } llvm::Constant* index = llvm::ConstantVector::get(array); return index; + } + else + { + std::vector mm; + m->symbolTable->LookupFunction("__tid_x", &mm); + if (g->target->getMaskBitCount() == 1) + AssertPos(currentPos, mm.size() == 1); + else + // There should be one with signed int signature, one unsigned int. 
+ AssertPos(currentPos, mm.size() == 2); + // We can actually call either one, since both are i32s as far as + // LLVM's type system is concerned... + llvm::Function *fmm = mm[0]->function; + std::vector args; + return CallInst(fmm, NULL, args, "__tid_x"); + } } diff --git a/func.cpp b/func.cpp index 532f92e6..62372a21 100644 --- a/func.cpp +++ b/func.cpp @@ -523,6 +523,11 @@ Function::GenerateIR() { } // And we can now go ahead and emit the code + { /* export function with NVPTX64 target should be emitted host architecture */ + const FunctionType *type= CastType(sym->type); + if (g->target->getISA() == Target::NVPTX64 && type->isExported) + return; + } { FunctionEmitContext ec(this, sym, function, firstStmtPos); emitCode(&ec, function, firstStmtPos); @@ -540,7 +545,7 @@ Function::GenerateIR() { // the application can call it const FunctionType *type = CastType(sym->type); Assert(type != NULL); - if (type->isExported) { + if (type->isExported && g->target->getISA() != Target::NVPTX64) { if (!type->isTask) { llvm::FunctionType *ftype = type->LLVMFunctionType(g->ctx, true); llvm::GlobalValue::LinkageTypes linkage = llvm::GlobalValue::ExternalLinkage; diff --git a/ispc.cpp b/ispc.cpp index 8bdcb0c4..00d94000 100644 --- a/ispc.cpp +++ b/ispc.cpp @@ -167,6 +167,7 @@ Target::Target(const char *arch, const char *cpu, const char *isa, bool pic) : #endif m_valid(false), m_isa(SSE2), + m_isPTX(false), m_arch(""), m_is32Bit(true), m_cpu(""), @@ -639,6 +640,7 @@ Target::Target(const char *arch, const char *cpu, const char *isa, bool pic) : #endif else if (!strcasecmp(isa, "nvptx64")) { this->m_isa = Target::NVPTX64; + this->m_isPTX = true; this->m_nativeVectorWidth = 1; this->m_vectorWidth = 1; this->m_attributes = "+sm_35"; diff --git a/ispc.h b/ispc.h index e2a58ba9..eea77348 100644 --- a/ispc.h +++ b/ispc.h @@ -244,6 +244,7 @@ public: bool isValid() const {return m_valid;} ISA getISA() const {return m_isa;} + bool isPTX() const {return m_isPTX;} std::string getArch() const {return m_arch;} @@ -298,6 +299,7 @@ private: /** Instruction set being compiled to. */ ISA m_isa; + bool m_isPTX; /** Target system architecture. (e.g. "x86-64", "x86"). */ std::string m_arch; diff --git a/module.cpp b/module.cpp index 28c922c8..07dc5e48 100644 --- a/module.cpp +++ b/module.cpp @@ -2316,6 +2316,61 @@ Module::CompileAndOutput(const char *srcFile, const char *hostStubFileName, const char *devStubFileName) { + if (target != NULL && !strcmp(target,"nvptx64")) + { + fprintf(stderr, "compiling nvptx64 \n"); + // We're only compiling to a single target + g->target = new Target(arch, cpu, target, generatePIC); + if (!g->target->isValid()) + return 1; + + m = new Module(srcFile); + if (m->CompileFile() == 0) { + if (outputType == CXX) { + if (target == NULL || strncmp(target, "generic-", 8) != 0) { + Error(SourcePos(), "When generating C++ output, one of the \"generic-*\" " + "targets must be used."); + return 1; + } + } + else if (outputType == Asm || outputType == Object) { + if (target != NULL && strncmp(target, "generic-", 8) == 0) { + Error(SourcePos(), "When using a \"generic-*\" compilation target, " + "%s output can not be used.", + (outputType == Asm) ? 
"assembly" : "object file"); + return 1; + } + } + + if (outFileName != NULL) + if (!m->writeOutput(outputType, outFileName, includeFileName)) + return 1; + if (headerFileName != NULL) + if (!m->writeOutput(Module::Header, headerFileName)) + return 1; + if (depsFileName != NULL) + if (!m->writeOutput(Module::Deps,depsFileName)) + return 1; + if (hostStubFileName != NULL) + if (!m->writeOutput(Module::HostStub,hostStubFileName)) + return 1; + if (devStubFileName != NULL) + if (!m->writeOutput(Module::DevStub,devStubFileName)) + return 1; + } + else + ++m->errorCount; + + int errorCount = m->errorCount; + delete m; + m = NULL; + + delete g->target; + g->target = NULL; + + return errorCount > 0; + } + else if (target == NULL || strchr(target, ',') == NULL) { // We're only compiling to a single target g->target = new Target(arch, cpu, target, generatePIC); From 8baef6daa3966397f14445ed486ce54ac7b20e54 Mon Sep 17 00:00:00 2001 From: Evghenii Date: Tue, 29 Oct 2013 14:01:53 +0100 Subject: [PATCH 09/12] +1 --- ctx.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ctx.cpp b/ctx.cpp index c97da129..6b60ec51 100644 --- a/ctx.cpp +++ b/ctx.cpp @@ -1417,9 +1417,9 @@ FunctionEmitContext::ProgramIndexVector(bool is32bits) { return index; } else - { + { /* this idea is to call __tid_x() builtin, but it doesn't work */ std::vector mm; - m->symbolTable->LookupFunction("__tid_x", &mm); + m->symbolTable->LookupFunction("laneIndex", &mm); if (g->target->getMaskBitCount() == 1) AssertPos(currentPos, mm.size() == 1); else @@ -1429,7 +1429,7 @@ FunctionEmitContext::ProgramIndexVector(bool is32bits) { // LLVM's type system is concerned... llvm::Function *fmm = mm[0]->function; std::vector args; - return CallInst(fmm, NULL, args, "__tid_x"); + return CallInst(fmm, NULL, args, "laneIndex"); } } From b31fc6f66d655d353b98f598e3fb2957b6686ebd Mon Sep 17 00:00:00 2001 From: Evghenii Date: Tue, 29 Oct 2013 14:17:11 +0100 Subject: [PATCH 10/12] now can generate both targets for npvtx64. m_isPTX is set true, to distuish when to either skip or exlcusive euse export --- ispc.cpp | 4 +- ispc.h | 2 +- module.cpp | 107 +++++++++++++++++++++++++++++------------------------ 3 files changed, 61 insertions(+), 52 deletions(-) diff --git a/ispc.cpp b/ispc.cpp index 00d94000..232b88e8 100644 --- a/ispc.cpp +++ b/ispc.cpp @@ -157,7 +157,7 @@ static const char *supportedCPUs[] = { #endif // LLVM 3.4+ }; -Target::Target(const char *arch, const char *cpu, const char *isa, bool pic) : +Target::Target(const char *arch, const char *cpu, const char *isa, bool pic, bool isPTX) : m_target(NULL), m_targetMachine(NULL), #if defined(LLVM_3_1) @@ -167,7 +167,7 @@ Target::Target(const char *arch, const char *cpu, const char *isa, bool pic) : #endif m_valid(false), m_isa(SSE2), - m_isPTX(false), + m_isPTX(isPTX), m_arch(""), m_is32Bit(true), m_cpu(""), diff --git a/ispc.h b/ispc.h index eea77348..29f34183 100644 --- a/ispc.h +++ b/ispc.h @@ -189,7 +189,7 @@ public: /** Initializes the given Target pointer for a target of the given name, if the name is a known target. Returns true if the target was initialized and false if the name is unknown. */ - Target(const char *arch, const char *cpu, const char *isa, bool pic); + Target(const char *arch, const char *cpu, const char *isa, bool pic, bool isPTX = false); /** Returns a comma-delimited string giving the names of the currently supported compilation targets. 
*/ diff --git a/module.cpp b/module.cpp index 07dc5e48..071013b8 100644 --- a/module.cpp +++ b/module.cpp @@ -733,13 +733,11 @@ Module::AddFunctionDeclaration(const std::string &name, if (storageClass == SC_EXTERN_C) { // Make sure the user hasn't supplied both an 'extern "C"' and a // 'task' qualifier with the function -#if 0 /* NVPTX64::task_and_externC */ if (functionType->isTask && g->target->getISA() != Target::NVPTX64) { Error(pos, "\"task\" qualifier is illegal with C-linkage extern " "function \"%s\". Ignoring this function.", name.c_str()); return; } -#endif std::vector funcs; symbolTable->LookupFunction(name.c_str(), &funcs); @@ -2316,62 +2314,72 @@ Module::CompileAndOutput(const char *srcFile, const char *hostStubFileName, const char *devStubFileName) { - if (target != NULL && !strcmp(target,"nvptx64")) + if (target != NULL && !strcmp(target,"nvptx64")) // NVPTX64 { - fprintf(stderr, "compiling nvptx64 \n"); // We're only compiling to a single target - g->target = new Target(arch, cpu, target, generatePIC); - if (!g->target->isValid()) - return 1; + const char * target_list[] = {"nvptx64", "avx"}; + int errorCount = 0; + for (int itarget = 0; itarget < 2; itarget++) + { + fprintf(stderr, "compiling nvptx64 : target= %s\n",target_list[itarget]); + g->target = new Target(arch, cpu, target_list[itarget], generatePIC, /* isPTX= */ true); + if (!g->target->isValid()) + return 1; - m = new Module(srcFile); - if (m->CompileFile() == 0) { - if (outputType == CXX) { - if (target == NULL || strncmp(target, "generic-", 8) != 0) { - Error(SourcePos(), "When generating C++ output, one of the \"generic-*\" " - "targets must be used."); - return 1; - } - } - else if (outputType == Asm || outputType == Object) { - if (target != NULL && strncmp(target, "generic-", 8) == 0) { - Error(SourcePos(), "When using a \"generic-*\" compilation target, " - "%s output can not be used.", - (outputType == Asm) ? "assembly" : "object file"); - return 1; + m = new Module(srcFile); + if (m->CompileFile() == 0) { + if (outputType == CXX) { + if (target == NULL || strncmp(target, "generic-", 8) != 0) { + Error(SourcePos(), "When generating C++ output, one of the \"generic-*\" " + "targets must be used."); + return 1; + } + } + else if (outputType == Asm || outputType == Object) { + if (target != NULL && strncmp(target, "generic-", 8) == 0) { + Error(SourcePos(), "When using a \"generic-*\" compilation target, " + "%s output can not be used.", + (outputType == Asm) ? 
"assembly" : "object file"); + return 1; + } + } + + assert(outFileName != NULL); + std::string targetOutFileName = + lGetTargetFileName(outFileName, target_list[itarget]); + if (!m->writeOutput(outputType, targetOutFileName.c_str(), includeFileName)) + return 1; + + if (itarget > 0) + { + if (headerFileName != NULL) + if (!m->writeOutput(Module::Header, headerFileName)) + return 1; + if (depsFileName != NULL) + if (!m->writeOutput(Module::Deps,depsFileName)) + return 1; + if (hostStubFileName != NULL) + if (!m->writeOutput(Module::HostStub,hostStubFileName)) + return 1; + if (devStubFileName != NULL) + if (!m->writeOutput(Module::DevStub,devStubFileName)) + return 1; } } + else + ++m->errorCount; + + errorCount += m->errorCount; + delete m; + m = NULL; + + delete g->target; + g->target = NULL; - if (outFileName != NULL) - if (!m->writeOutput(outputType, outFileName, includeFileName)) - return 1; - if (headerFileName != NULL) - if (!m->writeOutput(Module::Header, headerFileName)) - return 1; - if (depsFileName != NULL) - if (!m->writeOutput(Module::Deps,depsFileName)) - return 1; - if (hostStubFileName != NULL) - if (!m->writeOutput(Module::HostStub,hostStubFileName)) - return 1; - if (devStubFileName != NULL) - if (!m->writeOutput(Module::DevStub,devStubFileName)) - return 1; } - else - ++m->errorCount; - - int errorCount = m->errorCount; - delete m; - m = NULL; - - delete g->target; - g->target = NULL; - return errorCount > 0; } - else - if (target == NULL || strchr(target, ',') == NULL) { + else if (target == NULL || strchr(target, ',') == NULL) { // We're only compiling to a single target g->target = new Target(arch, cpu, target, generatePIC); if (!g->target->isValid()) @@ -2542,4 +2550,5 @@ Module::CompileAndOutput(const char *srcFile, return errorCount > 0; } + return true; } From f15cdc03e3fadcaf009b2be2437684705982e9dc Mon Sep 17 00:00:00 2001 From: Evghenii Date: Tue, 29 Oct 2013 14:46:51 +0100 Subject: [PATCH 11/12] nvptx64 generates 2 targets: task and normal function for nvptx64 and export for avx only --- decl.cpp | 15 +++++++-------- func.cpp | 2 ++ module.cpp | 20 ++++++++++++++++++-- type.cpp | 2 +- 4 files changed, 28 insertions(+), 11 deletions(-) diff --git a/decl.cpp b/decl.cpp index 47443375..f21e3c41 100644 --- a/decl.cpp +++ b/decl.cpp @@ -531,7 +531,7 @@ Declarator::InitFromType(const Type *baseType, DeclSpecs *ds) { returnType = returnType->ResolveUnboundVariability(Variability::Varying); bool isTask = ds && ((ds->typeQualifiers & TYPEQUAL_TASK) != 0); - if (isTask && g->target->getISA() == Target::NVPTX64) + if (isTask && g->target->isPTX()) //getISA() == Target::NVPTX64) { ds->storageClass = SC_EXTERN_C; ds->typeQualifiers |= TYPEQUAL_UNMASKED; @@ -546,13 +546,12 @@ Declarator::InitFromType(const Type *baseType, DeclSpecs *ds) { "qualifiers"); return; } -#if 0 /* NVPTX64::task_and_externC */ - if (isExternC && isTask) { - Error(pos, "Function can't have both \"extern \"C\"\" and \"task\" " - "qualifiers"); - return; - } -#endif + if (!g->target->isPTX()) + if (isExternC && isTask) { + Error(pos, "Function can't have both \"extern \"C\"\" and \"task\" " + "qualifiers"); + return; + } if (isExternC && isExported) { Error(pos, "Function can't have both \"extern \"C\"\" and \"export\" " "qualifiers"); diff --git a/func.cpp b/func.cpp index 62372a21..f930ce10 100644 --- a/func.cpp +++ b/func.cpp @@ -527,6 +527,8 @@ Function::GenerateIR() { const FunctionType *type= CastType(sym->type); if (g->target->getISA() == Target::NVPTX64 && type->isExported) return; + if 
(g->target->getISA() != Target::NVPTX64 && g->target->isPTX() && !type->isExported) + return; } { FunctionEmitContext ec(this, sym, function, firstStmtPos); diff --git a/module.cpp b/module.cpp index 071013b8..2f5e6167 100644 --- a/module.cpp +++ b/module.cpp @@ -733,7 +733,7 @@ Module::AddFunctionDeclaration(const std::string &name, if (storageClass == SC_EXTERN_C) { // Make sure the user hasn't supplied both an 'extern "C"' and a // 'task' qualifier with the function - if (functionType->isTask && g->target->getISA() != Target::NVPTX64) { + if (functionType->isTask && !g->target->isPTX()) { //tISA() != Target::NVPTX64) { Error(pos, "\"task\" qualifier is illegal with C-linkage extern " "function \"%s\". Ignoring this function.", name.c_str()); return; @@ -2319,6 +2319,11 @@ Module::CompileAndOutput(const char *srcFile, // We're only compiling to a single target const char * target_list[] = {"nvptx64", "avx"}; int errorCount = 0; + + const char *suffix_orig = strrchr(outFileName, '.'); + ++suffix_orig; + assert(suffix_orig!=NULL); + for (int itarget = 0; itarget < 2; itarget++) { fprintf(stderr, "compiling nvptx64 : target= %s\n",target_list[itarget]); @@ -2345,8 +2350,19 @@ Module::CompileAndOutput(const char *srcFile, } assert(outFileName != NULL); - std::string targetOutFileName = + + std::string targetOutFileName = lGetTargetFileName(outFileName, target_list[itarget]); + if (outputType == Asm) + { + const char * targetOutFileName_c = targetOutFileName.c_str(); + const int suffix = strrchr(targetOutFileName_c, '.') - targetOutFileName_c + 1; + if (itarget == 1 && !strcasecmp(suffix_orig, "ptx")) + { + targetOutFileName[suffix ] = 's'; + targetOutFileName[suffix+1] = 0; + } + } if (!m->writeOutput(outputType, targetOutFileName.c_str(), includeFileName)) return 1; diff --git a/type.cpp b/type.cpp index 23594e2f..04c02989 100644 --- a/type.cpp +++ b/type.cpp @@ -2925,7 +2925,7 @@ FunctionType::GetReturnTypeString() const { llvm::FunctionType * FunctionType::LLVMFunctionType(llvm::LLVMContext *ctx, bool removeMask) const { - if (isTask == true && g->target->getISA() != Target::NVPTX64) + if (isTask == true && !g->target->isPTX()) //getISA() != Target::NVPTX64) Assert(removeMask == false); // Get the LLVM Type *s for the function arguments From ed9bca0e123e31fd382baa73e67360d89dcab8ce Mon Sep 17 00:00:00 2001 From: Evghenii Date: Tue, 29 Oct 2013 15:06:08 +0100 Subject: [PATCH 12/12] add __soa_to_aos*_float1 and __aos_to_soa*_float1 builtins --- builtins.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/builtins.cpp b/builtins.cpp index 91e9a16a..36e31888 100644 --- a/builtins.cpp +++ b/builtins.cpp @@ -338,11 +338,13 @@ lSetInternalFunctions(llvm::Module *module) { "__all", "__any", "__aos_to_soa3_float", + "__aos_to_soa3_float1", "__aos_to_soa3_float16", "__aos_to_soa3_float4", "__aos_to_soa3_float8", "__aos_to_soa3_int32", "__aos_to_soa4_float", + "__aos_to_soa4_float1", "__aos_to_soa4_float16", "__aos_to_soa4_float4", "__aos_to_soa4_float8", @@ -549,11 +551,13 @@ lSetInternalFunctions(llvm::Module *module) { "__shuffle_i64", "__shuffle_i8", "__soa_to_aos3_float", + "__soa_to_aos3_float1", "__soa_to_aos3_float16", "__soa_to_aos3_float4", "__soa_to_aos3_float8", "__soa_to_aos3_int32", "__soa_to_aos4_float", + "__soa_to_aos4_float1", "__soa_to_aos4_float16", "__soa_to_aos4_float4", "__soa_to_aos4_float8",
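---

Note on using the generated PTX: the nvptx64 path in the patches above writes the task kernels into a .ptx file (alongside the avx object), and those kernels are intended to be loaded and launched from host code through the CUDA Driver API. The following is a minimal host-side sketch of that flow, not something taken from these patches: the kernel name "square_kernel", its parameter list, the file name "kernel_nvptx64.ptx", and the launch geometry are all assumptions for illustration, and the real argument packing of an ispc task kernel is not specified here. Only the general pattern -- cuModuleLoad on the generated .ptx followed by cuModuleGetFunction with the kernel's unmangled C name -- reflects what the patches aim to enable.

// Hypothetical host-side launcher for an ispc-generated PTX kernel (sketch only).
#include <cuda.h>
#include <cstdio>
#include <vector>

#define CU_CHECK(call)                                              \
    do {                                                            \
        CUresult err_ = (call);                                     \
        if (err_ != CUDA_SUCCESS) {                                 \
            fprintf(stderr, "CUDA driver error %d at %s:%d\n",      \
                    (int)err_, __FILE__, __LINE__);                 \
            return 1;                                               \
        }                                                           \
    } while (0)

int main() {
    const int n = 1024;
    std::vector<float> in(n, 2.0f), out(n, 0.0f);

    CU_CHECK(cuInit(0));
    CUdevice dev;
    CU_CHECK(cuDeviceGet(&dev, 0));
    CUcontext ctx;
    CU_CHECK(cuCtxCreate(&ctx, 0, dev));

    // Load the PTX emitted by the nvptx64 target and look up the kernel
    // by its C-linkage name (both names are assumed, not from the patches).
    CUmodule mod;
    CU_CHECK(cuModuleLoad(&mod, "kernel_nvptx64.ptx"));
    CUfunction fn;
    CU_CHECK(cuModuleGetFunction(&fn, mod, "square_kernel"));

    // Device buffers and input transfer.
    CUdeviceptr dIn, dOut;
    CU_CHECK(cuMemAlloc(&dIn, n * sizeof(float)));
    CU_CHECK(cuMemAlloc(&dOut, n * sizeof(float)));
    CU_CHECK(cuMemcpyHtoD(dIn, in.data(), n * sizeof(float)));

    // Kernel parameters are passed as an array of pointers to the argument values.
    int nArg = n;
    void *args[] = { &dOut, &dIn, &nArg };
    CU_CHECK(cuLaunchKernel(fn,
                            /*grid*/ 4, 1, 1,
                            /*block*/ 256, 1, 1,
                            /*sharedMemBytes*/ 0, /*stream*/ 0,
                            args, NULL));
    CU_CHECK(cuCtxSynchronize());

    CU_CHECK(cuMemcpyDtoH(out.data(), dOut, n * sizeof(float)));
    printf("out[0] = %f\n", out[0]);

    cuMemFree(dIn);
    cuMemFree(dOut);
    cuModuleUnload(mod);
    cuCtxDestroy(ctx);
    return 0;
}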