diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index 10100098..d402e927 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -29,265 +29,366 @@ ;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; AVX target implementation. -;; -;; Please note that this file uses SSE intrinsics, but LLVM generates AVX -;; instructions, so it doesn't makes sense to change this implemenation. +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128-v16:16:16-v32:32:32-v4:128:128"; +define(`MASK',`i1') +define(`HAVE_GATHER',`1') +define(`HAVE_SCATTER',`1') -ctlztz() -define_prefetches() -define_shuffles() -aossoa() +include(`util.m4') +stdlib_core() +scans() +reduce_equal(WIDTH) +rdrand_decls() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; rounding floats +;; broadcast/rotate/shuffle -declare <4 x float> @llvm.x86.sse41.round.ss(<4 x float>, <4 x float>, i32) nounwind readnone +declare @__smear_float(float) nounwind readnone +declare @__smear_double(double) nounwind readnone +declare @__smear_i8(i8) nounwind readnone +declare @__smear_i16(i16) nounwind readnone +declare @__smear_i32(i32) nounwind readnone +declare @__smear_i64(i64) nounwind readnone -define float @__round_uniform_float(float) nounwind readonly alwaysinline { - ; roundss, round mode nearest 0b00 | don't signal precision exceptions 0b1000 = 8 - ; the roundss intrinsic is a total mess--docs say: - ; - ; __m128 _mm_round_ss (__m128 a, __m128 b, const int c) - ; - ; b is a 128-bit parameter. The lowest 32 bits are the result of the rounding function - ; on b0. The higher order 96 bits are copied directly from input parameter a. The - ; return value is described by the following equations: - ; - ; r0 = RND(b0) - ; r1 = a1 - ; r2 = a2 - ; r3 = a3 - ; - ; It doesn't matter what we pass as a, since we only need the r0 value - ; here. So we pass the same register for both. Further, only the 0th - ; element of the b parameter matters - %xi = insertelement <4 x float> undef, float %0, i32 0 - %xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 8) - %rs = extractelement <4 x float> %xr, i32 0 - ret float %rs -} +declare @__setzero_float() nounwind readnone +declare @__setzero_double() nounwind readnone +declare @__setzero_i8() nounwind readnone +declare @__setzero_i16() nounwind readnone +declare @__setzero_i32() nounwind readnone +declare @__setzero_i64() nounwind readnone -define float @__floor_uniform_float(float) nounwind readonly alwaysinline { - ; see above for round_ss instrinsic discussion... 
- %xi = insertelement <4 x float> undef, float %0, i32 0 - ; roundps, round down 0b01 | don't signal precision exceptions 0b1001 = 9 - %xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 9) - %rs = extractelement <4 x float> %xr, i32 0 - ret float %rs -} +declare @__undef_float() nounwind readnone +declare @__undef_double() nounwind readnone +declare @__undef_i8() nounwind readnone +declare @__undef_i16() nounwind readnone +declare @__undef_i32() nounwind readnone +declare @__undef_i64() nounwind readnone -define float @__ceil_uniform_float(float) nounwind readonly alwaysinline { - ; see above for round_ss instrinsic discussion... - %xi = insertelement <4 x float> undef, float %0, i32 0 - ; roundps, round up 0b10 | don't signal precision exceptions 0b1010 = 10 - %xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 10) - %rs = extractelement <4 x float> %xr, i32 0 - ret float %rs -} +declare @__broadcast_float(, i32) nounwind readnone +declare @__broadcast_double(, i32) nounwind readnone +declare @__broadcast_i8(, i32) nounwind readnone +declare @__broadcast_i16(, i32) nounwind readnone +declare @__broadcast_i32(, i32) nounwind readnone +declare @__broadcast_i64(, i32) nounwind readnone + +declare @__rotate_i8(, i32) nounwind readnone +declare @__rotate_i16(, i32) nounwind readnone +declare @__rotate_float(, i32) nounwind readnone +declare @__rotate_i32(, i32) nounwind readnone +declare @__rotate_double(, i32) nounwind readnone +declare @__rotate_i64(, i32) nounwind readnone + +declare @__shift_i8(, i32) nounwind readnone +declare @__shift_i16(, i32) nounwind readnone +declare @__shift_float(, i32) nounwind readnone +declare @__shift_i32(, i32) nounwind readnone +declare @__shift_double(, i32) nounwind readnone +declare @__shift_i64(, i32) nounwind readnone + +declare @__shuffle_i8(, ) nounwind readnone +declare @__shuffle2_i8(, , + ) nounwind readnone +declare @__shuffle_i16(, ) nounwind readnone +declare @__shuffle2_i16(, , + ) nounwind readnone +declare @__shuffle_float(, + ) nounwind readnone +declare @__shuffle2_float(, , + ) nounwind readnone +declare @__shuffle_i32(, + ) nounwind readnone +declare @__shuffle2_i32(, , + ) nounwind readnone +declare @__shuffle_double(, + ) nounwind readnone +declare @__shuffle2_double(, + , ) nounwind readnone +declare @__shuffle_i64(, + ) nounwind readnone +declare @__shuffle2_i64(, , + ) nounwind readnone ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; rounding doubles +;; aos/soa -declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) nounwind readnone - -define double @__round_uniform_double(double) nounwind readonly alwaysinline { - %xi = insertelement <2 x double> undef, double %0, i32 0 - %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 8) - %rs = extractelement <2 x double> %xr, i32 0 - ret double %rs -} - -define double @__floor_uniform_double(double) nounwind readonly alwaysinline { - ; see above for round_ss instrinsic discussion... - %xi = insertelement <2 x double> undef, double %0, i32 0 - ; roundsd, round down 0b01 | don't signal precision exceptions 0b1001 = 9 - %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 9) - %rs = extractelement <2 x double> %xr, i32 0 - ret double %rs -} - -define double @__ceil_uniform_double(double) nounwind readonly alwaysinline { - ; see above for round_ss instrinsic discussion... 
- %xi = insertelement <2 x double> undef, double %0, i32 0 - ; roundsd, round up 0b10 | don't signal precision exceptions 0b1010 = 10 - %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 10) - %rs = extractelement <2 x double> %xr, i32 0 - ret double %rs -} +declare void @__soa_to_aos3_float( %v0, %v1, + %v2, float * noalias %p) nounwind +declare void @__aos_to_soa3_float(float * noalias %p, * %out0, + * %out1, * %out2) nounwind +declare void @__soa_to_aos4_float( %v0, %v1, + %v2, %v3, + float * noalias %p) nounwind +declare void @__aos_to_soa4_float(float * noalias %p, * noalias %out0, + * noalias %out1, + * noalias %out2, + * noalias %out3) nounwind ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; rcp +;; half conversion routines -declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone - -define float @__rcp_uniform_float(float) nounwind readonly alwaysinline { - ; do the rcpss call - ; uniform float iv = extract(__rcp_u(v), 0); - ; return iv * (2. - v * iv); - %vecval = insertelement <4 x float> undef, float %0, i32 0 - %call = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %vecval) - %scall = extractelement <4 x float> %call, i32 0 - - ; do one N-R iteration to improve precision, as above - %v_iv = fmul float %0, %scall - %two_minus = fsub float 2., %v_iv - %iv_mul = fmul float %scall, %two_minus - ret float %iv_mul -} +declare float @__half_to_float_uniform(i16 %v) nounwind readnone +declare @__half_to_float_varying( %v) nounwind readnone +declare i16 @__float_to_half_uniform(float %v) nounwind readnone +declare @__float_to_half_varying( %v) nounwind readnone ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; rsqrt +;; math -declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone +declare void @__fastmath() nounwind -define float @__rsqrt_uniform_float(float) nounwind readonly alwaysinline { - ; uniform float is = extract(__rsqrt_u(v), 0); - %v = insertelement <4 x float> undef, float %0, i32 0 - %vis = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %v) - %is = extractelement <4 x float> %vis, i32 0 +;; round/floor/ceil - ; Newton-Raphson iteration to improve precision - ; return 0.5 * is * (3. 
- (v * is) * is); - %v_is = fmul float %0, %is - %v_is_is = fmul float %v_is, %is - %three_sub = fsub float 3., %v_is_is - %is_mul = fmul float %is, %three_sub - %half_scale = fmul float 0.5, %is_mul - ret float %half_scale -} +declare float @__round_uniform_float(float) nounwind readnone +declare float @__floor_uniform_float(float) nounwind readnone +declare float @__ceil_uniform_float(float) nounwind readnone +declare double @__round_uniform_double(double) nounwind readnone +declare double @__floor_uniform_double(double) nounwind readnone +declare double @__ceil_uniform_double(double) nounwind readnone + +declare @__round_varying_float() nounwind readnone +declare @__floor_varying_float() nounwind readnone +declare @__ceil_varying_float() nounwind readnone +declare @__round_varying_double() nounwind readnone +declare @__floor_varying_double() nounwind readnone +declare @__ceil_varying_double() nounwind readnone + +;; min/max + +declare float @__max_uniform_float(float, float) nounwind readnone +declare float @__min_uniform_float(float, float) nounwind readnone +declare i32 @__min_uniform_int32(i32, i32) nounwind readnone +declare i32 @__max_uniform_int32(i32, i32) nounwind readnone +declare i32 @__min_uniform_uint32(i32, i32) nounwind readnone +declare i32 @__max_uniform_uint32(i32, i32) nounwind readnone +declare i64 @__min_uniform_int64(i64, i64) nounwind readnone +declare i64 @__max_uniform_int64(i64, i64) nounwind readnone +declare i64 @__min_uniform_uint64(i64, i64) nounwind readnone +declare i64 @__max_uniform_uint64(i64, i64) nounwind readnone +declare double @__min_uniform_double(double, double) nounwind readnone +declare double @__max_uniform_double(double, double) nounwind readnone + +declare @__max_varying_float(, + ) nounwind readnone +declare @__min_varying_float(, + ) nounwind readnone +declare @__min_varying_int32(, ) nounwind readnone +declare @__max_varying_int32(, ) nounwind readnone +declare @__min_varying_uint32(, ) nounwind readnone +declare @__max_varying_uint32(, ) nounwind readnone +declare @__min_varying_int64(, ) nounwind readnone +declare @__max_varying_int64(, ) nounwind readnone +declare @__min_varying_uint64(, ) nounwind readnone +declare @__max_varying_uint64(, ) nounwind readnone +declare @__min_varying_double(, + ) nounwind readnone +declare @__max_varying_double(, + ) nounwind readnone + +;; sqrt/rsqrt/rcp + +declare float @__rsqrt_uniform_float(float) nounwind readnone +declare float @__rcp_uniform_float(float) nounwind readnone +declare float @__sqrt_uniform_float(float) nounwind readnone +declare @__rcp_varying_float() nounwind readnone +declare @__rsqrt_varying_float() nounwind readnone + +declare @__sqrt_varying_float() nounwind readnone + +declare double @__sqrt_uniform_double(double) nounwind readnone +declare @__sqrt_varying_double() nounwind readnone + +;; bit ops + +declare i32 @__popcnt_int32(i32) nounwind readnone +declare i64 @__popcnt_int64(i64) nounwind readnone + +declare i32 @__count_trailing_zeros_i32(i32) nounwind readnone +declare i64 @__count_trailing_zeros_i64(i64) nounwind readnone +declare i32 @__count_leading_zeros_i32(i32) nounwind readnone +declare i64 @__count_leading_zeros_i64(i64) nounwind readnone + +; FIXME: need either to wire these up to the 8-wide SVML entrypoints, +; or, use the macro to call the 4-wide ones twice with our 8-wide +; vectors... 
+ +;; svml + +include(`svml.m4') +svml_stubs(float,f,WIDTH) +svml_stubs(double,d,WIDTH) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; sqrt +;; reductions -declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone +declare i64 @__movmsk() nounwind readnone +declare i1 @__any() nounwind readnone +declare i1 @__all() nounwind readnone +declare i1 @__none() nounwind readnone -define float @__sqrt_uniform_float(float) nounwind readonly alwaysinline { - sse_unary_scalar(ret, 4, float, @llvm.x86.sse.sqrt.ss, %0) - ret float %ret -} +declare i16 @__reduce_add_int8() nounwind readnone +declare i32 @__reduce_add_int16() nounwind readnone + +declare float @__reduce_add_float() nounwind readnone +declare float @__reduce_min_float() nounwind readnone +declare float @__reduce_max_float() nounwind readnone + +declare i64 @__reduce_add_int32() nounwind readnone +declare i32 @__reduce_min_int32() nounwind readnone +declare i32 @__reduce_max_int32() nounwind readnone +declare i32 @__reduce_min_uint32() nounwind readnone +declare i32 @__reduce_max_uint32() nounwind readnone + +declare double @__reduce_add_double() nounwind readnone +declare double @__reduce_min_double() nounwind readnone +declare double @__reduce_max_double() nounwind readnone + +declare i64 @__reduce_add_int64() nounwind readnone +declare i64 @__reduce_min_int64() nounwind readnone +declare i64 @__reduce_max_int64() nounwind readnone +declare i64 @__reduce_min_uint64() nounwind readnone +declare i64 @__reduce_max_uint64() nounwind readnone ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; double precision sqrt +;; unaligned loads/loads+broadcasts -declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone -define double @__sqrt_uniform_double(double) nounwind alwaysinline { - sse_unary_scalar(ret, 2, double, @llvm.x86.sse2.sqrt.sd, %0) - ret double %ret +declare @__masked_load_i8(i8 * nocapture, %mask) nounwind readonly +declare @__masked_load_i16(i8 * nocapture, %mask) nounwind readonly +declare @__masked_load_i32(i8 * nocapture, %mask) nounwind readonly +declare @__masked_load_float(i8 * nocapture, %mask) nounwind readonly +declare @__masked_load_i64(i8 * nocapture, %mask) nounwind readonly +declare @__masked_load_double(i8 * nocapture, %mask) nounwind readonly + +declare void @__masked_store_i8(* nocapture, , + ) nounwind +declare void @__masked_store_i16(* nocapture, , + ) nounwind +declare void @__masked_store_i32(* nocapture, , + ) nounwind +declare void @__masked_store_float(* nocapture, , + ) nounwind +declare void @__masked_store_i64(* nocapture, , + %mask) nounwind +declare void @__masked_store_double(* nocapture, , + %mask) nounwind + + +define void @__masked_store_blend_i8(* nocapture, , + ) nounwind alwaysinline { + %v = load PTR_OP_ARGS(` ') %0 + %v1 = select %2, %1, %v + store %v1, * %0 + ret void } -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; fast math mode +define void @__masked_store_blend_i16(* nocapture, , + ) nounwind alwaysinline { + %v = load PTR_OP_ARGS(` ') %0 + %v1 = select %2, %1, %v + store %v1, * %0 + ret void +} -declare void @llvm.x86.sse.stmxcsr(i8 *) nounwind -declare void @llvm.x86.sse.ldmxcsr(i8 *) nounwind +define void @__masked_store_blend_i32(* nocapture, , + ) nounwind alwaysinline { + %v = load PTR_OP_ARGS(` ') %0 + %v1 = select %2, %1, %v + store %v1, * %0 + ret void +} -define void @__fastmath() nounwind alwaysinline { - %ptr = alloca i32 - %ptr8 = bitcast i32 * %ptr to 
i8 * - call void @llvm.x86.sse.stmxcsr(i8 * %ptr8) - %oldval = load PTR_OP_ARGS(`i32 ') %ptr +define void @__masked_store_blend_float(* nocapture, , + ) nounwind alwaysinline { + %v = load PTR_OP_ARGS(` ') %0 + %v1 = select %2, %1, %v + store %v1, * %0 + ret void +} - ; turn on DAZ (64)/FTZ (32768) -> 32832 - %update = or i32 %oldval, 32832 - store i32 %update, i32 *%ptr - call void @llvm.x86.sse.ldmxcsr(i8 * %ptr8) +define void @__masked_store_blend_i64(* nocapture, + , ) nounwind alwaysinline { + %v = load PTR_OP_ARGS(` ') %0 + %v1 = select %2, %1, %v + store %v1, * %0 + ret void +} + +define void @__masked_store_blend_double(* nocapture, + , ) nounwind alwaysinline { + %v = load PTR_OP_ARGS(` ') %0 + %v1 = select %2, %1, %v + store %v1, * %0 ret void } ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; float min/max +;; gather/scatter -define float @__max_uniform_float(float, float) nounwind readonly alwaysinline { - %cmp = fcmp ogt float %1, %0 - %ret = select i1 %cmp, float %1, float %0 - ret float %ret -} +define(`gather_scatter', ` +declare @__gather_base_offsets32_$1(i8 * nocapture, i32, , + ) nounwind readonly +declare @__gather_base_offsets64_$1(i8 * nocapture, i32, , + ) nounwind readonly +declare @__gather32_$1(, + ) nounwind readonly +declare @__gather64_$1(, + ) nounwind readonly -define float @__min_uniform_float(float, float) nounwind readonly alwaysinline { - %cmp = fcmp ogt float %1, %0 - %ret = select i1 %cmp, float %0, float %1 - ret float %ret -} +declare void @__scatter_base_offsets32_$1(i8* nocapture, i32, , + , ) nounwind +declare void @__scatter_base_offsets64_$1(i8* nocapture, i32, , + , ) nounwind +declare void @__scatter32_$1(, , + ) nounwind +declare void @__scatter64_$1(, , + ) nounwind +') -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; double precision min/max +gather_scatter(i8) +gather_scatter(i16) +gather_scatter(i32) +gather_scatter(float) +gather_scatter(i64) +gather_scatter(double) -define double @__min_uniform_double(double, double) nounwind readnone alwaysinline { - %cmp = fcmp ogt double %1, %0 - %ret = select i1 %cmp, double %0, double %1 - ret double %ret -} - -define double @__max_uniform_double(double, double) nounwind readnone alwaysinline { - %cmp = fcmp ogt double %1, %0 - %ret = select i1 %cmp, double %1, double %0 - ret double %ret -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone -declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone -declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone -declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; int min/max - -define i32 @__min_uniform_int32(i32, i32) nounwind readonly alwaysinline { - %cmp = icmp sgt i32 %1, %0 - %ret = select i1 %cmp, i32 %0, i32 %1 - ret i32 %ret -} - -define i32 @__max_uniform_int32(i32, i32) nounwind readonly alwaysinline { - %cmp = icmp sgt i32 %1, %0 - %ret = select i1 %cmp, i32 %1, i32 %0 - ret i32 %ret -} +declare i32 @__packed_load_active(i32 * nocapture, * nocapture, + ) nounwind +declare i32 @__packed_store_active(i32 * nocapture, %vals, + ) nounwind +declare i32 @__packed_store_active2(i32 * nocapture, %vals, + ) nounwind ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; unsigned int min/max - -define 
i32 @__min_uniform_uint32(i32, i32) nounwind readonly alwaysinline {
-  %cmp = icmp ugt i32 %1, %0
-  %ret = select i1 %cmp, i32 %0, i32 %1
-  ret i32 %ret
-}
-
-define i32 @__max_uniform_uint32(i32, i32) nounwind readonly alwaysinline {
-  %cmp = icmp ugt i32 %1, %0
-  %ret = select i1 %cmp, i32 %1, i32 %0
-  ret i32 %ret
-}
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-;; horizontal ops / reductions
-
-declare i32 @llvm.ctpop.i32(i32) nounwind readnone
-
-define i32 @__popcnt_int32(i32) nounwind readonly alwaysinline {
-  %call = call i32 @llvm.ctpop.i32(i32 %0)
-  ret i32 %call
-}
-
-declare i64 @llvm.ctpop.i64(i64) nounwind readnone
-
-define i64 @__popcnt_int64(i64) nounwind readonly alwaysinline {
-  %call = call i64 @llvm.ctpop.i64(i64 %0)
-  ret i64 %call
-}
+;; prefetch
+declare void @__prefetch_read_uniform_1(i8 * nocapture) nounwind
+declare void @__prefetch_read_uniform_2(i8 * nocapture) nounwind
+declare void @__prefetch_read_uniform_3(i8 * nocapture) nounwind
+declare void @__prefetch_read_uniform_nt(i8 * nocapture) nounwind
+declare void @__prefetch_read_varying_1(<WIDTH x i64> %addr, <WIDTH x MASK> %mask) nounwind
+declare void @__prefetch_read_varying_1_native(i8 * %base, i32 %scale, <WIDTH x i32> %offsets, <WIDTH x MASK> %mask) nounwind
+declare void @__prefetch_read_varying_2(<WIDTH x i64> %addr, <WIDTH x MASK> %mask) nounwind
+declare void @__prefetch_read_varying_2_native(i8 * %base, i32 %scale, <WIDTH x i32> %offsets, <WIDTH x MASK> %mask) nounwind
+declare void @__prefetch_read_varying_3(<WIDTH x i64> %addr, <WIDTH x MASK> %mask) nounwind
+declare void @__prefetch_read_varying_3_native(i8 * %base, i32 %scale, <WIDTH x i32> %offsets, <WIDTH x MASK> %mask) nounwind
+declare void @__prefetch_read_varying_nt(<WIDTH x i64> %addr, <WIDTH x MASK> %mask) nounwind
+declare void @__prefetch_read_varying_nt_native(i8 * %base, i32 %scale, <WIDTH x i32> %offsets, <WIDTH x MASK> %mask) nounwind
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;; int8/int16 builtins
 define_avgs()
 declare_nvptx()
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; reciprocals in double precision, if supported
+
+rsqrtd_decl()
+rcpd_decl()
+
+transcendetals_decl()
+trigonometry_decl()
diff --git a/builtins/target-knl.ll b/builtins/target-knl.ll
index abc39885..8305dda7 100644
--- a/builtins/target-knl.ll
+++ b/builtins/target-knl.ll
@@ -1,4 +1,4 @@
-;; Copyright (c) 2010-2013, Intel Corporation
+;; Copyright (c) 2010-2014, Intel Corporation
 ;; All rights reserved.
 ;;
 ;; Redistribution and use in source and binary forms, with or without
@@ -29,510 +29,6 @@
 ;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 ;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-define(`HAVE_GATHER', `1') - -include(`target-avx-x2.ll') - -rdrand_definition() - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; int min/max - -declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readonly -declare <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32>, <8 x i32>) nounwind readonly - -define <16 x i32> @__min_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { - binary8to16(m, i32, @llvm.x86.avx2.pmins.d, %0, %1) - ret <16 x i32> %m -} - -define <16 x i32> @__max_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { - binary8to16(m, i32, @llvm.x86.avx2.pmaxs.d, %0, %1) - ret <16 x i32> %m -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; unsigned int min/max - -declare <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32>, <8 x i32>) nounwind readonly -declare <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32>, <8 x i32>) nounwind readonly - -define <16 x i32> @__min_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { - binary8to16(m, i32, @llvm.x86.avx2.pminu.d, %0, %1) - ret <16 x i32> %m -} - -define <16 x i32> @__max_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { - binary8to16(m, i32, @llvm.x86.avx2.pmaxu.d, %0, %1) - ret <16 x i32> %m -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; float/half conversions - -declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readnone -; 0 is round nearest even -declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readnone - -define <16 x float> @__half_to_float_varying(<16 x i16> %v) nounwind readnone { - %r_0 = shufflevector <16 x i16> %v, <16 x i16> undef, - <8 x i32> - %vr_0 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %r_0) - %r_1 = shufflevector <16 x i16> %v, <16 x i16> undef, - <8 x i32> - %vr_1 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %r_1) - %r = shufflevector <8 x float> %vr_0, <8 x float> %vr_1, - <16 x i32> - ret <16 x float> %r -} - -define <16 x i16> @__float_to_half_varying(<16 x float> %v) nounwind readnone { - %r_0 = shufflevector <16 x float> %v, <16 x float> undef, - <8 x i32> - %vr_0 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %r_0, i32 0) - %r_1 = shufflevector <16 x float> %v, <16 x float> undef, - <8 x i32> - %vr_1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %r_1, i32 0) - %r = shufflevector <8 x i16> %vr_0, <8 x i16> %vr_1, - <16 x i32> - ret <16 x i16> %r -} - -define float @__half_to_float_uniform(i16 %v) nounwind readnone { - %v1 = bitcast i16 %v to <1 x i16> - %vv = shufflevector <1 x i16> %v1, <1 x i16> undef, - <8 x i32> - %rv = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %vv) - %r = extractelement <8 x float> %rv, i32 0 - ret float %r -} - -define i16 @__float_to_half_uniform(float %v) nounwind readnone { - %v1 = bitcast float %v to <1 x float> - %vv = shufflevector <1 x float> %v1, <1 x float> undef, - <8 x i32> - ; round to nearest even - %rv = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %vv, i32 0) - %r = extractelement <8 x i16> %rv, i32 0 - ret i16 %r -} - - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; gather - -declare void @llvm.trap() noreturn nounwind - -; $1: type -; $2: var base name -define(`extract_4s', ` - %$2_1 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> - %$2_2 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> - %$2_3 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> - 
%$2_4 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> -') - -; $1: type -; $2: var base name -define(`extract_8s', ` - %$2_1 = shufflevector <16 x $1> %$2, <16 x $1> undef, - <8 x i32> - %$2_2 = shufflevector <16 x $1> %$2, <16 x $1> undef, - <8 x i32> -') - -; $1: element type -; $2: ret name -; $3: v1 -; $4: v2 -define(`assemble_8s', ` - %$2 = shufflevector <8 x $1> %$3, <8 x $1> %$4, - <16 x i32> -') - -; $1: element type -; $2: ret name -; $3: v1 -; $4: v2 -; $5: v3 -; $6: v4 -define(`assemble_4s', ` - %$2_1 = shufflevector <4 x $1> %$3, <4 x $1> %$4, - <8 x i32> - %$2_2 = shufflevector <4 x $1> %$5, <4 x $1> %$6, - <8 x i32> - assemble_8s($1, $2, $2_1, $2_2) -') - - -gen_gather(i8) -gen_gather(i16) - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; int32 gathers - -declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %target, i8 * %ptr, - <8 x i32> %indices, <8 x i32> %mask, i8 %scale) readonly nounwind -declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %target, i8 * %ptr, - <4 x i64> %indices, <4 x i32> %mask, i8 %scale) readonly nounwind - -define <16 x i32> @__gather_base_offsets32_i32(i8 * %ptr, i32 %scale, <16 x i32> %offsets, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - extract_8s(i32, offsets) - extract_8s(i32, vecmask) - - %v1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * %ptr, - <8 x i32> %offsets_1, <8 x i32> %vecmask_1, i8 %scale8) - %v2 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * %ptr, - <8 x i32> %offsets_2, <8 x i32> %vecmask_2, i8 %scale8) - - assemble_8s(i32, v, v1, v2) - - ret <16 x i32> %v -} - - -define <16 x i32> @__gather_base_offsets64_i32(i8 * %ptr, - i32 %scale, <16 x i64> %offsets, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - - extract_4s(i32, vecmask) - extract_4s(i64, offsets) - - %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, - <4 x i64> %offsets_1, <4 x i32> %vecmask_1, i8 %scale8) - %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, - <4 x i64> %offsets_2, <4 x i32> %vecmask_2, i8 %scale8) - %v3 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, - <4 x i64> %offsets_3, <4 x i32> %vecmask_3, i8 %scale8) - %v4 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, - <4 x i64> %offsets_4, <4 x i32> %vecmask_4, i8 %scale8) - - assemble_4s(i32, v, v1, v2, v3, v4) - - ret <16 x i32> %v -} - - -define <16 x i32> @__gather32_i32(<16 x i32> %ptrs, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - extract_8s(i32, ptrs) - extract_8s(i32, vecmask) - - %v1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * null, - <8 x i32> %ptrs_1, <8 x i32> %vecmask_1, i8 1) - %v2 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * null, - <8 x i32> %ptrs_2, <8 x i32> %vecmask_2, i8 1) - - assemble_8s(i32, v, v1, v2) - - ret <16 x i32> %v -} - - -define <16 x i32> @__gather64_i32(<16 x i64> %ptrs, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - extract_4s(i64, ptrs) - extract_4s(i32, vecmask) - - %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, - <4 x i64> %ptrs_1, <4 x i32> %vecmask_1, i8 1) - %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, - <4 x i64> %ptrs_2, <4 x i32> %vecmask_2, i8 1) - %v3 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, - <4 
x i64> %ptrs_3, <4 x i32> %vecmask_3, i8 1) - %v4 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, - <4 x i64> %ptrs_4, <4 x i32> %vecmask_4, i8 1) - - assemble_4s(i32, v, v1, v2, v3, v4) - - ret <16 x i32> %v -} - - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; float gathers - -declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %target, i8 * %ptr, - <8 x i32> %indices, <8 x float> %mask, i8 %scale8) readonly nounwind -declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %target, i8 * %ptr, - <4 x i64> %indices, <4 x float> %mask, i8 %scale8) readonly nounwind - -define <16 x float> @__gather_base_offsets32_float(i8 * %ptr, - i32 %scale, <16 x i32> %offsets, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %mask = bitcast <16 x i32> %vecmask to <16 x float> - extract_8s(i32, offsets) - extract_8s(float, mask) - - %v1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * %ptr, - <8 x i32> %offsets_1, <8 x float> %mask_1, i8 %scale8) - %v2 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * %ptr, - <8 x i32> %offsets_2, <8 x float> %mask_2, i8 %scale8) - - assemble_8s(float, v, v1, v2) - - ret <16 x float> %v -} - - -define <16 x float> @__gather_base_offsets64_float(i8 * %ptr, - i32 %scale, <16 x i64> %offsets, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %mask = bitcast <16 x i32> %vecmask to <16 x float> - extract_4s(i64, offsets) - extract_4s(float, mask) - - %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, - <4 x i64> %offsets_1, <4 x float> %mask_1, i8 %scale8) - %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, - <4 x i64> %offsets_2, <4 x float> %mask_2, i8 %scale8) - %v3 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, - <4 x i64> %offsets_3, <4 x float> %mask_3, i8 %scale8) - %v4 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, - <4 x i64> %offsets_4, <4 x float> %mask_4, i8 %scale8) - - assemble_4s(float, v, v1, v2, v3, v4) - - ret <16 x float> %v -} - - -define <16 x float> @__gather32_float(<16 x i32> %ptrs, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %mask = bitcast <16 x i32> %vecmask to <16 x float> - extract_8s(float, mask) - extract_8s(i32, ptrs) - - %v1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * null, - <8 x i32> %ptrs_1, <8 x float> %mask_1, i8 1) - %v2 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * null, - <8 x i32> %ptrs_2, <8 x float> %mask_2, i8 1) - - assemble_8s(float, v, v1, v2) - - ret <16 x float> %v -} - - -define <16 x float> @__gather64_float(<16 x i64> %ptrs, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %mask = bitcast <16 x i32> %vecmask to <16 x float> - extract_4s(i64, ptrs) - extract_4s(float, mask) - - %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, - <4 x i64> %ptrs_1, <4 x float> %mask_1, i8 1) - %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, - <4 x i64> %ptrs_2, <4 x float> %mask_2, i8 1) - %v3 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, - <4 x i64> %ptrs_3, <4 x float> %mask_3, i8 1) - %v4 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, - <4 x i64> %ptrs_4, <4 x float> %mask_4, i8 1) - - 
assemble_4s(float, v, v1, v2, v3, v4) - - ret <16 x float> %v -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; int64 gathers - -declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %target, i8 * %ptr, - <4 x i32> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind -declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %target, i8 * %ptr, - <4 x i64> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind - -define <16 x i64> @__gather_base_offsets32_i64(i8 * %ptr, - i32 %scale, <16 x i32> %offsets, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %vecmask = sext <16 x i32> %mask32 to <16 x i64> - extract_4s(i32, offsets) - extract_4s(i64, vecmask) - - %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i32> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8) - %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i32> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8) - %v3 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i32> %offsets_3, <4 x i64> %vecmask_3, i8 %scale8) - %v4 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i32> %offsets_4, <4 x i64> %vecmask_4, i8 %scale8) - - assemble_4s(i64, v, v1, v2, v3, v4) - - ret <16 x i64> %v -} - - -define <16 x i64> @__gather_base_offsets64_i64(i8 * %ptr, - i32 %scale, <16 x i64> %offsets, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %vecmask = sext <16 x i32> %mask32 to <16 x i64> - extract_4s(i64, offsets) - extract_4s(i64, vecmask) - - %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i64> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8) - %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i64> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8) - %v3 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i64> %offsets_3, <4 x i64> %vecmask_3, i8 %scale8) - %v4 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i64> %offsets_4, <4 x i64> %vecmask_4, i8 %scale8) - - assemble_4s(i64, v, v1, v2, v3, v4) - - ret <16 x i64> %v -} - - -define <16 x i64> @__gather32_i64(<16 x i32> %ptrs, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %vecmask = sext <16 x i32> %mask32 to <16 x i64> - extract_4s(i32, ptrs) - extract_4s(i64, vecmask) - - %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, - <4 x i32> %ptrs_1, <4 x i64> %vecmask_1, i8 1) - %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, - <4 x i32> %ptrs_2, <4 x i64> %vecmask_2, i8 1) - %v3 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, - <4 x i32> %ptrs_3, <4 x i64> %vecmask_3, i8 1) - %v4 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, - <4 x i32> %ptrs_4, <4 x i64> %vecmask_4, i8 1) - - assemble_4s(i64, v, v1, v2, v3, v4) - - ret <16 x i64> %v -} - -define <16 x i64> @__gather64_i64(<16 x i64> %ptrs, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %vecmask = sext <16 x i32> %mask32 to <16 x i64> - extract_4s(i64, ptrs) - extract_4s(i64, vecmask) - - %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, - <4 x i64> %ptrs_1, <4 x i64> %vecmask_1, i8 1) - %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, - <4 x i64> %ptrs_2, <4 x i64> 
%vecmask_2, i8 1) - %v3 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, - <4 x i64> %ptrs_3, <4 x i64> %vecmask_3, i8 1) - %v4 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, - <4 x i64> %ptrs_4, <4 x i64> %vecmask_4, i8 1) - - assemble_4s(i64, v, v1, v2, v3, v4) - - ret <16 x i64> %v -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; double gathers - -declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %target, i8 * %ptr, - <4 x i64> %indices, <4 x double> %mask, i8 %scale) readonly nounwind -declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %target, i8 * %ptr, - <4 x i32> %indices, <4 x double> %mask, i8 %scale) readonly nounwind - -define <16 x double> @__gather_base_offsets32_double(i8 * %ptr, - i32 %scale, <16 x i32> %offsets, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> - %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> - extract_4s(i32, offsets) - extract_4s(double, vecmask) - - %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i32> %offsets_1, <4 x double> %vecmask_1, i8 %scale8) - %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i32> %offsets_2, <4 x double> %vecmask_2, i8 %scale8) - %v3 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i32> %offsets_3, <4 x double> %vecmask_3, i8 %scale8) - %v4 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i32> %offsets_4, <4 x double> %vecmask_4, i8 %scale8) - - assemble_4s(double, v, v1, v2, v3, v4) - - ret <16 x double> %v -} - - -define <16 x double> @__gather_base_offsets64_double(i8 * %ptr, - i32 %scale, <16 x i64> %offsets, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> - %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> - extract_4s(i64, offsets) - extract_4s(double, vecmask) - - %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i64> %offsets_1, <4 x double> %vecmask_1, i8 %scale8) - %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i64> %offsets_2, <4 x double> %vecmask_2, i8 %scale8) - %v3 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i64> %offsets_3, <4 x double> %vecmask_3, i8 %scale8) - %v4 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i64> %offsets_4, <4 x double> %vecmask_4, i8 %scale8) - - assemble_4s(double, v, v1, v2, v3, v4) - - ret <16 x double> %v -} - - -define <16 x double> @__gather32_double(<16 x i32> %ptrs, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> - %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> - extract_4s(i32, ptrs) - extract_4s(double, vecmask) - - %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, - <4 x i32> %ptrs_1, <4 x double> %vecmask_1, i8 1) - %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, - <4 x i32> %ptrs_2, <4 x double> %vecmask_2, i8 1) - %v3 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, - <4 x i32> %ptrs_3, <4 x double> %vecmask_3, i8 1) - %v4 = call <4 x double> 
@llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null,
-                 <4 x i32> %ptrs_4, <4 x double> %vecmask_4, i8 1)
-
-  assemble_4s(double, v, v1, v2, v3, v4)
-
-  ret <16 x double> %v
-}
-
-
-define <16 x double> @__gather64_double(<16 x i64> %ptrs,
-                                        <16 x i32> %mask32) nounwind readonly alwaysinline {
-  %vecmask64 = sext <16 x i32> %mask32 to <16 x i64>
-  %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double>
-  extract_4s(i64, ptrs)
-  extract_4s(double, vecmask)
-
-  %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
-                 <4 x i64> %ptrs_1, <4 x double> %vecmask_1, i8 1)
-  %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
-                 <4 x i64> %ptrs_2, <4 x double> %vecmask_2, i8 1)
-  %v3 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
-                 <4 x i64> %ptrs_3, <4 x double> %vecmask_3, i8 1)
-  %v4 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
-                 <4 x i64> %ptrs_4, <4 x double> %vecmask_4, i8 1)
-
-  assemble_4s(double, v, v1, v2, v3, v4)
-
-  ret <16 x double> %v
-}
+define(`WIDTH',`16')
+include(`target-avx512-common.ll')
+saturation_arithmetic_novec()
diff --git a/ispc.cpp b/ispc.cpp
index 3596e171..05affd32 100644
--- a/ispc.cpp
+++ b/ispc.cpp
@@ -1126,7 +1126,9 @@ Target::SupportedTargets() {
         "avx2-i32x8, avx2-i32x16, avx2-i64x4, "
         "generic-x1, generic-x4, generic-x8, generic-x16, "
         "generic-x32, generic-x64, *-generic-x16, "
+#if !defined(LLVM_3_2) && !defined(LLVM_3_3) && !defined(LLVM_3_4) && !defined(LLVM_3_5) && !defined(LLVM_3_6) // LLVM 3.7+
         "knl-avx512"
+#endif
 #ifdef ISPC_ARM_ENABLED
         ", neon-i8x16, neon-i16x8, neon-i32x4"
 #endif
@@ -1195,8 +1197,10 @@ Target::ISAToString(ISA isa) {
         return "avx11";
     case Target::AVX2:
         return "avx2";
+#if !defined(LLVM_3_2) && !defined(LLVM_3_3) && !defined(LLVM_3_4) && !defined(LLVM_3_5) && !defined(LLVM_3_6) // LLVM 3.7+
     case Target::KNL_AVX512:
         return "knl-avx512";
+#endif
     case Target::SKX:
         return "skx";
     case Target::GENERIC:
@@ -1241,8 +1245,10 @@ Target::ISAToTargetString(ISA isa) {
         return "avx1.1-i32x8";
     case Target::AVX2:
         return "avx2-i32x8";
+#if !defined(LLVM_3_2) && !defined(LLVM_3_3) && !defined(LLVM_3_4) && !defined(LLVM_3_5) && !defined(LLVM_3_6) // LLVM 3.7+
     case Target::KNL_AVX512:
         return "knl-avx512";
+#endif
     case Target::SKX:
         return "avx2";
     case Target::GENERIC:
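
Note on the new common file: it leaves most builtins as bare declarations for the backend and keeps only the mask-blend stores as generic definitions, parameterized on the m4 symbols WIDTH and MASK (target-knl.ll sets WIDTH to 16, the common file sets MASK to i1, and PTR_OP_ARGS selects the LLVM 3.7+ typed-load syntax). As a rough sketch only — the <16 x ...> element types below are reconstructed for illustration, not copied verbatim from the patch — __masked_store_blend_i32 expands to approximately:

; sketch of the expanded blend store for knl-avx512 (assumes WIDTH=16, MASK=i1)
define void @__masked_store_blend_i32(<16 x i32>* nocapture, <16 x i32>,
                                      <16 x i1>) nounwind alwaysinline {
  ; read the current contents of the destination
  %v = load <16 x i32>, <16 x i32>* %0
  ; per lane: take the new value where the mask bit is set, else keep the old one
  %v1 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %v
  ; write the blended vector back as a full, unmasked store
  store <16 x i32> %v1, <16 x i32>* %0
  ret void
}

Unlike a true masked store, this read-modify-write touches all WIDTH lanes, which is presumably why the separate __masked_store_* entry points stay as plain declarations that the backend can lower to native AVX-512 masked stores.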