diff --git a/Makefile b/Makefile index 023a63b4..5f6b7ba1 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,8 @@ CXX_SRC=builtins.cpp ctx.cpp decl.cpp expr.cpp ispc.cpp \ util.cpp HEADERS=builtins.h ctx.h decl.h expr.h ispc.h llvmutil.h module.h \ opt.h stmt.h sym.h type.h util.h -BUILTINS_SRC=builtins-avx.ll builtins-sse2.ll builtins-sse4.ll builtins-sse4x2.ll +BUILTINS_SRC=builtins-avx.ll builtins-avx-x2.ll builtins-sse2.ll \ + builtins-sse4.ll builtins-sse4x2.ll BISON_SRC=parse.yy FLEX_SRC=lex.ll @@ -105,7 +106,7 @@ objs/lex.o: objs/lex.cpp $(HEADERS) objs/parse.cc @echo Compiling $< @$(CXX) $(CXXFLAGS) -o $@ -c $< -objs/builtins-%.cpp: builtins-%.ll builtins.m4 builtins-sse.ll +objs/builtins-%.cpp: builtins-%.ll builtins.m4 builtins-sse.ll builtins-avx-common.ll @echo Creating C++ source from builtin definitions file $< @m4 -DLLVM_VERSION=$(LLVM_VERSION) builtins.m4 $< | ./bitcode2cpp.py $< > $@ diff --git a/builtins-avx-common.ll b/builtins-avx-common.ll new file mode 100644 index 00000000..a632c40a --- /dev/null +++ b/builtins-avx-common.ll @@ -0,0 +1,278 @@ +;; Copyright (c) 2010-2011, Intel Corporation +;; All rights reserved. +;; +;; Redistribution and use in source and binary forms, with or without +;; modification, are permitted provided that the following conditions are +;; met: +;; +;; * Redistributions of source code must retain the above copyright +;; notice, this list of conditions and the following disclaimer. +;; +;; * Redistributions in binary form must reproduce the above copyright +;; notice, this list of conditions and the following disclaimer in the +;; documentation and/or other materials provided with the distribution. +;; +;; * Neither the name of Intel Corporation nor the names of its +;; contributors may be used to endorse or promote products derived from +;; this software without specific prior written permission. +;; +;; +;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER +;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; *** Untested *** AVX target implementation. +;; +;; The LLVM AVX code generator is incomplete, so the ispc AVX target +;; hasn't yet been tested. There is therefore a higher-than-normal +;; chance that there are bugs in the code in this file. + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; rcp + +declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone + +define internal float @__rcp_uniform_float(float) nounwind readonly alwaysinline { +; uniform float iv = extract(__rcp_u(v), 0); +; return iv * (2. 
- v * iv);
+    %vecval = insertelement <4 x float> undef, float %0, i32 0
+    %call = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %vecval)
+    %scall = extractelement <4 x float> %call, i32 0
+
+    ; do one N-R iteration
+    %v_iv = fmul float %0, %scall
+    %two_minus = fsub float 2., %v_iv
+    %iv_mul = fmul float %scall, %two_minus
+    ret float %iv_mul
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; rounding floats
+
+declare <4 x float> @llvm.x86.sse41.round.ss(<4 x float>, <4 x float>, i32) nounwind readnone
+
+define internal float @__round_uniform_float(float) nounwind readonly alwaysinline {
+    ; roundss, round mode nearest 0b00 | don't signal precision exceptions 0b1000 = 8
+    ; the roundss intrinsic is a total mess--docs say:
+    ;
+    ;  __m128 _mm_round_ss (__m128 a, __m128 b, const int c)
+    ;
+    ;  b is a 128-bit parameter. The lowest 32 bits are the result of the rounding function
+    ;  on b0. The higher order 96 bits are copied directly from input parameter a. The
+    ;  return value is described by the following equations:
+    ;
+    ;  r0 = RND(b0)
+    ;  r1 = a1
+    ;  r2 = a2
+    ;  r3 = a3
+    ;
+    ;  It doesn't matter what we pass as a, since we only need the r0 value
+    ;  here.  So we pass the same register for both.
+    %xi = insertelement <4 x float> undef, float %0, i32 0
+    %xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 8)
+    %rs = extractelement <4 x float> %xr, i32 0
+    ret float %rs
+}
+
+define internal float @__floor_uniform_float(float) nounwind readonly alwaysinline {
+    ; see above for round_ss intrinsic discussion...
+    %xi = insertelement <4 x float> undef, float %0, i32 0
+    ; roundps, round down 0b01 | don't signal precision exceptions 0b1000 = 9
+    %xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 9)
+    %rs = extractelement <4 x float> %xr, i32 0
+    ret float %rs
+}
+
+define internal float @__ceil_uniform_float(float) nounwind readonly alwaysinline {
+    ; see above for round_ss intrinsic discussion...
+    %xi = insertelement <4 x float> undef, float %0, i32 0
+    ; roundps, round up 0b10 | don't signal precision exceptions 0b1000 = 10
+    %xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 10)
+    %rs = extractelement <4 x float> %xr, i32 0
+    ret float %rs
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; rounding doubles
+
+declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) nounwind readnone
+
+define internal double @__round_uniform_double(double) nounwind readonly alwaysinline {
+    %xi = insertelement <2 x double> undef, double %0, i32 0
+    %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 8)
+    %rs = extractelement <2 x double> %xr, i32 0
+    ret double %rs
+}
+
+define internal double @__floor_uniform_double(double) nounwind readonly alwaysinline {
+    ; see above for round_ss intrinsic discussion...
+    %xi = insertelement <2 x double> undef, double %0, i32 0
+    ; roundpd, round down 0b01 | don't signal precision exceptions 0b1000 = 9
+    %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 9)
+    %rs = extractelement <2 x double> %xr, i32 0
+    ret double %rs
+}
+
+define internal double @__ceil_uniform_double(double) nounwind readonly alwaysinline {
+    ; see above for round_ss intrinsic discussion...
+ %xi = insertelement <2 x double> undef, double %0, i32 0 + ; roundps, round up 0b10 | don't signal precision exceptions 0b1000 = 10 + %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 10) + %rs = extractelement <2 x double> %xr, i32 0 + ret double %rs +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; rsqrt + +declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone + +define internal float @__rsqrt_uniform_float(float) nounwind readonly alwaysinline { + ; uniform float is = extract(__rsqrt_u(v), 0); + %v = insertelement <4 x float> undef, float %0, i32 0 + %vis = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %v) + %is = extractelement <4 x float> %vis, i32 0 + + ; return 0.5 * is * (3. - (v * is) * is); + %v_is = fmul float %0, %is + %v_is_is = fmul float %v_is, %is + %three_sub = fsub float 3., %v_is_is + %is_mul = fmul float %is, %three_sub + %half_scale = fmul float 0.5, %is_mul + ret float %half_scale +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; sqrt + +declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone + +define internal float @__sqrt_uniform_float(float) nounwind readonly alwaysinline { + sse_unary_scalar(ret, 4, float, @llvm.x86.sse.sqrt.ss, %0) + ret float %ret +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; fastmath + +declare void @llvm.x86.sse.stmxcsr(i8 *) nounwind +declare void @llvm.x86.sse.ldmxcsr(i8 *) nounwind + +define internal void @__fastmath() nounwind alwaysinline { + %ptr = alloca i32 + %ptr8 = bitcast i32 * %ptr to i8 * + call void @llvm.x86.sse.stmxcsr(i8 * %ptr8) + %oldval = load i32 *%ptr + + ; turn on DAZ (64)/FTZ (32768) -> 32832 + %update = or i32 %oldval, 32832 + store i32 %update, i32 *%ptr + call void @llvm.x86.sse.ldmxcsr(i8 * %ptr8) + ret void +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; float min/max + +declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind readnone +declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind readnone + +define internal float @__max_uniform_float(float, float) nounwind readonly alwaysinline { + sse_binary_scalar(ret, 4, float, @llvm.x86.sse.max.ss, %0, %1) + ret float %ret +} + +define internal float @__min_uniform_float(float, float) nounwind readonly alwaysinline { + sse_binary_scalar(ret, 4, float, @llvm.x86.sse.min.ss, %0, %1) + ret float %ret +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; int min/max + +declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone +declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone + +define internal i32 @__min_uniform_int32(i32, i32) nounwind readonly alwaysinline { + sse_binary_scalar(ret, 4, i32, @llvm.x86.sse41.pminsd, %0, %1) + ret i32 %ret +} + +define internal i32 @__max_uniform_int32(i32, i32) nounwind readonly alwaysinline { + sse_binary_scalar(ret, 4, i32, @llvm.x86.sse41.pmaxsd, %0, %1) + ret i32 %ret +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; unsigned int min/max + +declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone +declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone + +define internal i32 @__min_uniform_uint32(i32, i32) nounwind readonly alwaysinline { + sse_binary_scalar(ret, 4, i32, 
@llvm.x86.sse41.pminud, %0, %1)
+    ret i32 %ret
+}
+
+define internal i32 @__max_uniform_uint32(i32, i32) nounwind readonly alwaysinline {
+    sse_binary_scalar(ret, 4, i32, @llvm.x86.sse41.pmaxud, %0, %1)
+    ret i32 %ret
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; horizontal ops
+
+declare i32 @llvm.ctpop.i32(i32) nounwind readnone
+
+define internal i32 @__popcnt_int32(i32) nounwind readonly alwaysinline {
+    %call = call i32 @llvm.ctpop.i32(i32 %0)
+    ret i32 %call
+}
+
+declare i64 @llvm.ctpop.i64(i64) nounwind readnone
+
+define internal i64 @__popcnt_int64(i64) nounwind readonly alwaysinline {
+    %call = call i64 @llvm.ctpop.i64(i64 %0)
+    ret i64 %call
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; double precision sqrt
+
+declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone
+
+define internal double @__sqrt_uniform_double(double) nounwind alwaysinline {
+    sse_unary_scalar(ret, 2, double, @llvm.x86.sse2.sqrt.sd, %0)
+    ret double %ret
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; double precision min/max
+
+declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind readnone
+declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone
+
+define internal double @__min_uniform_double(double, double) nounwind readnone alwaysinline {
+    sse_binary_scalar(ret, 2, double, @llvm.x86.sse2.min.sd, %0, %1)
+    ret double %ret
+}
+
+define internal double @__max_uniform_double(double, double) nounwind readnone alwaysinline {
+    sse_binary_scalar(ret, 2, double, @llvm.x86.sse2.max.sd, %0, %1)
+    ret double %ret
+}
diff --git a/builtins-avx-x2.ll b/builtins-avx-x2.ll
new file mode 100644
index 00000000..4000425f
--- /dev/null
+++ b/builtins-avx-x2.ll
@@ -0,0 +1,665 @@
+;; Copyright (c) 2010-2011, Intel Corporation
+;; All rights reserved.
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are
+;; met:
+;;
+;;   * Redistributions of source code must retain the above copyright
+;;     notice, this list of conditions and the following disclaimer.
+;;
+;;   * Redistributions in binary form must reproduce the above copyright
+;;     notice, this list of conditions and the following disclaimer in the
+;;     documentation and/or other materials provided with the distribution.
+;;
+;;   * Neither the name of Intel Corporation nor the names of its
+;;     contributors may be used to endorse or promote products derived from
+;;     this software without specific prior written permission.
+;;
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+;; PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER
+;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
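+
+;; How the "x2" target works: it runs a 16-wide program on 8-wide AVX
+;; hardware, so the 16-wide builtins below split their operands in half and
+;; issue the corresponding 256-bit AVX operation twice (see the unary8to16
+;; and binary8to16 macros in builtins.m4).  Roughly, in C-like pseudocode
+;; (the helper names here are illustrative only, not real ispc or LLVM
+;; helpers):
+;;
+;;     float16 op16(float16 v) {
+;;         float8 lo = lower_half(v), hi = upper_half(v);
+;;         return concat(op8(lo), op8(hi));
+;;     }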
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; *** Untested *** AVX target implementation.
+;;
+;; The LLVM AVX code generator is incomplete, so the ispc AVX target
+;; hasn't yet been tested.  There is therefore a higher-than-normal
+;; chance that there are bugs in the code in this file.
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Basic 16-wide definitions
+
+stdlib_core(16)
+packed_load_and_store(16)
+scans(16)
+int64minmax(16)
+
+include(`builtins-avx-common.ll')
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; rcp
+
+declare <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float>) nounwind readnone
+
+define internal <16 x float> @__rcp_varying_float(<16 x float>) nounwind readonly alwaysinline {
+    ; float iv = __rcp_v(v);
+    ; return iv * (2. - v * iv);
+
+    unary8to16(call, float, @llvm.x86.avx.rcp.ps.256, %0)
+    ; do one N-R iteration
+    %v_iv = fmul <16 x float> %0, %call
+    %two_minus = fsub <16 x float> <float 2., float 2., float 2., float 2.,
+                                    float 2., float 2., float 2., float 2.,
+                                    float 2., float 2., float 2., float 2.,
+                                    float 2., float 2., float 2., float 2.>, %v_iv
+    %iv_mul = fmul <16 x float> %call, %two_minus
+    ret <16 x float> %iv_mul
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; rounding floats
+
+declare <8 x float> @llvm.x86.avx.round.ps.256(<8 x float>, i32) nounwind readnone
+
+define internal <16 x float> @__round_varying_float(<16 x float>) nounwind readonly alwaysinline {
+    ; roundps, round mode nearest 0b00 | don't signal precision exceptions 0b1000 = 8
+    round8to16(%0, 8)
+}
+
+define internal <16 x float> @__floor_varying_float(<16 x float>) nounwind readonly alwaysinline {
+    ; roundps, round down 0b01 | don't signal precision exceptions 0b1001 = 9
+    round8to16(%0, 9)
+}
+
+define internal <16 x float> @__ceil_varying_float(<16 x float>) nounwind readonly alwaysinline {
+    ; roundps, round up 0b10 | don't signal precision exceptions 0b1010 = 10
+    round8to16(%0, 10)
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; rounding doubles
+
+declare <4 x double> @llvm.x86.avx.round.pd.256(<4 x double>, i32) nounwind readnone
+
+define internal <16 x double> @__round_varying_double(<16 x double>) nounwind readonly alwaysinline {
+    round4to16double(%0, 8)
+}
+
+define internal <16 x double> @__floor_varying_double(<16 x double>) nounwind readonly alwaysinline {
+    round4to16double(%0, 9)
+}
+
+define internal <16 x double> @__ceil_varying_double(<16 x double>) nounwind readonly alwaysinline {
+    round4to16double(%0, 10)
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; rsqrt
+
+declare <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float>) nounwind readnone
+
+define internal <16 x float> @__rsqrt_varying_float(<16 x float> %v) nounwind readonly alwaysinline {
+    ; float is = __rsqrt_v(v);
+    unary8to16(is, float, @llvm.x86.avx.rsqrt.ps.256, %v)
+    ; return 0.5 * is * (3. - (v * is) * is);
+    %v_is = fmul <16 x float> %v, %is
+    %v_is_is = fmul <16 x float> %v_is, %is
+    %three_sub = fsub <16 x float> <float 3., float 3., float 3., float 3.,
+                                    float 3., float 3., float 3., float 3.,
+                                    float 3., float 3., float 3., float 3.,
+                                    float 3., float 3., float 3., float 3.>, %v_is_is
+    %is_mul = fmul <16 x float> %is, %three_sub
+    %half_scale = fmul <16 x float> <float 0.5, float 0.5, float 0.5, float 0.5,
+                                     float 0.5, float 0.5, float 0.5, float 0.5,
+                                     float 0.5, float 0.5, float 0.5, float 0.5,
+                                     float 0.5, float 0.5, float 0.5, float 0.5>, %is_mul
+    ret <16 x float> %half_scale
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; sqrt
+
+declare <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float>) nounwind readnone
+
+define internal <16 x float> @__sqrt_varying_float(<16 x float>) nounwind readonly alwaysinline {
+    unary8to16(call, float, @llvm.x86.avx.sqrt.ps.256, %0)
+    ret <16 x float> %call
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; svml
+
+; FIXME: need either to wire these up to the 8-wide SVML entrypoints,
+; or, use the macro to call the 4-wide ones 4x with our 16-wide
+; vectors...
+
+declare <16 x float> @__svml_sin(<16 x float>)
+declare <16 x float> @__svml_cos(<16 x float>)
+declare void @__svml_sincos(<16 x float>, <16 x float> *, <16 x float> *)
+declare <16 x float> @__svml_tan(<16 x float>)
+declare <16 x float> @__svml_atan(<16 x float>)
+declare <16 x float> @__svml_atan2(<16 x float>, <16 x float>)
+declare <16 x float> @__svml_exp(<16 x float>)
+declare <16 x float> @__svml_log(<16 x float>)
+declare <16 x float> @__svml_pow(<16 x float>, <16 x float>)
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; float min/max
+
+declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind readnone
+declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>) nounwind readnone
+
+define internal <16 x float> @__max_varying_float(<16 x float>,
+                                                  <16 x float>) nounwind readonly alwaysinline {
+    binary8to16(call, float, @llvm.x86.avx.max.ps.256, %0, %1)
+    ret <16 x float> %call
+}
+
+define internal <16 x float> @__min_varying_float(<16 x float>,
+                                                  <16 x float>) nounwind readonly alwaysinline {
+    binary8to16(call, float, @llvm.x86.avx.min.ps.256, %0, %1)
+    ret <16 x float> %call
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; int min/max
+
+define internal <16 x i32> @__min_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
+    binary4to16(ret, i32, @llvm.x86.sse41.pminsd, %0, %1)
+    ret <16 x i32> %ret
+}
+
+define internal <16 x i32> @__max_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
+    binary4to16(ret, i32, @llvm.x86.sse41.pmaxsd, %0, %1)
+    ret <16 x i32> %ret
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; unsigned int min/max
+
+define internal <16 x i32> @__min_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
+    binary4to16(ret, i32, @llvm.x86.sse41.pminud, %0, %1)
+    ret <16 x i32> %ret
+}
+
+define internal <16 x i32> @__max_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline {
+    binary4to16(ret, i32, @llvm.x86.sse41.pmaxud, %0, %1)
+    ret <16 x i32> %ret
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; horizontal ops
+
+declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone
+
+define internal i32 @__movmsk(<16 x i32>) nounwind readnone alwaysinline {
+    %floatmask = bitcast <16 x i32> %0 to <16 x float>
+    %mask0 = shufflevector <16 x float> %floatmask, <16 x float> undef,
+             <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+    %v0 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %mask0) nounwind readnone
+    %mask1 = shufflevector <16 x float> %floatmask, <16 x float> undef,
+             <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+    %v1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %mask1) nounwind readnone
+
+    %v1shift = shl i32 %v1, 8
+    %v = or i32 %v1shift, %v0
+    ret i32 %v
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; horizontal float ops
+
+declare <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float>, <8 x float>) nounwind readnone
+
+define internal float @__reduce_add_float(<16 x float>) nounwind readonly alwaysinline {
+    %va = shufflevector <16 x float> %0, <16 x float> undef,
+          <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+    %vb = shufflevector <16 x float> %0, <16 x float> undef,
+          <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+    %v1 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %va, <8 x float> %vb)
+    %v2 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %v1, <8 x float> %v1)
+    %v3 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %v2, <8 x float> %v2)
+    ; hadd works within 128-bit lanes, so after three rounds the total for
+    ; the low lane is in element 0 of %v3 and the total for the high lane
+    ; is in element 4
+    %scalar1 = extractelement <8 x float> %v3, i32 0
+    %scalar2 = extractelement <8 x float> %v3, i32 4
+    %sum = fadd float %scalar1, %scalar2
+    ret float %sum
+}
+
+
+define internal float @__reduce_min_float(<16 x float>) nounwind readnone alwaysinline {
+    reduce16(float, @__min_varying_float, @__min_uniform_float)
+}
+
+
+define internal float @__reduce_max_float(<16 x float>) nounwind readnone alwaysinline {
+    reduce16(float, @__max_varying_float, @__max_uniform_float)
+}
+
+reduce_equal(16)
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; horizontal int32 ops
+
+define internal <16 x i32> @__add_varying_int32(<16 x i32>,
+                                                <16 x i32>) nounwind readnone alwaysinline {
+    %s = add <16 x i32> %0, %1
+    ret <16 x i32> %s
+}
+
+define internal i32 @__add_uniform_int32(i32, i32) nounwind readnone alwaysinline {
+    %s = add i32 %0, %1
+    ret i32 %s
+}
+
+define internal i32 @__reduce_add_int32(<16 x i32>) nounwind readnone alwaysinline {
+    reduce16(i32, @__add_varying_int32, @__add_uniform_int32)
+}
+
+
+define internal i32 @__reduce_min_int32(<16 x i32>) nounwind readnone alwaysinline {
+    reduce16(i32, @__min_varying_int32, @__min_uniform_int32)
+}
+
+
+define internal i32 @__reduce_max_int32(<16 x i32>) nounwind readnone alwaysinline {
+    reduce16(i32, @__max_varying_int32, @__max_uniform_int32)
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; horizontal uint32 ops
+
+define internal i32 @__reduce_add_uint32(<16 x i32> %v) nounwind readnone alwaysinline {
+    %r = call i32 @__reduce_add_int32(<16 x i32> %v)
+    ret i32 %r
+}
+
+define internal i32 @__reduce_min_uint32(<16 x i32>) nounwind readnone alwaysinline {
+    reduce16(i32, @__min_varying_uint32, @__min_uniform_uint32)
+}
+
+
+define internal i32 @__reduce_max_uint32(<16 x i32>) nounwind readnone alwaysinline {
+    reduce16(i32, @__max_varying_uint32, @__max_uniform_uint32)
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; horizontal double ops
+
+declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>) nounwind readnone
+
+define internal double @__reduce_add_double(<16 x double>) nounwind readonly alwaysinline {
+    %va = shufflevector <16 x double> %0, <16 x double> undef,
+          <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+    %vb = shufflevector <16 x double> %0, <16 x double> undef,
+          <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+    %vc = shufflevector <16 x double> %0, <16 x double> undef,
+          <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+    %vd = shufflevector <16 x double> %0, <16 x double> undef,
+          <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+    %vab = fadd <4 x double> %va, %vb
+    %vcd = fadd <4 x double> %vc, %vd
+
+    %sum0 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %vab, <4 x double> %vcd)
+    %sum1 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %sum0, <4 x double> %sum0)
+    ; hadd works within 128-bit lanes, so the two remaining partial sums
+    ; end up in elements 0 and 2 of %sum1
+    %scalar1 = extractelement <4 x double> %sum1, i32 0
+    %scalar2 = extractelement <4 x double> %sum1, i32 2
+    %sum = fadd double %scalar1, %scalar2
+    ret double %sum
+}
+
+define internal double @__reduce_min_double(<16 x double>) nounwind readnone alwaysinline {
+    reduce16(double, @__min_varying_double, @__min_uniform_double)
+}
+
+
+define internal double @__reduce_max_double(<16 x double>) nounwind readnone alwaysinline {
+    reduce16(double, @__max_varying_double, @__max_uniform_double)
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; horizontal int64 ops
+
+define internal <16 x i64> @__add_varying_int64(<16 x i64>,
+                                                <16 x i64>) nounwind readnone alwaysinline {
+    %s = add <16 x i64> %0, %1
+    ret <16 x i64> %s
+}
+
+define internal i64 @__add_uniform_int64(i64, i64) nounwind readnone alwaysinline {
+    %s = add i64 %0, %1
+    ret i64 %s
+}
+
+define internal i64 @__reduce_add_int64(<16 x i64>) nounwind readnone alwaysinline {
+    reduce16(i64, @__add_varying_int64, @__add_uniform_int64)
+}
+
+
+define internal i64 @__reduce_min_int64(<16 x i64>) nounwind readnone alwaysinline {
+    reduce16(i64, @__min_varying_int64, @__min_uniform_int64)
+}
+
+
+define internal i64 @__reduce_max_int64(<16 x i64>) nounwind readnone alwaysinline {
+    reduce16(i64, @__max_varying_int64, @__max_uniform_int64)
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; horizontal uint64 ops
+
+define internal i64 @__reduce_add_uint64(<16 x i64> %v) nounwind readnone alwaysinline {
+    %r = call i64 @__reduce_add_int64(<16 x i64> %v)
+    ret i64 %r
+}
+
+define internal i64 @__reduce_min_uint64(<16 x i64>) nounwind readnone alwaysinline {
+    reduce16(i64, @__min_varying_uint64, @__min_uniform_uint64)
+}
+
+
+define internal i64 @__reduce_max_uint64(<16 x i64>) nounwind readnone alwaysinline {
+    reduce16(i64, @__max_varying_uint64, @__max_uniform_uint64)
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; unaligned loads/loads+broadcasts
+
+load_and_broadcast(16, i8, 8)
+load_and_broadcast(16, i16, 16)
+load_and_broadcast(16, i32, 32)
+load_and_broadcast(16, i64, 64)
+
+; no masked load instruction for i8 and i16 types??
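+; (i8 and i16 go through the load_masked() macro from builtins.m4 instead.)
+;
+; The 32- and 64-bit cases below use AVX vmaskmov, issued twice (resp. four
+; times) to cover all 16 lanes.  Per lane, the load behaves roughly like
+; this C-style sketch (illustrative only, not a real helper):
+;
+;     for (int i = 0; i < 16; ++i)
+;         // vmaskmov tests only the sign bit of each mask element, and it
+;         // zeroes masked-off lanes rather than faulting on them
+;         result[i] = (mask[i] & 0x80000000) ? ptr[i] : 0;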
+load_masked(16, i8, 8, 1)
+load_masked(16, i16, 16, 2)
+
+declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8 *, <8 x float> %mask)
+declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8 *, <4 x double> %mask)
+
+define <16 x i32> @__load_masked_32(i8 *, <16 x i32> %mask) nounwind alwaysinline {
+    %floatmask = bitcast <16 x i32> %mask to <16 x float>
+    %mask0 = shufflevector <16 x float> %floatmask, <16 x float> undef,
+             <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+    %val0 = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8 * %0, <8 x float> %mask0)
+    %mask1 = shufflevector <16 x float> %floatmask, <16 x float> undef,
+             <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+    %ptr1 = getelementptr i8 * %0, i32 32   ;; 8x4 bytes = 32
+    %val1 = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8 * %ptr1, <8 x float> %mask1)
+
+    %retval = shufflevector <8 x float> %val0, <8 x float> %val1,
+              <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                          i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+    %reti32 = bitcast <16 x float> %retval to <16 x i32>
+    ret <16 x i32> %reti32
+}
+
+
+define <16 x i64> @__load_masked_64(i8 *, <16 x i32> %mask) nounwind alwaysinline {
+    ; double up masks, bitcast to doubles
+    %mask0 = shufflevector <16 x i32> %mask, <16 x i32> undef,
+             <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
+    %mask1 = shufflevector <16 x i32> %mask, <16 x i32> undef,
+             <8 x i32> <i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
+    %mask2 = shufflevector <16 x i32> %mask, <16 x i32> undef,
+             <8 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11>
+    %mask3 = shufflevector <16 x i32> %mask, <16 x i32> undef,
+             <8 x i32> <i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
+    %mask0d = bitcast <8 x i32> %mask0 to <4 x double>
+    %mask1d = bitcast <8 x i32> %mask1 to <4 x double>
+    %mask2d = bitcast <8 x i32> %mask2 to <4 x double>
+    %mask3d = bitcast <8 x i32> %mask3 to <4 x double>
+
+    %val0d = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %0, <4 x double> %mask0d)
+    %ptr1 = getelementptr i8 * %0, i32 32
+    %val1d = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %ptr1, <4 x double> %mask1d)
+    %ptr2 = getelementptr i8 * %0, i32 64
+    %val2d = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %ptr2, <4 x double> %mask2d)
+    %ptr3 = getelementptr i8 * %0, i32 96
+    %val3d = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8 * %ptr3, <4 x double> %mask3d)
+
+    %val01 = shufflevector <4 x double> %val0d, <4 x double> %val1d,
+             <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+    %val23 = shufflevector <4 x double> %val2d, <4 x double> %val3d,
+             <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+    %val0123 = shufflevector <8 x double> %val01, <8 x double> %val23,
+               <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                           i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+    %val = bitcast <16 x double> %val0123 to <16 x i64>
+    ret <16 x i64> %val
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; masked store
+
+; FIXME: there is no AVX instruction for these, but we could be clever
+; by packing the bits down and setting the last 3/4 or half, respectively,
+; of the mask to zero...  Not sure if this would be a win in the end
+gen_masked_store(16, i8, 8)
+gen_masked_store(16, i16, 16)
+
+; note that mask is the 2nd parameter, not the 3rd one!!
+declare void @llvm.x86.avx.maskstore.ps.256(i8 *, <8 x float>, <8 x float>)
+declare void @llvm.x86.avx.maskstore.pd.256(i8 *, <4 x double>, <4 x double>)
+
+define void @__masked_store_32(<16 x i32>* nocapture, <16 x i32>,
+                               <16 x i32>) nounwind alwaysinline {
+    %ptr = bitcast <16 x i32> * %0 to i8 *
+    %val = bitcast <16 x i32> %1 to <16 x float>
+    %mask = bitcast <16 x i32> %2 to <16 x float>
+
+    %val0 = shufflevector <16 x float> %val, <16 x float> undef,
+            <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+    %val1 = shufflevector <16 x float> %val, <16 x float> undef,
+            <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+
+    %mask0 = shufflevector <16 x float> %mask, <16 x float> undef,
+             <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+    %mask1 = shufflevector <16 x float> %mask, <16 x float> undef,
+             <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+
+    call void @llvm.x86.avx.maskstore.ps.256(i8 * %ptr, <8 x float> %mask0, <8 x float> %val0)
+    %ptr1 = getelementptr i8 * %ptr, i32 32
+    call void @llvm.x86.avx.maskstore.ps.256(i8 * %ptr1, <8 x float> %mask1, <8 x float> %val1)
+
+    ret void
+}
+
+define void @__masked_store_64(<16 x i64>* nocapture, <16 x i64>,
+                               <16 x i32> %mask) nounwind alwaysinline {
+    %ptr = bitcast <16 x i64> * %0 to i8 *
+    %val = bitcast <16 x i64> %1 to <16 x double>
+
+    ; double up masks, bitcast to doubles
+    %mask0 = shufflevector <16 x i32> %mask, <16 x i32> undef,
+             <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
+    %mask1 = shufflevector <16 x i32> %mask, <16 x i32> undef,
+             <8 x i32> <i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
+    %mask2 = shufflevector <16 x i32> %mask, <16 x i32> undef,
+             <8 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11>
+    %mask3 = shufflevector <16 x i32> %mask, <16 x i32> undef,
+             <8 x i32> <i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
+    %mask0d = bitcast <8 x i32> %mask0 to <4 x double>
+    %mask1d = bitcast <8 x i32> %mask1 to <4 x double>
+    %mask2d = bitcast <8 x i32> %mask2 to <4 x double>
+    %mask3d = bitcast <8 x i32> %mask3 to <4 x double>
+
+    %val0 = shufflevector <16 x double> %val, <16 x double> undef,
+            <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+    %val1 = shufflevector <16 x double> %val, <16 x double> undef,
+            <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+    %val2 = shufflevector <16 x double> %val, <16 x double> undef,
+            <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+    %val3 = shufflevector <16 x double> %val, <16 x double> undef,
+            <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+
+    call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr, <4 x double> %mask0d, <4 x double> %val0)
+    %ptr1 = getelementptr i8 * %ptr, i32 32
+    call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr1, <4 x double> %mask1d, <4 x double> %val1)
+    %ptr2 = getelementptr i8 * %ptr, i32 64
+    call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr2, <4 x double> %mask2d, <4 x double> %val2)
+    %ptr3 = getelementptr i8 * %ptr, i32 96
+    call void @llvm.x86.avx.maskstore.pd.256(i8 * %ptr3, <4 x double> %mask3d, <4 x double> %val3)
+
+    ret void
+}
+
+masked_store_blend_8_16_by_16()
+
+declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>,
+                                                <8 x float>) nounwind readnone
+
+
+define void @__masked_store_blend_32(<16 x i32>* nocapture, <16 x i32>,
+                                     <16 x i32>) nounwind alwaysinline {
+    %maskAsFloat = bitcast <16 x i32> %2 to <16 x float>
+    %oldValue = load <16 x i32>* %0, align 4
+    %oldAsFloat = bitcast <16 x i32> %oldValue to <16 x float>
+    %newAsFloat = bitcast <16 x i32> %1 to <16 x float>
+
+    %old0 = shufflevector <16 x float> %oldAsFloat, <16 x float> undef,
+            <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+    %old1 = shufflevector <16 x float> %oldAsFloat, <16 x float> undef,
+            <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+    %new0 = shufflevector <16 x float> %newAsFloat, <16 x float> undef,
+            <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+    %new1 = shufflevector <16 x float> %newAsFloat, <16 x float> undef,
+            <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+    %mask0 = shufflevector <16 x float> %maskAsFloat, <16 x float> undef,
+             <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+    %mask1 = shufflevector <16 x float> %maskAsFloat, <16 x float> undef,
+             <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+
+    %blend0 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %old0,
+                                                           <8 x float> %new0,
+                                                           <8 x float> %mask0)
+    %blend1 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %old1,
+                                                           <8 x float> %new1,
+                                                           <8 x float> %mask1)
+    %blend = shufflevector <8 x float> %blend0, <8 x float> %blend1,
+             <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                         i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+    %blendAsInt = bitcast <16 x float> %blend to <16 x i32>
+    store <16 x i32> %blendAsInt, <16 x i32>* %0, align 4
+    ret void
+}
+
+
+declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>,
+                                                 <4 x double>) nounwind readnone
+
+define void @__masked_store_blend_64(<16 x i64>* nocapture %ptr, <16 x i64> %newi64,
+                                     <16 x i32> %mask) nounwind alwaysinline {
+    %oldValue = load <16 x i64>* %ptr, align 8
+    %old = bitcast <16 x i64> %oldValue to <16 x double>
+    %old0d = shufflevector <16 x double> %old, <16 x double> undef,
+             <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+    %old1d = shufflevector <16 x double> %old, <16 x double> undef,
+             <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+    %old2d = shufflevector <16 x double> %old, <16 x double> undef,
+             <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+    %old3d = shufflevector <16 x double> %old, <16 x double> undef,
+             <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+
+    %new = bitcast <16 x i64> %newi64 to <16 x double>
+    %new0d = shufflevector <16 x double> %new, <16 x double> undef,
+             <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+    %new1d = shufflevector <16 x double> %new, <16 x double> undef,
+             <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+    %new2d = shufflevector <16 x double> %new, <16 x double> undef,
+             <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+    %new3d = shufflevector <16 x double> %new, <16 x double> undef,
+             <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+
+    %mask0 = shufflevector <16 x i32> %mask, <16 x i32> undef,
+             <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
+    %mask1 = shufflevector <16 x i32> %mask, <16 x i32> undef,
+             <8 x i32> <i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
+    %mask2 = shufflevector <16 x i32> %mask, <16 x i32> undef,
+             <8 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11>
+    %mask3 = shufflevector <16 x i32> %mask, <16 x i32> undef,
+             <8 x i32> <i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
+    %mask0d = bitcast <8 x i32> %mask0 to <4 x double>
+    %mask1d = bitcast <8 x i32> %mask1 to <4 x double>
+    %mask2d = bitcast <8 x i32> %mask2 to <4 x double>
+    %mask3d = bitcast <8 x i32> %mask3 to <4 x double>
+
+    %result0d = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %old0d,
+                                     <4 x double> %new0d, <4 x double> %mask0d)
+    %result1d = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %old1d,
+                                     <4 x double> %new1d, <4 x double> %mask1d)
+    %result2d = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %old2d,
+                                     <4 x double> %new2d, <4 x double> %mask2d)
+    %result3d = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %old3d,
+                                     <4 x double> %new3d, <4 x double> %mask3d)
+
+    %result01 = shufflevector <4 x double> %result0d, <4 x double> %result1d,
+                <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+    %result23 = shufflevector <4 x double> %result2d, <4 x double> %result3d,
+                <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+
+    %result = shufflevector <8 x double> %result01, <8 x double> %result23,
+              <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                          i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+    %result64 = bitcast <16 x double> %result to <16 x i64>
+    store <16 x i64> %result64, <16 x i64> * %ptr
+    ret void
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; gather/scatter
+
+gen_gather(16, i8)
+gen_gather(16, i16)
+gen_gather(16, i32)
+gen_gather(16, i64)
+
+gen_scatter(16, i8)
+gen_scatter(16, i16)
+gen_scatter(16, i32)
+gen_scatter(16, i64)
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; double precision sqrt
+
+declare <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double>) nounwind readnone
+
+define internal <16 x double> @__sqrt_varying_double(<16 x double>) nounwind alwaysinline {
+    unary4to16(ret, double, @llvm.x86.avx.sqrt.pd.256, %0)
+    ret <16 x double> %ret
+}
+
+
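+;; The 16-wide double builtins (sqrt above, min/max below) decompose into
+;; four 4-wide AVX calls via the unary4to16/binary4to16 macros, since a
+;; 256-bit register holds only four doubles.  Roughly, with illustrative
+;; helper names:
+;;
+;;     double16 op16(double16 v) {
+;;         return concat(op4(quarter0(v)), op4(quarter1(v)),
+;;                       op4(quarter2(v)), op4(quarter3(v)));
+;;     }
+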
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; double precision min/max + +declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwind readnone +declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwind readnone + +define internal <16 x double> @__min_varying_double(<16 x double>, <16 x double>) nounwind readnone alwaysinline { + binary4to16(ret, double, @llvm.x86.avx.min.pd.256, %0, %1) + ret <16 x double> %ret +} + +define internal <16 x double> @__max_varying_double(<16 x double>, <16 x double>) nounwind readnone alwaysinline { + binary4to16(ret, double, @llvm.x86.avx.max.pd.256, %0, %1) + ret <16 x double> %ret +} diff --git a/builtins-avx.ll b/builtins-avx.ll index f7f70046..91aec080 100644 --- a/builtins-avx.ll +++ b/builtins-avx.ll @@ -44,11 +44,12 @@ packed_load_and_store(8) scans(8) int64minmax(8) +include(`builtins-avx-common.ll') + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; rcp declare <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float>) nounwind readnone -declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone define internal <8 x float> @__rcp_varying_float(<8 x float>) nounwind readonly alwaysinline { ; float iv = __rcp_v(v); @@ -63,25 +64,10 @@ define internal <8 x float> @__rcp_varying_float(<8 x float>) nounwind readonly ret <8 x float> %iv_mul } -define internal float @__rcp_uniform_float(float) nounwind readonly alwaysinline { -; uniform float iv = extract(__rcp_u(v), 0); -; return iv * (2. - v * iv); - %vecval = insertelement <4 x float> undef, float %0, i32 0 - %call = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %vecval) - %scall = extractelement <4 x float> %call, i32 0 - - ; do one N-R iteration - %v_iv = fmul float %0, %scall - %two_minus = fsub float 2., %v_iv - %iv_mul = fmul float %scall, %two_minus - ret float %iv_mul -} - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; rounding floats declare <8 x float> @llvm.x86.avx.round.ps.256(<8 x float>, i32) nounwind readnone -declare <4 x float> @llvm.x86.sse.round.ss(<4 x float>, <4 x float>, i32) nounwind readnone define internal <8 x float> @__round_varying_float(<8 x float>) nounwind readonly alwaysinline { ; roundps, round mode nearest 0b00 | don't signal precision exceptions 0b1000 = 8 @@ -89,111 +75,43 @@ define internal <8 x float> @__round_varying_float(<8 x float>) nounwind readonl ret <8 x float> %call } -define internal float @__round_uniform_float(float) nounwind readonly alwaysinline { - ; roundss, round mode nearest 0b00 | don't signal precision exceptions 0b1000 = 8 - ; the roundss intrinsic is a total mess--docs say: - ; - ; __m128 _mm_round_ss (__m128 a, __m128 b, const int c) - ; - ; b is a 128-bit parameter. The lowest 32 bits are the result of the rounding function - ; on b0. The higher order 96 bits are copied directly from input parameter a. The - ; return value is described by the following equations: - ; - ; r0 = RND(b0) - ; r1 = a1 - ; r2 = a2 - ; r3 = a3 - ; - ; It doesn't matter what we pass as a, since we only need the r0 value - ; here. So we pass the same register for both. 
- %xi = insertelement <4 x float> undef, float %0, i32 0 - %xr = call <4 x float> @llvm.x86.sse.round.ss(<4 x float> %xi, <4 x float> %xi, i32 8) - %rs = extractelement <4 x float> %xr, i32 0 - ret float %rs -} - define internal <8 x float> @__floor_varying_float(<8 x float>) nounwind readonly alwaysinline { ; roundps, round down 0b01 | don't signal precision exceptions 0b1000 = 9 %call = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %0, i32 9) ret <8 x float> %call } -define internal float @__floor_uniform_float(float) nounwind readonly alwaysinline { - ; see above for round_ss instrinsic discussion... - %xi = insertelement <4 x float> undef, float %0, i32 0 - ; roundps, round down 0b01 | don't signal precision exceptions 0b1000 = 9 - %xr = call <4 x float> @llvm.x86.sse.round.ss(<4 x float> %xi, <4 x float> %xi, i32 9) - %rs = extractelement <4 x float> %xr, i32 0 - ret float %rs -} - define internal <8 x float> @__ceil_varying_float(<8 x float>) nounwind readonly alwaysinline { ; roundps, round up 0b10 | don't signal precision exceptions 0b1000 = 10 %call = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %0, i32 10) ret <8 x float> %call } -define internal float @__ceil_uniform_float(float) nounwind readonly alwaysinline { - ; see above for round_ss instrinsic discussion... - %xi = insertelement <4 x float> undef, float %0, i32 0 - ; roundps, round up 0b10 | don't signal precision exceptions 0b1000 = 10 - %xr = call <4 x float> @llvm.x86.sse.round.ss(<4 x float> %xi, <4 x float> %xi, i32 10) - %rs = extractelement <4 x float> %xr, i32 0 - ret float %rs -} - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; rounding doubles declare <4 x double> @llvm.x86.avx.round.pd.256(<4 x double>, i32) nounwind readnone -declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) nounwind readnone define internal <8 x double> @__round_varying_double(<8 x double>) nounwind readonly alwaysinline { round4to8double(%0, 8) } -define internal double @__round_uniform_double(double) nounwind readonly alwaysinline { - %xi = insertelement <2 x double> undef, double %0, i32 0 - %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 8) - %rs = extractelement <2 x double> %xr, i32 0 - ret double %rs -} - define internal <8 x double> @__floor_varying_double(<8 x double>) nounwind readonly alwaysinline { ; roundpd, round down 0b01 | don't signal precision exceptions 0b1000 = 9 round4to8double(%0, 9) } -define internal double @__floor_uniform_double(double) nounwind readonly alwaysinline { - ; see above for round_ss instrinsic discussion... - %xi = insertelement <2 x double> undef, double %0, i32 0 - ; roundpd, round down 0b01 | don't signal precision exceptions 0b1000 = 9 - %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 9) - %rs = extractelement <2 x double> %xr, i32 0 - ret double %rs -} define internal <8 x double> @__ceil_varying_double(<8 x double>) nounwind readonly alwaysinline { ; roundpd, round up 0b10 | don't signal precision exceptions 0b1000 = 10 round4to8double(%0, 10) } -define internal double @__ceil_uniform_double(double) nounwind readonly alwaysinline { - ; see above for round_ss instrinsic discussion... 
- %xi = insertelement <2 x double> undef, double %0, i32 0 - ; roundps, round up 0b10 | don't signal precision exceptions 0b1000 = 10 - %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 10) - %rs = extractelement <2 x double> %xr, i32 0 - ret double %rs -} - - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; rsqrt declare <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float>) nounwind readnone -declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone define internal <8 x float> @__rsqrt_varying_float(<8 x float> %v) nounwind readonly alwaysinline { ; float is = __rsqrt_v(v); @@ -207,58 +125,16 @@ define internal <8 x float> @__rsqrt_varying_float(<8 x float> %v) nounwind read ret <8 x float> %half_scale } -define internal float @__rsqrt_uniform_float(float) nounwind readonly alwaysinline { - ; uniform float is = extract(__rsqrt_u(v), 0); - %v = insertelement <4 x float> undef, float %0, i32 0 - %vis = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %v) - %is = extractelement <4 x float> %vis, i32 0 - - ; return 0.5 * is * (3. - (v * is) * is); - %v_is = fmul float %0, %is - %v_is_is = fmul float %v_is, %is - %three_sub = fsub float 3., %v_is_is - %is_mul = fmul float %is, %three_sub - %half_scale = fmul float 0.5, %is_mul - ret float %half_scale -} - - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; sqrt declare <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float>) nounwind readnone -declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone define internal <8 x float> @__sqrt_varying_float(<8 x float>) nounwind readonly alwaysinline { %call = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %0) ret <8 x float> %call } -define internal float @__sqrt_uniform_float(float) nounwind readonly alwaysinline { - sse_unary_scalar(ret, 4, float, @llvm.x86.sse.sqrt.ss, %0) - ret float %ret -} - - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; fastmath - -declare void @llvm.x86.sse.stmxcsr(i8 *) nounwind -declare void @llvm.x86.sse.ldmxcsr(i8 *) nounwind - -define internal void @__fastmath() nounwind alwaysinline { - %ptr = alloca i32 - %ptr8 = bitcast i32 * %ptr to i8 * - call void @llvm.x86.sse.stmxcsr(i8 * %ptr8) - %oldval = load i32 *%ptr - - ; turn on DAZ (64)/FTZ (32768) -> 32832 - %update = or i32 %oldval, 32832 - store i32 %update, i32 *%ptr - call void @llvm.x86.sse.ldmxcsr(i8 * %ptr8) - ret void -} - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; svml @@ -280,9 +156,7 @@ declare <8 x float> @__svml_pow(<8 x float>, <8 x float>) ;; float min/max declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind readnone -declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind readnone declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>) nounwind readnone -declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind readnone define internal <8 x float> @__max_varying_float(<8 x float>, <8 x float>) nounwind readonly alwaysinline { @@ -290,94 +164,43 @@ define internal <8 x float> @__max_varying_float(<8 x float>, ret <8 x float> %call } -define internal float @__max_uniform_float(float, float) nounwind readonly alwaysinline { - sse_binary_scalar(ret, 4, float, @llvm.x86.sse.max.ss, %0, %1) - ret float %ret -} - define internal <8 x float> @__min_varying_float(<8 x float>, <8 x float>) nounwind readonly alwaysinline { %call = call 
<8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %0, <8 x float> %1) ret <8 x float> %call } -define internal float @__min_uniform_float(float, float) nounwind readonly alwaysinline { - sse_binary_scalar(ret, 4, float, @llvm.x86.sse.min.ss, %0, %1) - ret float %ret -} - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; int min/max -; no 8-wide integer stuff in avx1... -declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone -declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone - define internal <8 x i32> @__min_varying_int32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline { binary4to8(ret, i32, @llvm.x86.sse41.pminsd, %0, %1) ret <8 x i32> %ret } -define internal i32 @__min_uniform_int32(i32, i32) nounwind readonly alwaysinline { - sse_binary_scalar(ret, 4, i32, @llvm.x86.sse41.pminsd, %0, %1) - ret i32 %ret -} - define internal <8 x i32> @__max_varying_int32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline { binary4to8(ret, i32, @llvm.x86.sse41.pmaxsd, %0, %1) ret <8 x i32> %ret } -define internal i32 @__max_uniform_int32(i32, i32) nounwind readonly alwaysinline { - sse_binary_scalar(ret, 4, i32, @llvm.x86.sse41.pmaxsd, %0, %1) - ret i32 %ret -} - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; unsigned int min/max -declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone -declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone - define internal <8 x i32> @__min_varying_uint32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline { binary4to8(ret, i32, @llvm.x86.sse41.pminud, %0, %1) ret <8 x i32> %ret } -define internal i32 @__min_uniform_uint32(i32, i32) nounwind readonly alwaysinline { - sse_binary_scalar(ret, 4, i32, @llvm.x86.sse41.pminud, %0, %1) - ret i32 %ret -} - define internal <8 x i32> @__max_varying_uint32(<8 x i32>, <8 x i32>) nounwind readonly alwaysinline { binary4to8(ret, i32, @llvm.x86.sse41.pmaxud, %0, %1) ret <8 x i32> %ret } -define internal i32 @__max_uniform_uint32(i32, i32) nounwind readonly alwaysinline { - sse_binary_scalar(ret, 4, i32, @llvm.x86.sse41.pmaxud, %0, %1) - ret i32 %ret -} - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; horizontal ops -declare i32 @llvm.ctpop.i32(i32) nounwind readnone - -define internal i32 @__popcnt_int32(i32) nounwind readonly alwaysinline { - %call = call i32 @llvm.ctpop.i32(i32 %0) - ret i32 %call -} - -declare i64 @llvm.ctpop.i64(i64) nounwind readnone - -define internal i64 @__popcnt_int64(i64) nounwind readonly alwaysinline { - %call = call i64 @llvm.ctpop.i64(i64 %0) - ret i64 %call -} - declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone define internal i32 @__movmsk(<8 x i32>) nounwind readnone alwaysinline { @@ -711,43 +534,26 @@ gen_scatter(8, i64) ;; double precision sqrt declare <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double>) nounwind readnone -declare <2 x double> @llvm.x86.sse.sqrt.sd(<2 x double>) nounwind readnone define internal <8 x double> @__sqrt_varying_double(<8 x double>) nounwind alwaysinline { unary4to8(ret, double, @llvm.x86.avx.sqrt.pd.256, %0) ret <8 x double> %ret } -define internal double @__sqrt_uniform_double(double) nounwind alwaysinline { - sse_unary_scalar(ret, 2, double, @llvm.x86.sse.sqrt.sd, %0) - ret double %ret -} - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; double precision min/max declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x 
double>, <4 x double>) nounwind readnone
-declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind readnone
 declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwind readnone
-declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone
 
 define internal <8 x double> @__min_varying_double(<8 x double>,
                                                    <8 x double>) nounwind readnone alwaysinline {
     binary4to8(ret, double, @llvm.x86.avx.min.pd.256, %0, %1)
     ret <8 x double> %ret
 }
 
-define internal double @__min_uniform_double(double, double) nounwind readnone alwaysinline {
-    sse_binary_scalar(ret, 2, double, @llvm.x86.sse2.min.sd, %0, %1)
-    ret double %ret
-}
-
 define internal <8 x double> @__max_varying_double(<8 x double>,
                                                    <8 x double>) nounwind readnone alwaysinline {
     binary4to8(ret, double, @llvm.x86.avx.max.pd.256, %0, %1)
     ret <8 x double> %ret
 }
 
-define internal double @__max_uniform_double(double, double) nounwind readnone alwaysinline {
-    sse_binary_scalar(ret, 2, double, @llvm.x86.sse2.max.sd, %0, %1)
-    ret double %ret
-}
diff --git a/builtins.cpp b/builtins.cpp
index e73c372f..c38001f7 100644
--- a/builtins.cpp
+++ b/builtins.cpp
@@ -454,10 +454,22 @@ DefineStdlib(SymbolTable *symbolTable, llvm::LLVMContext *ctx, llvm::Module *mod
         }
         break;
     case Target::AVX:
-        extern unsigned char builtins_bitcode_avx[];
-        extern int builtins_bitcode_avx_length;
-        lAddBitcode(builtins_bitcode_avx, builtins_bitcode_avx_length, module,
-                    symbolTable);
+        switch (g->target.vectorWidth) {
+        case 8:
+            extern unsigned char builtins_bitcode_avx[];
+            extern int builtins_bitcode_avx_length;
+            lAddBitcode(builtins_bitcode_avx, builtins_bitcode_avx_length, module,
+                        symbolTable);
+            break;
+        case 16:
+            extern unsigned char builtins_bitcode_avx_x2[];
+            extern int builtins_bitcode_avx_x2_length;
+            lAddBitcode(builtins_bitcode_avx_x2, builtins_bitcode_avx_x2_length,
+                        module, symbolTable);
+            break;
+        default:
+            FATAL("logic error in DefineStdlib");
+        }
         break;
     default:
         FATAL("logic error");
diff --git a/builtins.m4 b/builtins.m4
index e4d40457..e7da41b5 100644
--- a/builtins.m4
+++ b/builtins.m4
@@ -111,6 +111,32 @@ define(`reduce8', `
 '
 )
 
+define(`reduce16', `
+  %v1 = shufflevector <16 x $1> %0, <16 x $1> undef,
+        <16 x i32> <i32 8, i32 9, i32 10, i32 11,
+                    i32 12, i32 13, i32 14, i32 15,
+                    i32 undef, i32 undef, i32 undef, i32 undef,
+                    i32 undef, i32 undef, i32 undef, i32 undef>
+  %m1 = call <16 x $1> $2(<16 x $1> %v1, <16 x $1> %0)
+  %v2 = shufflevector <16 x $1> %m1, <16 x $1> undef,
+        <16 x i32> <i32 4, i32 5, i32 6, i32 7,
+                    i32 undef, i32 undef, i32 undef, i32 undef,
+                    i32 undef, i32 undef, i32 undef, i32 undef,
+                    i32 undef, i32 undef, i32 undef, i32 undef>
+  %m2 = call <16 x $1> $2(<16 x $1> %v2, <16 x $1> %m1)
+  %v3 = shufflevector <16 x $1> %m2, <16 x $1> undef,
+        <16 x i32> <i32 2, i32 3, i32 undef, i32 undef,
+                    i32 undef, i32 undef, i32 undef, i32 undef,
+                    i32 undef, i32 undef, i32 undef, i32 undef,
+                    i32 undef, i32 undef, i32 undef, i32 undef>
+  %m3 = call <16 x $1> $2(<16 x $1> %v3, <16 x $1> %m2)
+
+  %m3a = extractelement <16 x $1> %m3, i32 0
+  %m3b = extractelement <16 x $1> %m3, i32 1
+  %m = call $1 $3($1 %m3a, $1 %m3b)
+  ret $1 %m
+'
+)
+
 ;; Do an reduction over an 8-wide vector, using a vector reduction function
 ;; that only takes 4-wide vectors
 ;; $1: type of final scalar result
@@ -211,6 +237,45 @@ define(`unary4to8', `
 '
 )
 
+define(`unary4to16', `
+  %$1_0 = shufflevector <16 x $2> $4, <16 x $2> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %v$1_0 = call <4 x $2> $3(<4 x $2> %$1_0)
+  %$1_1 = shufflevector <16 x $2> $4, <16 x $2> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %v$1_1 = call <4 x $2> $3(<4 x $2> %$1_1)
+  %$1_2 = shufflevector <16 x $2> $4, <16 x $2> undef, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+  %v$1_2 = call <4 x $2> $3(<4 x $2> %$1_2)
+  %$1_3 = shufflevector <16 x $2> $4, <16 x $2> undef, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+  %v$1_3 = call <4 x $2> $3(<4 x $2> %$1_3)
+
+  %$1a = shufflevector <4 x $2> %v$1_0, <4 x $2> %v$1_1,
+         <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %$1b = shufflevector <4 x $2> %v$1_2, <4 x $2> %v$1_3,
+         <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %$1 = shufflevector <8 x $2> %$1a, <8 x $2> %$1b,
+        <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                    i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+'
+)
+
+;; And so forth...
+;; $1: name of variable into which the final result should go
+;; $2: scalar type of the vector elements
+;; $3: 8-wide unary vector function to apply
+;; $4: 16-wide operand value
+
+define(`unary8to16', `
+  %$1_0 = shufflevector <16 x $2> $4, <16 x $2> undef,
+          <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %v$1_0 = call <8 x $2> $3(<8 x $2> %$1_0)
+  %$1_1 = shufflevector <16 x $2> $4, <16 x $2> undef,
+          <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %v$1_1 = call <8 x $2> $3(<8 x $2> %$1_1)
+  %$1 = shufflevector <8 x $2> %v$1_0, <8 x $2> %v$1_1,
+        <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                    i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+'
+)
+
 ;; And along the lines of `binary2to4', this maps a 4-wide binary function to
 ;; two 8-wide vector operands
 ;; $1: name of variable into which the final result should go
 ;; $2: scalar type of the vector elements
@@ -231,6 +296,57 @@ define(`binary4to8', `
 '
 )
 
+define(`binary8to16', `
+%$1_0a = shufflevector <16 x $2> $4, <16 x $2> undef,
+         <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+%$1_0b = shufflevector <16 x $2> $5, <16 x $2> undef,
+         <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+%v$1_0 = call <8 x $2> $3(<8 x $2> %$1_0a, <8 x $2> %$1_0b)
+%$1_1a = shufflevector <16 x $2> $4, <16 x $2> undef,
+         <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+%$1_1b = shufflevector <16 x $2> $5, <16 x $2> undef,
+         <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+%v$1_1 = call <8 x $2> $3(<8 x $2> %$1_1a, <8 x $2> %$1_1b)
+%$1 = shufflevector <8 x $2> %v$1_0, <8 x $2> %v$1_1,
+      <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                  i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+'
+)
+
+define(`binary4to16', `
+%$1_0a = shufflevector <16 x $2> $4, <16 x $2> undef,
+         <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+%$1_0b = shufflevector <16 x $2> $5, <16 x $2> undef,
+         <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+%r$1_0 = call <4 x $2> $3(<4 x $2> %$1_0a, <4 x $2> %$1_0b)
+
+%$1_1a = shufflevector <16 x $2> $4, <16 x $2> undef,
+         <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+%$1_1b = shufflevector <16 x $2> $5, <16 x $2> undef,
+         <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+%r$1_1 = call <4 x $2> $3(<4 x $2> %$1_1a, <4 x $2> %$1_1b)
+
+%$1_2a = shufflevector <16 x $2> $4, <16 x $2> undef,
+         <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+%$1_2b = shufflevector <16 x $2> $5, <16 x $2> undef,
+         <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+%r$1_2 = call <4 x $2> $3(<4 x $2> %$1_2a, <4 x $2> %$1_2b)
+
+%$1_3a = shufflevector <16 x $2> $4, <16 x $2> undef,
+         <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+%$1_3b = shufflevector <16 x $2> $5, <16 x $2> undef,
+         <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+%r$1_3 = call <4 x $2> $3(<4 x $2> %$1_3a, <4 x $2> %$1_3b)
+
+%r$1_01 = shufflevector <4 x $2> %r$1_0, <4 x $2> %r$1_1,
+          <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+%r$1_23 = shufflevector <4 x $2> %r$1_2, <4 x $2> %r$1_3,
+          <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+
+%$1 = shufflevector <8 x $2> %r$1_01, <8 x $2> %r$1_23,
+      <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                  i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+')
+
 ;; Maps a 2-wide unary function to an 8-wide vector operand, returning an
 ;; 8-wide vector result
@@ -306,6 +422,20 @@ ret <8 x float> %ret
 '
 )
 
+define(`round8to16', `
+%v0 = shufflevector <16 x float> $1, <16 x float> undef,
+      <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+%v1 = shufflevector <16 x float> $1, <16 x float> undef,
+      <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+%r0 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %v0, i32 $2)
+%r1 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %v1, i32 $2)
+%ret = shufflevector <8 x float> %r0, <8 x float> %r1,
+       <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                   i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ret <16 x float> %ret
+'
+)
+
 define(`round4to8double', `
 %v0 = shufflevector <8 x double> $1, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
 %v1 = shufflevector <8 x double> $1, <8 x double> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -349,6 +479,30 @@ ret <8 x double> %ret
 '
 )
 
+define(`round4to16double', `
+%v0 = shufflevector <16 x double> $1, <16 x double> undef,
+      <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+%v1 = shufflevector <16 x double> $1, <16 x double> undef,
+      <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+%v2 = shufflevector <16 x double> $1, <16 x double> undef,
+      <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+%v3 = shufflevector <16 x double> $1, <16 x double> undef,
+      <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+%r0 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %v0, i32 $2)
+%r1 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %v1, i32 $2)
+%r2 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %v2, i32 $2)
+%r3 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %v3, i32 $2)
+%ret0 = shufflevector <4 x double> %r0, <4 x double> %r1,
+        <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+%ret1 = shufflevector <4 x double> %r2, <4 x double> %r3,
+        <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+%ret = shufflevector <8 x double> %ret0, <8 x double> %ret1,
+       <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                   i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ret <16 x double> %ret
+'
+)
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;; forloop macro
@@ -1260,6 +1414,46 @@ define void @__masked_store_blend_16(<8 x i16>* nocapture, <8 x i16>,
 }
 ')
 
+define(`masked_store_blend_8_16_by_16', `
+define void @__masked_store_blend_8(<16 x i8>* nocapture, <16 x i8>,
+                                    <16 x i32>) nounwind alwaysinline {
+    %old = load <16 x i8> * %0
+    %old128 = bitcast <16 x i8> %old to i128
+    %new128 = bitcast <16 x i8> %1 to i128
+
+    %mask8 = trunc <16 x i32> %2 to <16 x i8>
+    %mask128 = bitcast <16 x i8> %mask8 to i128
+    %notmask128 = xor i128 %mask128, -1
+
+    %newmasked = and i128 %new128, %mask128
+    %oldmasked = and i128 %old128, %notmask128
+    %result = or i128 %newmasked, %oldmasked
+
+    %resultvec = bitcast i128 %result to <16 x i8>
+    store <16 x i8> %resultvec, <16 x i8> * %0
+    ret void
+}
+
+define void @__masked_store_blend_16(<16 x i16>* nocapture, <16 x i16>,
+                                     <16 x i32>) nounwind alwaysinline {
+    %old = load <16 x i16> * %0
+    %old256 = bitcast <16 x i16> %old to i256
+    %new256 = bitcast <16 x i16> %1 to i256
+
+    %mask16 = trunc <16 x i32> %2 to <16 x i16>
+    %mask256 = bitcast <16 x i16> %mask16 to i256
+    %notmask256 = xor i256 %mask256, -1
+
+    %newmasked = and i256 %new256, %mask256
+    %oldmasked = and i256 %old256, %notmask256
+    %result = or i256 %newmasked, %oldmasked
+
+    %resultvec = bitcast i256 %result to <16 x i16>
+    store <16 x i16> %resultvec, <16 x i16> * %0
+    ret void
+}
+')
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;; packed load and store functions
diff --git a/ispc.vcxproj b/ispc.vcxproj
index a2f641ba..0df08572 100755
--- a/ispc.vcxproj
+++ b/ispc.vcxproj
@@ -16,6 +16,7 @@
+
@@ -120,6 +121,19 @@
     <Message Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Building gen-bitcode-avx.cpp</Message>
   </CustomBuild>
+  <CustomBuild Include="builtins-avx-x2.ll">
+    <FileType>Document</FileType>
+    <Command Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">m4 builtins.m4 builtins-avx-x2.ll | python bitcode2cpp.py builtins-avx-x2.ll &gt; gen-bitcode-avx-x2.cpp</Command>
+    <Outputs Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">gen-bitcode-avx-x2.cpp</Outputs>
+    <AdditionalInputs Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">builtins.m4;builtins-sse.ll;builtins-avx-common.ll</AdditionalInputs>
+    <Command Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">m4 builtins.m4 builtins-avx-x2.ll | python bitcode2cpp.py builtins-avx-x2.ll &gt; gen-bitcode-avx-x2.cpp</Command>
+    <Outputs Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">gen-bitcode-avx-x2.cpp</Outputs>
+    <AdditionalInputs Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">builtins.m4;builtins-sse.ll;builtins-avx-common.ll</AdditionalInputs>
+    <Message Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Building gen-bitcode-avx-x2.cpp</Message>
+    <Message Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Building gen-bitcode-avx-x2.cpp</Message>
+  </CustomBuild>
     <FileType>Document</FileType>
diff --git a/main.cpp b/main.cpp
index 86e7cad4..23ea67b8 100644
--- a/main.cpp
+++ b/main.cpp
@@ -92,7 +92,7 @@ static void usage(int ret) {
     printf("    disable-uniform-memory-optimizations\tDisable uniform-based coherent memory access\n");
     printf("    disable-masked-store-optimizations\tDisable lowering to regular stores when possible\n");
 #if defined(LLVM_3_0) || defined(LLVM_3_0svn)
-    printf("    [--target={sse2,sse4,sse4x2,avx}] Select target ISA (SSE4 is default unless compiling for atom; then SSE2 is.)\n");
+    printf("    [--target={sse2,sse4,sse4x2,avx,avx-x2}] Select target ISA (SSE4 is default unless compiling for atom; then SSE2 is.)\n");
 #else
     printf("    [--target={sse2,sse4,sse4x2}] Select target ISA (SSE4 is default unless compiling for atom; then SSE2 is.)\n");
 #endif // LLVM 3.0
@@ -128,6 +128,11 @@ static void lDoTarget(const char *target) {
         g->target.nativeVectorWidth = 8;
         g->target.vectorWidth = 8;
     }
+    else if (!strcasecmp(target, "avx-x2")) {
+        g->target.isa = Target::AVX;
+        g->target.nativeVectorWidth = 8;
+        g->target.vectorWidth = 16;
+    }
 #endif // LLVM 3.0
     else
         usage(1);
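
A usage sketch, assuming an LLVM 3.0 build of ispc (the new flag sits
inside the LLVM_3_0 guards above):

    ispc --target=avx-x2 foo.ispc -o foo.o

This selects Target::AVX with nativeVectorWidth 8 and vectorWidth 16, so
each 16-wide gang compiles down to pairs (or, for doubles, quadruples) of
256-bit AVX operations.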