From 7af7659ac23818832bfa62eb14777d6e5dc61ad9 Mon Sep 17 00:00:00 2001
From: "Niall Douglas (s [underscore] sourceforge {at} nedprod [dot] com)"
Date: Mon, 5 Sep 2016 15:52:40 +0100
Subject: [PATCH] Fix ARM NEON output not always being inlined. Also improved
 scope for ARM NEON optimisation by LLVM, gained about 2% on my code here.

---
 builtins/target-neon-16.ll     |  98 +++++++++++++++----------------
 builtins/target-neon-32.ll     |  96 ++++++++++++++----------------
 builtins/target-neon-8.ll      | 106 ++++++++++++++++------------------
 builtins/target-neon-common.ll |  57 ++++++++++--------
 4 files changed, 182 insertions(+), 175 deletions(-)

diff --git a/builtins/target-neon-16.ll b/builtins/target-neon-16.ll
index d07b6ba1..fa8819a3 100644
--- a/builtins/target-neon-16.ll
+++ b/builtins/target-neon-16.ll
@@ -42,12 +42,12 @@ include(`target-neon-common.ll')
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;; half conversion routines
 
-define <8 x float> @__half_to_float_varying(<8 x i16> %v) nounwind readnone {
+define <8 x float> @__half_to_float_varying(<8 x i16> %v) nounwind readnone alwaysinline {
   unary4to8conv(r, i16, float, @llvm.arm.neon.vcvthf2fp, %v)
   ret <8 x float> %r
 }
 
-define <8 x i16> @__float_to_half_varying(<8 x float> %v) nounwind readnone {
+define <8 x i16> @__float_to_half_varying(<8 x float> %v) nounwind readnone alwaysinline {
   unary4to8conv(r, float, i16, @llvm.arm.neon.vcvtfp2hf, %v)
   ret <8 x i16> %r
 }
@@ -115,13 +115,13 @@ declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>) nounwin
 declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>) nounwind readnone
 
 define <WIDTH x float> @__max_varying_float(<WIDTH x float>,
-                                            <WIDTH x float>) nounwind readnone {
+                                            <WIDTH x float>) nounwind readnone alwaysinline {
   binary4to8(r, float, @llvm.arm.neon.vmaxs.v4f32, %0, %1)
   ret <WIDTH x float> %r
 }
 
 define <WIDTH x float> @__min_varying_float(<WIDTH x float>,
-                                            <WIDTH x float>) nounwind readnone {
+                                            <WIDTH x float>) nounwind readnone alwaysinline {
   binary4to8(r, float, @llvm.arm.neon.vmins.v4f32, %0, %1)
   ret <WIDTH x float> %r
 }
@@ -131,22 +131,22 @@ declare <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32>, <4 x i32>) nounwind read
 declare <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 declare <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 
-define <WIDTH x i32> @__min_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone {
+define <WIDTH x i32> @__min_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
   binary4to8(r, i32, @llvm.arm.neon.vmins.v4i32, %0, %1)
   ret <WIDTH x i32> %r
 }
 
-define <WIDTH x i32> @__max_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone {
+define <WIDTH x i32> @__max_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
   binary4to8(r, i32, @llvm.arm.neon.vmaxs.v4i32, %0, %1)
   ret <WIDTH x i32> %r
 }
 
-define <WIDTH x i32> @__min_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone {
+define <WIDTH x i32> @__min_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
   binary4to8(r, i32, @llvm.arm.neon.vminu.v4i32, %0, %1)
   ret <WIDTH x i32> %r
 }
 
-define <WIDTH x i32> @__max_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone {
+define <WIDTH x i32> @__max_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
   binary4to8(r, i32, @llvm.arm.neon.vmaxu.v4i32, %0, %1)
   ret <WIDTH x i32> %r
 }
@@ -156,7 +156,7 @@ define <WIDTH x i32> @__max_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwin
 declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
 declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>) nounwind readnone
 
-define <WIDTH x float> @__rcp_varying_float(<WIDTH x float> %d) nounwind readnone {
+define <WIDTH x float> @__rcp_varying_float(<WIDTH x float> %d) nounwind readnone alwaysinline {
   unary4to8(x0, float, @llvm.arm.neon.vrecpe.v4f32, %d)
   binary4to8(x0_nr, float, @llvm.arm.neon.vrecps.v4f32, %d, %x0)
   %x1 = fmul <WIDTH x float> %x0, %x0_nr
@@ -168,7 +168,7 @@ define <WIDTH x float> @__rcp_varying_float(<WIDTH x float> %d) nounwind readnon
 declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone
 declare <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float>, <4 x float>) nounwind readnone
 
-define <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %d) nounwind readnone {
+define <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %d) nounwind readnone alwaysinline {
   unary4to8(x0, float, @llvm.arm.neon.vrsqrte.v4f32, %d)
   %x0_2 = fmul <WIDTH x float> %x0, %x0
   binary4to8(x0_nr, float, @llvm.arm.neon.vrsqrts.v4f32, %d, %x0_2)
@@ -179,7 +179,7 @@ define <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %d) nounwind readn
   ret <WIDTH x float> %x2
 }
 
-define float @__rsqrt_uniform_float(float) nounwind readnone {
+define float @__rsqrt_uniform_float(float) nounwind readnone alwaysinline {
   %v1 = bitcast float %0 to <1 x float>
   %vs = shufflevector <1 x float> %v1, <1 x float> undef,
         <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -188,7 +188,7 @@ define float @__rsqrt_uniform_float(float) nounwind readnone {
   ret float %r
 }
 
-define float @__rcp_uniform_float(float) nounwind readnone {
+define float @__rcp_uniform_float(float) nounwind readnone alwaysinline {
   %v1 = bitcast float %0 to <1 x float>
   %vs = shufflevector <1 x float> %v1, <1 x float> undef,
         <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -199,7 +199,7 @@ define float @__rcp_uniform_float(float) nounwind readnone {
 
 declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
 
-define <WIDTH x float> @__sqrt_varying_float(<WIDTH x float>) nounwind readnone {
+define <WIDTH x float> @__sqrt_varying_float(<WIDTH x float>) nounwind readnone alwaysinline {
   unary4to8(result, float, @llvm.sqrt.v4f32, %0)
   ;; this returns nan for v=0, which is undesirable..
   ;; %rsqrt = call <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %0)
@@ -211,7 +211,7 @@ define <WIDTH x float> @__sqrt_varying_float(<WIDTH x float>) nounwind readnone
 
 declare <4 x double> @llvm.sqrt.v4f64(<4 x double>)
 
-define <WIDTH x double> @__sqrt_varying_double(<WIDTH x double>) nounwind readnone {
+define <WIDTH x double> @__sqrt_varying_double(<WIDTH x double>) nounwind readnone alwaysinline {
   unary4to8(r, double, @llvm.sqrt.v4f64, %0)
   ret <WIDTH x double> %r
 }
@@ -219,7 +219,7 @@ define <WIDTH x double> @__sqrt_varying_double(<WIDTH x double>) nounwind readno
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;; reductions
 
-define i64 @__movmsk(<WIDTH x MASK>) nounwind readnone {
+define i64 @__movmsk(<WIDTH x MASK>) nounwind readnone alwaysinline {
   %and_mask = and <WIDTH x MASK> %0,
         <i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128>
   %v4 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %and_mask)
@@ -288,48 +288,48 @@ define(`neon_reduce', `
 declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>) nounwind readnone
 
-define internal float @add_f32(float, float) {
+define internal float @add_f32(float, float) nounwind readnone alwaysinline {
   %r = fadd float %0, %1
   ret float %r
 }
 
-define internal <WIDTH x float> @__add_varying_float(<WIDTH x float>, <WIDTH x float>) {
+define internal <WIDTH x float> @__add_varying_float(<WIDTH x float>, <WIDTH x float>) nounwind readnone alwaysinline {
   %r = fadd <WIDTH x float> %0, %1
   ret <WIDTH x float> %r
 }
 
-define float @__reduce_add_float(<WIDTH x float>) nounwind readnone {
+define float @__reduce_add_float(<WIDTH x float>) nounwind readnone alwaysinline {
   neon_reduce(float, @__add_varying_float, @llvm.arm.neon.vpadd.v2f32, @add_f32)
 }
 
 declare <2 x float> @llvm.arm.neon.vpmins.v2f32(<2 x float>, <2 x float>) nounwind readnone
 
-define internal float @min_f32(float, float) {
+define internal float @min_f32(float, float) nounwind readnone alwaysinline {
   %cmp = fcmp olt float %0, %1
   %r = select i1 %cmp, float %0, float %1
   ret float %r
 }
 
-define float @__reduce_min_float(<WIDTH x float>) nounwind readnone {
+define float @__reduce_min_float(<WIDTH x float>) nounwind readnone alwaysinline {
   neon_reduce(float, @__min_varying_float, @llvm.arm.neon.vpmins.v2f32, @min_f32)
 }
 
 declare <2 x float> @llvm.arm.neon.vpmaxs.v2f32(<2 x float>, <2 x float>) nounwind readnone
 
-define internal float @max_f32(float, float) {
+define internal float @max_f32(float, float) nounwind readnone alwaysinline {
   %cmp = fcmp ugt float %0, %1
   %r = select i1 %cmp, float %0, float %1
   ret float %r
 }
 
-define float @__reduce_max_float(<WIDTH x float>) nounwind readnone {
+define float @__reduce_max_float(<WIDTH x float>) nounwind readnone alwaysinline {
   neon_reduce(float, @__max_varying_float, @llvm.arm.neon.vpmaxs.v2f32, @max_f32)
 }
 
 declare <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8>) nounwind readnone
 declare <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16>) nounwind readnone
 
-define i16 @__reduce_add_int8(<WIDTH x i8>) nounwind readnone {
+define i16 @__reduce_add_int8(<WIDTH x i8>) nounwind readnone alwaysinline {
   %a16 = call <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8> %0)
   %a32 = call <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16> %a16)
   %a0 = extractelement <2 x i32> %a32, i32 0
@@ -341,7 +341,7 @@ define i16 @__reduce_add_int8(<WIDTH x i8>) nounwind readnone {
 
 declare <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<WIDTH x i16>)
 
-define i64 @__reduce_add_int16(<WIDTH x i16>) nounwind readnone {
+define i64 @__reduce_add_int16(<WIDTH x i16>) nounwind readnone alwaysinline {
   %a1 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<WIDTH x i16> %0)
   %a2 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %a1)
   %aa = extractelement <2 x i64> %a2, i32 0
@@ -352,7 +352,7 @@ define i64 @__reduce_add_int16(<WIDTH x i16>) nounwind readnone {
 
 declare <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32>) nounwind readnone
 
-define i64 @__reduce_add_int32(<WIDTH x i32>) nounwind readnone {
+define i64 @__reduce_add_int32(<WIDTH x i32>) nounwind readnone alwaysinline {
   v8tov4(i32, %0, %va, %vb)
   %pa = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %va)
   %pb = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %vb)
@@ -365,53 +365,53 @@ define i64 @__reduce_add_int32(<WIDTH x i32>) nounwind readnone {
 
 declare <2 x i32> @llvm.arm.neon.vpmins.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 
-define internal i32 @min_si32(i32, i32) {
+define internal i32 @min_si32(i32, i32) nounwind readnone alwaysinline {
   %cmp = icmp slt i32 %0, %1
   %r = select i1 %cmp, i32 %0, i32 %1
   ret i32 %r
 }
 
-define i32 @__reduce_min_int32(<WIDTH x i32>) nounwind readnone {
+define i32 @__reduce_min_int32(<WIDTH x i32>) nounwind readnone alwaysinline {
   neon_reduce(i32, @__min_varying_int32, @llvm.arm.neon.vpmins.v2i32, @min_si32)
 }
 
 declare <2 x i32> @llvm.arm.neon.vpmaxs.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 
-define internal i32 @max_si32(i32, i32) {
+define internal i32 @max_si32(i32, i32) nounwind readnone alwaysinline {
   %cmp = icmp sgt i32 %0, %1
   %r = select i1 %cmp, i32 %0, i32 %1
   ret i32 %r
 }
 
-define i32 @__reduce_max_int32(<WIDTH x i32>) nounwind readnone {
+define i32 @__reduce_max_int32(<WIDTH x i32>) nounwind readnone alwaysinline {
   neon_reduce(i32, @__max_varying_int32, @llvm.arm.neon.vpmaxs.v2i32, @max_si32)
 }
 
 declare <2 x i32> @llvm.arm.neon.vpminu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 
-define internal i32 @min_ui32(i32, i32) {
+define internal i32 @min_ui32(i32, i32) nounwind readnone alwaysinline {
   %cmp = icmp ult i32 %0, %1
   %r = select i1 %cmp, i32 %0, i32 %1
   ret i32 %r
 }
 
-define i32 @__reduce_min_uint32(<WIDTH x i32>) nounwind readnone {
+define i32 @__reduce_min_uint32(<WIDTH x i32>) nounwind readnone alwaysinline {
   neon_reduce(i32, @__min_varying_uint32, @llvm.arm.neon.vpmins.v2i32, @min_ui32)
 }
 
 declare <2 x i32> @llvm.arm.neon.vpmaxu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 
-define internal i32 @max_ui32(i32, i32) {
+define internal i32 @max_ui32(i32, i32) nounwind readnone alwaysinline {
   %cmp = icmp ugt i32 %0, %1
   %r = select i1 %cmp, i32 %0, i32 %1
   ret i32 %r
 }
 
-define i32 @__reduce_max_uint32(<WIDTH x i32>) nounwind readnone {
+define i32 @__reduce_max_uint32(<WIDTH x i32>) nounwind readnone alwaysinline {
   neon_reduce(i32, @__max_varying_uint32, @llvm.arm.neon.vpmaxs.v2i32, @max_ui32)
 }
 
-define double @__reduce_add_double(<WIDTH x double>) nounwind readnone {
+define double @__reduce_add_double(<WIDTH x double>) nounwind readnone alwaysinline {
   v8tov2(double, %0, %v0, %v1, %v2, %v3)
   %v01 = fadd <2 x double> %v0, %v1
   %v23 = fadd <2 x double> %v2, %v3
@@ -422,15 +422,15 @@ define double @__reduce_add_double(<WIDTH x double>) nounwind readnone {
   ret double %m
 }
 
-define double @__reduce_min_double(<WIDTH x double>) nounwind readnone {
+define double @__reduce_min_double(<WIDTH x double>) nounwind readnone alwaysinline {
   reduce8(double, @__min_varying_double, @__min_uniform_double)
 }
 
-define double @__reduce_max_double(<WIDTH x double>) nounwind readnone {
+define double @__reduce_max_double(<WIDTH x double>) nounwind readnone alwaysinline {
   reduce8(double, @__max_varying_double, @__max_uniform_double)
 }
 
-define i64 @__reduce_add_int64(<WIDTH x i64>) nounwind readnone {
+define i64 @__reduce_add_int64(<WIDTH x i64>) nounwind readnone alwaysinline {
   v8tov2(i64, %0, %v0, %v1, %v2, %v3)
   %v01 = add <2 x i64> %v0, %v1
   %v23 = add <2 x i64> %v2, %v3
@@ -441,19 +441,19 @@ define i64 @__reduce_add_int64(<WIDTH x i64>) nounwind readnone {
   ret i64 %m
 }
 
-define i64 @__reduce_min_int64(<WIDTH x i64>) nounwind readnone {
+define i64 @__reduce_min_int64(<WIDTH x i64>) nounwind readnone alwaysinline {
   reduce8(i64, @__min_varying_int64, @__min_uniform_int64)
 }
 
-define i64 @__reduce_max_int64(<WIDTH x i64>) nounwind readnone {
+define i64 @__reduce_max_int64(<WIDTH x i64>) nounwind readnone alwaysinline {
   reduce8(i64, @__max_varying_int64, @__max_uniform_int64)
 }
 
-define i64 @__reduce_min_uint64(<WIDTH x i64>) nounwind readnone {
+define i64 @__reduce_min_uint64(<WIDTH x i64>) nounwind readnone alwaysinline {
   reduce8(i64, @__min_varying_uint64, @__min_uniform_uint64)
 }
 
-define i64 @__reduce_max_uint64(<WIDTH x i64>) nounwind readnone {
+define i64 @__reduce_max_uint64(<WIDTH x i64>) nounwind readnone alwaysinline {
   reduce8(i64, @__max_varying_uint64, @__max_uniform_uint64)
 }
@@ -462,56 +462,56 @@ define i64 @__reduce_max_uint64(<WIDTH x i64>) nounwind readnone {
 
 declare <8 x i8> @llvm.arm.neon.vrhaddu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
 
-define <8 x i8> @__avg_up_uint8(<8 x i8>, <8 x i8>) nounwind readnone {
+define <8 x i8> @__avg_up_uint8(<8 x i8>, <8 x i8>) nounwind readnone alwaysinline {
   %r = call <8 x i8> @llvm.arm.neon.vrhaddu.v8i8(<8 x i8> %0, <8 x i8> %1)
   ret <8 x i8> %r
 }
 
 declare <8 x i8> @llvm.arm.neon.vrhadds.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
 
-define <8 x i8> @__avg_up_int8(<8 x i8>, <8 x i8>) nounwind readnone {
+define <8 x i8> @__avg_up_int8(<8 x i8>, <8 x i8>) nounwind readnone alwaysinline {
   %r = call <8 x i8> @llvm.arm.neon.vrhadds.v8i8(<8 x i8> %0, <8 x i8> %1)
   ret <8 x i8> %r
 }
 
 declare <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
 
-define <8 x i8> @__avg_down_uint8(<8 x i8>, <8 x i8>) nounwind readnone {
+define <8 x i8> @__avg_down_uint8(<8 x i8>, <8 x i8>) nounwind readnone alwaysinline {
   %r = call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %0, <8 x i8> %1)
   ret <8 x i8> %r
 }
 
 declare <8 x i8> @llvm.arm.neon.vhadds.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
 
-define <8 x i8> @__avg_down_int8(<8 x i8>, <8 x i8>) nounwind readnone {
+define <8 x i8> @__avg_down_int8(<8 x i8>, <8 x i8>) nounwind readnone alwaysinline {
   %r = call <8 x i8> @llvm.arm.neon.vhadds.v8i8(<8 x i8> %0, <8 x i8> %1)
   ret <8 x i8> %r
 }
 
 declare <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
-define <8 x i16> @__avg_up_uint16(<8 x i16>, <8 x i16>) nounwind readnone {
+define <8 x i16> @__avg_up_uint16(<8 x i16>, <8 x i16>) nounwind readnone alwaysinline {
   %r = call <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16> %0, <8 x i16> %1)
   ret <8 x i16> %r
 }
 
 declare <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
-define <8 x i16> @__avg_up_int16(<8 x i16>, <8 x i16>) nounwind readnone {
+define <8 x i16> @__avg_up_int16(<8 x i16>, <8 x i16>) nounwind readnone alwaysinline {
   %r = call <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16> %0, <8 x i16> %1)
   ret <8 x i16> %r
 }
 
 declare <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
-define <8 x i16> @__avg_down_uint16(<8 x i16>, <8 x i16>) nounwind readnone {
+define <8 x i16> @__avg_down_uint16(<8 x i16>, <8 x i16>) nounwind readnone alwaysinline {
   %r = call <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16> %0, <8 x i16> %1)
   ret <8 x i16> %r
 }
 
 declare <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
-define <8 x i16> @__avg_down_int16(<8 x i16>, <8 x i16>) nounwind readnone {
+define <8 x i16> @__avg_down_int16(<8 x i16>, <8 x i16>) nounwind readnone alwaysinline {
   %r = call <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16> %0, <8 x i16> %1)
   ret <8 x i16> %r
 }
diff --git a/builtins/target-neon-32.ll b/builtins/target-neon-32.ll
index 6c0064d1..54e22830 100644
--- a/builtins/target-neon-32.ll
+++ b/builtins/target-neon-32.ll
@@ -43,12 +43,12 @@ include(`target-neon-common.ll')
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;; half conversion routines
 
-define <4 x float> @__half_to_float_varying(<4 x i16> %v) nounwind readnone {
+define <4 x float> @__half_to_float_varying(<4 x i16> %v) nounwind readnone alwaysinline {
   %r = call <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16> %v)
   ret <4 x float> %r
 }
 
-define <4 x i16> @__float_to_half_varying(<4 x float> %v) nounwind readnone {
+define <4 x i16> @__float_to_half_varying(<4 x float> %v) nounwind readnone alwaysinline {
   %r = call <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float> %v)
   ret <4 x i16> %r
 }
@@ -106,13 +106,13 @@ declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>) nounwin
 declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>) nounwind readnone
 
 define <WIDTH x float> @__max_varying_float(<WIDTH x float>,
-                                            <WIDTH x float>) nounwind readnone {
+                                            <WIDTH x float>) nounwind readnone alwaysinline {
   %r = call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %0, <4 x float> %1)
   ret <WIDTH x float> %r
 }
 
 define <WIDTH x float> @__min_varying_float(<WIDTH x float>,
-                                            <WIDTH x float>) nounwind readnone {
+                                            <WIDTH x float>) nounwind readnone alwaysinline {
   %r = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %0, <4 x float> %1)
   ret <WIDTH x float> %r
 }
@@ -122,22 +122,22 @@ declare <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32>, <4 x i32>) nounwind read
 declare <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 declare <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 
-define <WIDTH x i32> @__min_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone {
+define <WIDTH x i32> @__min_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
   %r = call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %0, <4 x i32> %1)
   ret <4 x i32> %r
 }
 
-define <WIDTH x i32> @__max_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone {
+define <WIDTH x i32> @__max_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
   %r = call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %0, <4 x i32> %1)
   ret <4 x i32> %r
 }
 
-define <WIDTH x i32> @__min_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone {
+define <WIDTH x i32> @__min_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
   %r = call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %0, <4 x i32> %1)
   ret <4 x i32> %r
 }
 
-define <WIDTH x i32> @__max_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone {
+define <WIDTH x i32> @__max_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
   %r = call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %0, <4 x i32> %1)
   ret <4 x i32> %r
 }
@@ -147,7 +147,7 @@ define <WIDTH x i32> @__max_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwin
 declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
 declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>) nounwind readnone
 
-define <WIDTH x float> @__rcp_varying_float(<WIDTH x float> %d) nounwind readnone {
+define <WIDTH x float> @__rcp_varying_float(<WIDTH x float> %d) nounwind readnone alwaysinline {
   %x0 = call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %d)
   %x0_nr = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %d, <4 x float> %x0)
   %x1 = fmul <4 x float> %x0, %x0_nr
@@ -159,7 +159,7 @@ define <WIDTH x float> @__rcp_varying_float(<WIDTH x float> %d) nounwind readnon
 declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone
 declare <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float>, <4 x float>) nounwind readnone
 
-define <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %d) nounwind readnone {
+define <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %d) nounwind readnone alwaysinline {
   %x0 = call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %d)
   %x0_2 = fmul <4 x float> %x0, %x0
   %x0_nr = call <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float> %d, <4 x float> %x0_2)
@@ -170,7 +170,7 @@ define <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %d) nounwind readn
   ret <4 x float> %x2
 }
 
-define float @__rsqrt_uniform_float(float) nounwind readnone {
+define float @__rsqrt_uniform_float(float) nounwind readnone alwaysinline {
   %v1 = bitcast float %0 to <1 x float>
   %vs = shufflevector <1 x float> %v1, <1 x float> undef,
         <4 x i32> <i32 0, i32 0, i32 0, i32 0>
@@ -179,7 +179,7 @@ define float @__rsqrt_uniform_float(float) nounwind readnone {
   ret float %r
 }
 
-define float @__rcp_uniform_float(float) nounwind readnone {
+define float @__rcp_uniform_float(float) nounwind readnone alwaysinline {
   %v1 = bitcast float %0 to <1 x float>
   %vs = shufflevector <1 x float> %v1, <1 x float> undef,
         <4 x i32> <i32 0, i32 0, i32 0, i32 0>
@@ -190,7 +190,7 @@ define float @__rcp_uniform_float(float) nounwind readnone {
 
 declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
 
-define <WIDTH x float> @__sqrt_varying_float(<WIDTH x float>) nounwind readnone {
+define <WIDTH x float> @__sqrt_varying_float(<WIDTH x float>) nounwind readnone alwaysinline {
   %result = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %0)
   ;; this returns nan for v=0, which is undesirable..
   ;; %rsqrt = call <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %0)
@@ -200,7 +200,7 @@ define <WIDTH x float> @__sqrt_varying_float(<WIDTH x float>) nounwind readnone
 
 declare <4 x double> @llvm.sqrt.v4f64(<4 x double>)
 
-define <WIDTH x double> @__sqrt_varying_double(<WIDTH x double>) nounwind readnone {
+define <WIDTH x double> @__sqrt_varying_double(<WIDTH x double>) nounwind readnone alwaysinline {
   %r = call <4 x double> @llvm.sqrt.v4f64(<4 x double> %0)
   ret <4 x double> %r
 }
@@ -208,7 +208,7 @@ define <WIDTH x double> @__sqrt_varying_double(<WIDTH x double>) nounwind readno
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;; reductions
 
-define i64 @__movmsk(<4 x MASK>) nounwind readnone {
+define i64 @__movmsk(<4 x MASK>) nounwind readnone alwaysinline {
   %and_mask = and <4 x MASK> %0, <i32 1, i32 2, i32 4, i32 8>
   %v01 = shufflevector <4 x i32> %and_mask, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
   %v23 = shufflevector <4 x i32> %and_mask, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
@@ -264,42 +264,42 @@ define(`neon_reduce', `
 declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>) nounwind readnone
 
-define internal float @add_f32(float, float) {
+define internal float @add_f32(float, float) nounwind readnone alwaysinline {
   %r = fadd float %0, %1
   ret float %r
 }
 
-define float @__reduce_add_float(<4 x float>) nounwind readnone {
+define float @__reduce_add_float(<4 x float>) nounwind readnone alwaysinline {
   neon_reduce(float, @llvm.arm.neon.vpadd.v2f32, @add_f32)
 }
 
 declare <2 x float> @llvm.arm.neon.vpmins.v2f32(<2 x float>, <2 x float>) nounwind readnone
 
-define internal float @min_f32(float, float) {
+define internal float @min_f32(float, float) nounwind readnone alwaysinline {
   %cmp = fcmp olt float %0, %1
   %r = select i1 %cmp, float %0, float %1
   ret float %r
 }
 
-define float @__reduce_min_float(<4 x float>) nounwind readnone {
+define float @__reduce_min_float(<4 x float>) nounwind readnone alwaysinline {
   neon_reduce(float, @llvm.arm.neon.vpmins.v2f32, @min_f32)
 }
 
 declare <2 x float> @llvm.arm.neon.vpmaxs.v2f32(<2 x float>, <2 x float>) nounwind readnone
 
-define internal float @max_f32(float, float) {
+define internal float @max_f32(float, float) nounwind readnone alwaysinline {
   %cmp = fcmp ugt float %0, %1
   %r = select i1 %cmp, float %0, float %1
   ret float %r
 }
 
-define float @__reduce_max_float(<4 x float>) nounwind readnone {
+define float @__reduce_max_float(<4 x float>) nounwind readnone alwaysinline {
   neon_reduce(float, @llvm.arm.neon.vpmaxs.v2f32, @max_f32)
 }
 
 declare <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8>) nounwind readnone
 
-define i16 @__reduce_add_int8(<WIDTH x i8>) nounwind readnone {
+define i16 @__reduce_add_int8(<WIDTH x i8>) nounwind readnone alwaysinline {
   %v8 = shufflevector <4 x i8> %0, <4 x i8> zeroinitializer,
         <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %a16 = call <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8> %v8)
@@ -313,7 +313,7 @@ define i16 @__reduce_add_int8(<WIDTH x i8>) nounwind readnone {
 
 declare <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16>) nounwind readnone
 
-define i32 @__reduce_add_int16(<WIDTH x i16>) nounwind readnone {
+define i32 @__reduce_add_int16(<WIDTH x i16>) nounwind readnone alwaysinline {
   %a32 = call <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16> %0)
   %a0 = extractelement <2 x i32> %a32, i32 0
   %a1 = extractelement <2 x i32> %a32, i32 1
@@ -323,7 +323,7 @@ define i32 @__reduce_add_int16(<WIDTH x i16>) nounwind readnone {
 
 declare <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32>) nounwind readnone
 
-define i64 @__reduce_add_int32(<WIDTH x i32>) nounwind readnone {
+define i64 @__reduce_add_int32(<WIDTH x i32>) nounwind readnone alwaysinline {
   %a64 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %0)
   %a0 = extractelement <2 x i64> %a64, i32 0
   %a1 = extractelement <2 x i64> %a64, i32 1
@@ -333,53 +333,53 @@ define i64 @__reduce_add_int32(<WIDTH x i32>) nounwind readnone {
 
 declare <2 x i32> @llvm.arm.neon.vpmins.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 
-define internal i32 @min_si32(i32, i32) {
+define internal i32 @min_si32(i32, i32) nounwind readnone alwaysinline {
   %cmp = icmp slt i32 %0, %1
   %r = select i1 %cmp, i32 %0, i32 %1
   ret i32 %r
 }
 
-define i32 @__reduce_min_int32(<4 x i32>) nounwind readnone {
+define i32 @__reduce_min_int32(<4 x i32>) nounwind readnone alwaysinline {
   neon_reduce(i32, @llvm.arm.neon.vpmins.v2i32, @min_si32)
 }
 
 declare <2 x i32> @llvm.arm.neon.vpmaxs.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 
-define internal i32 @max_si32(i32, i32) {
+define internal i32 @max_si32(i32, i32) nounwind readnone alwaysinline {
   %cmp = icmp sgt i32 %0, %1
   %r = select i1 %cmp, i32 %0, i32 %1
   ret i32 %r
 }
 
-define i32 @__reduce_max_int32(<4 x i32>) nounwind readnone {
+define i32 @__reduce_max_int32(<4 x i32>) nounwind readnone alwaysinline {
   neon_reduce(i32, @llvm.arm.neon.vpmaxs.v2i32, @max_si32)
 }
 
 declare <2 x i32> @llvm.arm.neon.vpminu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 
-define internal i32 @min_ui32(i32, i32) {
+define internal i32 @min_ui32(i32, i32) nounwind readnone alwaysinline {
   %cmp = icmp ult i32 %0, %1
   %r = select i1 %cmp, i32 %0, i32 %1
   ret i32 %r
 }
 
-define i32 @__reduce_min_uint32(<4 x i32>) nounwind readnone {
+define i32 @__reduce_min_uint32(<4 x i32>) nounwind readnone alwaysinline {
   neon_reduce(i32, @llvm.arm.neon.vpmins.v2i32, @min_ui32)
 }
 
 declare <2 x i32> @llvm.arm.neon.vpmaxu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 
-define internal i32 @max_ui32(i32, i32) {
+define internal i32 @max_ui32(i32, i32) nounwind readnone alwaysinline {
   %cmp = icmp ugt i32 %0, %1
   %r = select i1 %cmp, i32 %0, i32 %1
   ret i32 %r
 }
 
-define i32 @__reduce_max_uint32(<4 x i32>) nounwind readnone {
+define i32 @__reduce_max_uint32(<4 x i32>) nounwind readnone alwaysinline {
   neon_reduce(i32, @llvm.arm.neon.vpmaxs.v2i32, @max_ui32)
 }
 
-define double @__reduce_add_double(<4 x double>) nounwind readnone {
+define double @__reduce_add_double(<4 x double>) nounwind readnone alwaysinline {
   %v0 = shufflevector <4 x double> %0, <4 x double> undef, <2 x i32> <i32 0, i32 1>
   %v1 = shufflevector <4 x double> %0, <4 x double> undef,
@@ -391,15 +391,15 @@ define double @__reduce_add_double(<4 x double>) nounwind readnone {
   ret double %m
 }
 
-define double @__reduce_min_double(<4 x double>) nounwind readnone {
+define double @__reduce_min_double(<4 x double>) nounwind readnone alwaysinline {
   reduce4(double, @__min_varying_double, @__min_uniform_double)
 }
 
-define double @__reduce_max_double(<4 x double>) nounwind readnone {
+define double @__reduce_max_double(<4 x double>) nounwind readnone alwaysinline {
   reduce4(double, @__max_varying_double, @__max_uniform_double)
 }
 
-define i64 @__reduce_add_int64(<4 x i64>) nounwind readnone {
+define i64 @__reduce_add_int64(<4 x i64>) nounwind readnone alwaysinline {
   %v0 = shufflevector <4 x i64> %0, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
   %v1 = shufflevector <4 x i64> %0, <4 x i64> undef,
@@ -411,19 +411,19 @@ define i64 @__reduce_add_int64(<4 x i64>) nounwind readnone {
   ret i64 %m
 }
 
-define i64 @__reduce_min_int64(<4 x i64>) nounwind readnone {
+define i64 @__reduce_min_int64(<4 x i64>) nounwind readnone alwaysinline {
   reduce4(i64, @__min_varying_int64, @__min_uniform_int64)
 }
 
-define i64 @__reduce_max_int64(<4 x i64>) nounwind readnone {
+define i64 @__reduce_max_int64(<4 x i64>) nounwind readnone alwaysinline {
   reduce4(i64, @__max_varying_int64, @__max_uniform_int64)
 }
 
-define i64 @__reduce_min_uint64(<4 x i64>) nounwind readnone {
+define i64 @__reduce_min_uint64(<4 x i64>) nounwind readnone alwaysinline {
   reduce4(i64, @__min_varying_uint64, @__min_uniform_uint64)
 }
 
-define i64 @__reduce_max_uint64(<4 x i64>) nounwind readnone {
+define i64 @__reduce_max_uint64(<4 x i64>) nounwind readnone alwaysinline {
   reduce4(i64, @__max_varying_uint64, @__max_uniform_uint64)
 }
@@ -432,56 +432,56 @@ define i64 @__reduce_max_uint64(<4 x i64>) nounwind readnone {
 
 declare <4 x i8> @llvm.arm.neon.vrhaddu.v4i8(<4 x i8>, <4 x i8>) nounwind readnone
 
-define <4 x i8> @__avg_up_uint8(<4 x i8>, <4 x i8>) nounwind readnone {
+define <4 x i8> @__avg_up_uint8(<4 x i8>, <4 x i8>) nounwind readnone alwaysinline {
   %r = call <4 x i8> @llvm.arm.neon.vrhaddu.v4i8(<4 x i8> %0, <4 x i8> %1)
   ret <4 x i8> %r
 }
 
 declare <4 x i8> @llvm.arm.neon.vrhadds.v4i8(<4 x i8>, <4 x i8>) nounwind readnone
 
-define <4 x i8> @__avg_up_int8(<4 x i8>, <4 x i8>) nounwind readnone {
+define <4 x i8> @__avg_up_int8(<4 x i8>, <4 x i8>) nounwind readnone alwaysinline {
   %r = call <4 x i8> @llvm.arm.neon.vrhadds.v4i8(<4 x i8> %0, <4 x i8> %1)
   ret <4 x i8> %r
 }
 
 declare <4 x i8> @llvm.arm.neon.vhaddu.v4i8(<4 x i8>, <4 x i8>) nounwind readnone
 
-define <4 x i8> @__avg_down_uint8(<4 x i8>, <4 x i8>) nounwind readnone {
+define <4 x i8> @__avg_down_uint8(<4 x i8>, <4 x i8>) nounwind readnone alwaysinline {
   %r = call <4 x i8> @llvm.arm.neon.vhaddu.v4i8(<4 x i8> %0, <4 x i8> %1)
   ret <4 x i8> %r
 }
 
 declare <4 x i8> @llvm.arm.neon.vhadds.v4i8(<4 x i8>, <4 x i8>) nounwind readnone
 
-define <4 x i8> @__avg_down_int8(<4 x i8>, <4 x i8>) nounwind readnone {
+define <4 x i8> @__avg_down_int8(<4 x i8>, <4 x i8>) nounwind readnone alwaysinline {
   %r = call <4 x i8> @llvm.arm.neon.vhadds.v4i8(<4 x i8> %0, <4 x i8> %1)
   ret <4 x i8> %r
 }
 
 declare <4 x i16> @llvm.arm.neon.vrhaddu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
 
-define <4 x i16> @__avg_up_uint16(<4 x i16>, <4 x i16>) nounwind readnone {
+define <4 x i16> @__avg_up_uint16(<4 x i16>, <4 x i16>) nounwind readnone alwaysinline {
   %r = call <4 x i16> @llvm.arm.neon.vrhaddu.v4i16(<4 x i16> %0, <4 x i16> %1)
   ret <4 x i16> %r
 }
 
 declare <4 x i16> @llvm.arm.neon.vrhadds.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
 
-define <4 x i16> @__avg_up_int16(<4 x i16>, <4 x i16>) nounwind readnone {
+define <4 x i16> @__avg_up_int16(<4 x i16>, <4 x i16>) nounwind readnone alwaysinline {
   %r = call <4 x i16> @llvm.arm.neon.vrhadds.v4i16(<4 x i16> %0, <4 x i16> %1)
   ret <4 x i16> %r
 }
 
 declare <4 x i16> @llvm.arm.neon.vhaddu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
 
-define <4 x i16> @__avg_down_uint16(<4 x i16>, <4 x i16>) nounwind readnone {
+define <4 x i16> @__avg_down_uint16(<4 x i16>, <4 x i16>) nounwind readnone alwaysinline {
   %r = call <4 x i16> @llvm.arm.neon.vhaddu.v4i16(<4 x i16> %0, <4 x i16> %1)
   ret <4 x i16> %r
 }
 
 declare <4 x i16> @llvm.arm.neon.vhadds.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
 
-define <4 x i16> @__avg_down_int16(<4 x i16>, <4 x i16>) nounwind readnone {
+define <4 x i16> @__avg_down_int16(<4 x i16>, <4 x i16>) nounwind readnone alwaysinline {
   %r = call <4 x i16> @llvm.arm.neon.vhadds.v4i16(<4 x i16> %0, <4 x i16> %1)
   ret <4 x i16> %r
 }
diff --git a/builtins/target-neon-8.ll b/builtins/target-neon-8.ll
index 3a7898dc..2b1939de 100644
--- a/builtins/target-neon-8.ll
+++ b/builtins/target-neon-8.ll
@@ -42,12 +42,12 @@ include(`target-neon-common.ll')
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;; half conversion routines
 
-define <16 x float> @__half_to_float_varying(<16 x i16> %v) nounwind readnone {
+define <16 x float> @__half_to_float_varying(<16 x i16> %v) nounwind readnone alwaysinline {
   unary4to16conv(r, i16, float, @llvm.arm.neon.vcvthf2fp, %v)
   ret <16 x float> %r
 }
 
-define <16 x i16> @__float_to_half_varying(<16 x float> %v) nounwind readnone {
+define <16 x i16> @__float_to_half_varying(<16 x float> %v) nounwind readnone alwaysinline {
   unary4to16conv(r, float, i16, @llvm.arm.neon.vcvtfp2hf, %v)
   ret <16 x i16> %r
 }
@@ -125,13 +125,13 @@ declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>) nounwin
 declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>) nounwind readnone
 
 define <WIDTH x float> @__max_varying_float(<WIDTH x float>,
-                                            <WIDTH x float>) nounwind readnone {
+                                            <WIDTH x float>) nounwind readnone alwaysinline {
   binary4to16(r, float, @llvm.arm.neon.vmaxs.v4f32, %0, %1)
   ret <WIDTH x float> %r
 }
 
 define <WIDTH x float> @__min_varying_float(<WIDTH x float>,
-                                            <WIDTH x float>) nounwind readnone {
+                                            <WIDTH x float>) nounwind readnone alwaysinline {
   binary4to16(r, float, @llvm.arm.neon.vmins.v4f32, %0, %1)
   ret <WIDTH x float> %r
 }
@@ -141,22 +141,22 @@ declare <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32>, <4 x i32>) nounwind read
 declare <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 declare <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 
-define <WIDTH x i32> @__min_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone {
+define <WIDTH x i32> @__min_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
   binary4to16(r, i32, @llvm.arm.neon.vmins.v4i32, %0, %1)
   ret <WIDTH x i32> %r
 }
 
-define <WIDTH x i32> @__max_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone {
+define <WIDTH x i32> @__max_varying_int32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
   binary4to16(r, i32, @llvm.arm.neon.vmaxs.v4i32, %0, %1)
   ret <WIDTH x i32> %r
 }
 
-define <WIDTH x i32> @__min_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone {
+define <WIDTH x i32> @__min_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
   binary4to16(r, i32, @llvm.arm.neon.vminu.v4i32, %0, %1)
   ret <WIDTH x i32> %r
 }
 
-define <WIDTH x i32> @__max_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone {
+define <WIDTH x i32> @__max_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwind readnone alwaysinline {
   binary4to16(r, i32, @llvm.arm.neon.vmaxu.v4i32, %0, %1)
   ret <WIDTH x i32> %r
 }
@@ -166,7 +166,7 @@ define <WIDTH x i32> @__max_varying_uint32(<WIDTH x i32>, <WIDTH x i32>) nounwin
 declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
 declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>) nounwind readnone
 
-define <WIDTH x float> @__rcp_varying_float(<WIDTH x float> %d) nounwind readnone {
+define <WIDTH x float> @__rcp_varying_float(<WIDTH x float> %d) nounwind readnone alwaysinline {
   unary4to16(x0, float, @llvm.arm.neon.vrecpe.v4f32, %d)
   binary4to16(x0_nr, float, @llvm.arm.neon.vrecps.v4f32, %d, %x0)
   %x1 = fmul <WIDTH x float> %x0, %x0_nr
@@ -178,7 +178,7 @@ define <WIDTH x float> @__rcp_varying_float(<WIDTH x float> %d) nounwind readnon
 declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone
 declare <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float>, <4 x float>) nounwind readnone
 
-define <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %d) nounwind readnone {
+define <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %d) nounwind readnone alwaysinline {
   unary4to16(x0, float, @llvm.arm.neon.vrsqrte.v4f32, %d)
   %x0_2 = fmul <WIDTH x float> %x0, %x0
   binary4to16(x0_nr, float, @llvm.arm.neon.vrsqrts.v4f32, %d, %x0_2)
@@ -189,7 +189,7 @@ define <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %d) nounwind readn
   ret <WIDTH x float> %x2
 }
 
-define float @__rsqrt_uniform_float(float) nounwind readnone {
+define float @__rsqrt_uniform_float(float) nounwind readnone alwaysinline {
   %v1 = bitcast float %0 to <1 x float>
   %vs = shufflevector <1 x float> %v1, <1 x float> undef,
         <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -198,7 +198,7 @@ define float @__rsqrt_uniform_float(float) nounwind readnone {
   ret float %r
 }
 
-define float @__rcp_uniform_float(float) nounwind readnone {
+define float @__rcp_uniform_float(float) nounwind readnone alwaysinline {
   %v1 = bitcast float %0 to <1 x float>
   %vs = shufflevector <1 x float> %v1, <1 x float> undef,
         <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -209,7 +209,7 @@ define float @__rcp_uniform_float(float) nounwind readnone {
 
 declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
 
-define <WIDTH x float> @__sqrt_varying_float(<WIDTH x float>) nounwind readnone {
+define <WIDTH x float> @__sqrt_varying_float(<WIDTH x float>) nounwind readnone alwaysinline {
   unary4to16(result, float, @llvm.sqrt.v4f32, %0)
   ;; this returns nan for v=0, which is undesirable..
   ;; %rsqrt = call <WIDTH x float> @__rsqrt_varying_float(<WIDTH x float> %0)
@@ -225,7 +225,7 @@ define <WIDTH x float> @__sqrt_varying_float(<WIDTH x float>) nounwind readnone
 
 declare <4 x double> @llvm.sqrt.v4f64(<4 x double>)
 
-define <WIDTH x double> @__sqrt_varying_double(<WIDTH x double>) nounwind readnone {
+define <WIDTH x double> @__sqrt_varying_double(<WIDTH x double>) nounwind readnone alwaysinline {
   unary4to16(r, double, @llvm.sqrt.v4f64, %0)
   ret <WIDTH x double> %r
 }
@@ -233,7 +233,7 @@ define <WIDTH x double> @__sqrt_varying_double(<WIDTH x double>) nounwind readno
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;; reductions
 
-define i64 @__movmsk(<WIDTH x MASK>) nounwind readnone {
+define i64 @__movmsk(<WIDTH x MASK>) nounwind readnone alwaysinline {
   %and_mask = and <WIDTH x MASK> %0,
         <i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128>
@@ -327,41 +327,41 @@ define(`neon_reduce', `
 declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>) nounwind readnone
 
-define internal float @add_f32(float, float) {
+define internal float @add_f32(float, float) nounwind readnone alwaysinline {
   %r = fadd float %0, %1
   ret float %r
 }
 
-define internal <WIDTH x float> @__add_varying_float(<WIDTH x float>, <WIDTH x float>) {
+define internal <WIDTH x float> @__add_varying_float(<WIDTH x float>, <WIDTH x float>) nounwind readnone alwaysinline {
   %r = fadd <WIDTH x float> %0, %1
   ret <WIDTH x float> %r
 }
 
-define float @__reduce_add_float(<WIDTH x float>) nounwind readnone {
+define float @__reduce_add_float(<WIDTH x float>) nounwind readnone alwaysinline {
   neon_reduce(float, @__add_varying_float, @llvm.arm.neon.vpadd.v2f32, @add_f32)
 }
 
 declare <2 x float> @llvm.arm.neon.vpmins.v2f32(<2 x float>, <2 x float>) nounwind readnone
 
-define internal float @min_f32(float, float) {
+define internal float @min_f32(float, float) nounwind readnone alwaysinline {
   %cmp = fcmp olt float %0, %1
   %r = select i1 %cmp, float %0, float %1
   ret float %r
 }
 
-define float @__reduce_min_float(<WIDTH x float>) nounwind readnone {
+define float @__reduce_min_float(<WIDTH x float>) nounwind readnone alwaysinline {
   neon_reduce(float, @__min_varying_float, @llvm.arm.neon.vpmins.v2f32, @min_f32)
 }
 
 declare <2 x float> @llvm.arm.neon.vpmaxs.v2f32(<2 x float>, <2 x float>) nounwind readnone
 
-define internal float @max_f32(float, float) {
+define internal float @max_f32(float, float) nounwind readnone alwaysinline {
   %cmp = fcmp ugt float %0, %1
   %r = select i1 %cmp, float %0, float %1
   ret float %r
 }
 
-define float @__reduce_max_float(<WIDTH x float>) nounwind readnone {
+define float @__reduce_max_float(<WIDTH x float>) nounwind readnone alwaysinline {
   neon_reduce(float, @__max_varying_float, @llvm.arm.neon.vpmaxs.v2f32, @max_f32)
 }
@@ -369,7 +369,7 @@ declare <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8>) nounwind readnon
 declare <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16>) nounwind readnone
 declare <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32>) nounwind readnone
 
-define i64 @__reduce_add_int8(<WIDTH x i8>) nounwind readnone {
+define i64 @__reduce_add_int8(<WIDTH x i8>) nounwind readnone alwaysinline {
   %a16 = call <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8> %0)
   %a32 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %a16)
   %a64 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %a32)
@@ -379,7 +379,7 @@ define i64 @__reduce_add_int8(<WIDTH x i8>) nounwind readnone {
   ret i64 %r
 }
 
-define i64 @__reduce_add_int16(<WIDTH x i16>) nounwind readnone {
+define i64 @__reduce_add_int16(<WIDTH x i16>) nounwind readnone alwaysinline {
   v16tov8(i16, %0, %va, %vb)
   %a32 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %va)
   %b32 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %vb)
@@ -392,7 +392,7 @@ define i64 @__reduce_add_int16(<WIDTH x i16>) nounwind readnone {
   ret i64 %r
 }
 
-define i64 @__reduce_add_int32(<WIDTH x i32>) nounwind readnone {
+define i64 @__reduce_add_int32(<WIDTH x i32>) nounwind readnone alwaysinline {
   v16tov4(i32, %0, %va, %vb, %vc, %vd)
   %a64 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %va)
   %b64 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %vb)
@@ -409,101 +409,101 @@ define i64 @__reduce_add_int32(<WIDTH x i32>) nounwind readnone {
 
 declare <2 x i32> @llvm.arm.neon.vpmins.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 
-define internal i32 @min_si32(i32, i32) {
+define internal i32 @min_si32(i32, i32) nounwind readnone alwaysinline {
   %cmp = icmp slt i32 %0, %1
   %r = select i1 %cmp, i32 %0, i32 %1
   ret i32 %r
 }
 
-define i32 @__reduce_min_int32(<WIDTH x i32>) nounwind readnone {
+define i32 @__reduce_min_int32(<WIDTH x i32>) nounwind readnone alwaysinline {
   neon_reduce(i32, @__min_varying_int32, @llvm.arm.neon.vpmins.v2i32, @min_si32)
 }
 
 declare <2 x i32> @llvm.arm.neon.vpmaxs.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 
-define internal i32 @max_si32(i32, i32) {
+define internal i32 @max_si32(i32, i32) nounwind readnone alwaysinline {
   %cmp = icmp sgt i32 %0, %1
   %r = select i1 %cmp, i32 %0, i32 %1
   ret i32 %r
 }
 
-define i32 @__reduce_max_int32(<WIDTH x i32>) nounwind readnone {
+define i32 @__reduce_max_int32(<WIDTH x i32>) nounwind readnone alwaysinline {
   neon_reduce(i32, @__max_varying_int32, @llvm.arm.neon.vpmaxs.v2i32, @max_si32)
 }
 
 declare <2 x i32> @llvm.arm.neon.vpminu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 
-define internal i32 @min_ui32(i32, i32) {
+define internal i32 @min_ui32(i32, i32) nounwind readnone alwaysinline {
   %cmp = icmp ult i32 %0, %1
   %r = select i1 %cmp, i32 %0, i32 %1
   ret i32 %r
 }
 
-define i32 @__reduce_min_uint32(<WIDTH x i32>) nounwind readnone {
+define i32 @__reduce_min_uint32(<WIDTH x i32>) nounwind readnone alwaysinline {
   neon_reduce(i32, @__min_varying_uint32, @llvm.arm.neon.vpmins.v2i32, @min_ui32)
 }
 
 declare <2 x i32> @llvm.arm.neon.vpmaxu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 
-define internal i32 @max_ui32(i32, i32) {
+define internal i32 @max_ui32(i32, i32) nounwind readnone alwaysinline {
   %cmp = icmp ugt i32 %0, %1
   %r = select i1 %cmp, i32 %0, i32 %1
   ret i32 %r
 }
 
-define i32 @__reduce_max_uint32(<WIDTH x i32>) nounwind readnone {
+define i32 @__reduce_max_uint32(<WIDTH x i32>) nounwind readnone alwaysinline {
   neon_reduce(i32, @__max_varying_uint32, @llvm.arm.neon.vpmaxs.v2i32, @max_ui32)
 }
 
-define internal double @__add_uniform_double(double, double) {
+define internal double @__add_uniform_double(double, double) nounwind readnone alwaysinline {
   %r = fadd double %0, %1
   ret double %r
 }
 
-define internal <WIDTH x double> @__add_varying_double(<WIDTH x double>, <WIDTH x double>) {
+define internal <WIDTH x double> @__add_varying_double(<WIDTH x double>, <WIDTH x double>) nounwind readnone alwaysinline {
   %r = fadd <WIDTH x double> %0, %1
   ret <WIDTH x double> %r
 }
 
-define double @__reduce_add_double(<WIDTH x double>) nounwind readnone {
+define double @__reduce_add_double(<WIDTH x double>) nounwind readnone alwaysinline {
   reduce16(double, @__add_varying_double, @__add_uniform_double)
 }
 
-define double @__reduce_min_double(<WIDTH x double>) nounwind readnone {
+define double @__reduce_min_double(<WIDTH x double>) nounwind readnone alwaysinline {
   reduce16(double, @__min_varying_double, @__min_uniform_double)
 }
 
-define double @__reduce_max_double(<WIDTH x double>) nounwind readnone {
+define double @__reduce_max_double(<WIDTH x double>) nounwind readnone alwaysinline {
   reduce16(double, @__max_varying_double, @__max_uniform_double)
 }
 
-define internal i64 @__add_uniform_int64(i64, i64) {
+define internal i64 @__add_uniform_int64(i64, i64) nounwind readnone alwaysinline {
   %r = add i64 %0, %1
   ret i64 %r
 }
 
-define internal <WIDTH x i64> @__add_varying_int64(<WIDTH x i64>, <WIDTH x i64>) {
+define internal <WIDTH x i64> @__add_varying_int64(<WIDTH x i64>, <WIDTH x i64>) nounwind readnone alwaysinline {
   %r = add <WIDTH x i64> %0, %1
   ret <WIDTH x i64> %r
 }
 
-define i64 @__reduce_add_int64(<WIDTH x i64>) nounwind readnone {
+define i64 @__reduce_add_int64(<WIDTH x i64>) nounwind readnone alwaysinline {
   reduce16(i64, @__add_varying_int64, @__add_uniform_int64)
 }
 
-define i64 @__reduce_min_int64(<WIDTH x i64>) nounwind readnone {
+define i64 @__reduce_min_int64(<WIDTH x i64>) nounwind readnone alwaysinline {
   reduce16(i64, @__min_varying_int64, @__min_uniform_int64)
 }
 
-define i64 @__reduce_max_int64(<WIDTH x i64>) nounwind readnone {
+define i64 @__reduce_max_int64(<WIDTH x i64>) nounwind readnone alwaysinline {
   reduce16(i64, @__max_varying_int64, @__max_uniform_int64)
 }
 
-define i64 @__reduce_min_uint64(<WIDTH x i64>) nounwind readnone {
+define i64 @__reduce_min_uint64(<WIDTH x i64>) nounwind readnone alwaysinline {
   reduce16(i64, @__min_varying_uint64, @__min_uniform_uint64)
 }
 
-define i64 @__reduce_max_uint64(<WIDTH x i64>) nounwind readnone {
+define i64 @__reduce_max_uint64(<WIDTH x i64>) nounwind readnone alwaysinline {
   reduce16(i64, @__max_varying_uint64, @__max_uniform_uint64)
 }
@@ -512,35 +512,35 @@ define i64 @__reduce_max_uint64(<WIDTH x i64>) nounwind readnone {
 
 declare <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
-define <16 x i8> @__avg_up_uint8(<16 x i8>, <16 x i8>) nounwind readnone {
+define <16 x i8> @__avg_up_uint8(<16 x i8>, <16 x i8>) nounwind readnone alwaysinline {
   %r = call <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8> %0, <16 x i8> %1)
   ret <16 x i8> %r
 }
 
 declare <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
-define <16 x i8> @__avg_up_int8(<16 x i8>, <16 x i8>) nounwind readnone {
+define <16 x i8> @__avg_up_int8(<16 x i8>, <16 x i8>) nounwind readnone alwaysinline {
   %r = call <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8> %0, <16 x i8> %1)
   ret <16 x i8> %r
 }
 
 declare <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
-define <16 x i8> @__avg_down_uint8(<16 x i8>, <16 x i8>) nounwind readnone {
+define <16 x i8> @__avg_down_uint8(<16 x i8>, <16 x i8>) nounwind readnone alwaysinline {
   %r = call <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8> %0, <16 x i8> %1)
   ret <16 x i8> %r
 }
 
 declare <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
-define <16 x i8> @__avg_down_int8(<16 x i8>, <16 x i8>) nounwind readnone {
+define <16 x i8> @__avg_down_int8(<16 x i8>, <16 x i8>) nounwind readnone alwaysinline {
   %r = call <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8> %0, <16 x i8> %1)
   ret <16 x i8> %r
 }
 
 declare <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
-define <16 x i16> @__avg_up_uint16(<16 x i16>, <16 x i16>) nounwind readnone {
+define <16 x i16> @__avg_up_uint16(<16 x i16>, <16 x i16>) nounwind readnone alwaysinline {
   v16tov8(i16, %0, %a0, %b0)
   v16tov8(i16, %1, %a1, %b1)
   %r0 = call <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16> %a0, <8 x i16> %a1)
@@ -551,7 +551,7 @@ define <16 x i16> @__avg_up_uint16(<16 x i16>, <16 x i16>) nounwind readnone {
 
 declare <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
-define <16 x i16> @__avg_up_int16(<16 x i16>, <16 x i16>) nounwind readnone {
+define <16 x i16> @__avg_up_int16(<16 x i16>, <16 x i16>) nounwind readnone alwaysinline {
   v16tov8(i16, %0, %a0, %b0)
   v16tov8(i16, %1, %a1, %b1)
   %r0 = call <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16> %a0, <8 x i16> %a1)
@@ -562,7 +562,7 @@ define <16 x i16> @__avg_up_int16(<16 x i16>, <16 x i16>) nounwind readnone {
 
 declare <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
-define <16 x i16> @__avg_down_uint16(<16 x i16>, <16 x i16>) nounwind readnone {
+define <16 x i16> @__avg_down_uint16(<16 x i16>, <16 x i16>) nounwind readnone alwaysinline {
   v16tov8(i16, %0, %a0, %b0)
   v16tov8(i16, %1, %a1, %b1)
   %r0 = call <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16> %a0, <8 x i16> %a1)
@@ -573,7 +573,7 @@ define <16 x i16> @__avg_down_uint16(<16 x i16>, <16 x i16>) nounwind readnone {
 
 declare <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
-define <16 x i16> @__avg_down_int16(<16 x i16>, <16 x i16>) nounwind readnone {
+define <16 x i16> @__avg_down_int16(<16 x i16>, <16 x i16>) nounwind readnone alwaysinline {
   v16tov8(i16, %0, %a0, %b0)
   v16tov8(i16, %1, %a1, %b1)
   %r0 = call <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16> %a0, <8 x i16> %a1)
diff --git a/builtins/target-neon-common.ll b/builtins/target-neon-common.ll
index b021fbb6..d2537ae7 100644
--- a/builtins/target-neon-common.ll
+++ b/builtins/target-neon-common.ll
@@ -49,7 +49,7 @@ ctlztz()
 declare <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float>) nounwind readnone
 declare <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16>) nounwind readnone
 
-define float @__half_to_float_uniform(i16 %v) nounwind readnone {
+define float @__half_to_float_uniform(i16 %v) nounwind readnone alwaysinline {
   %v1 = bitcast i16 %v to <1 x i16>
   %vec = shufflevector <1 x i16> %v1, <1 x i16> undef,
         <4 x i32> <i32 0, i32 0, i32 0, i32 0>
@@ -58,7 +58,7 @@ define float @__half_to_float_uniform(i16 %v) nounwind readnone {
   ret float %r
 }
 
-define i16 @__float_to_half_uniform(float %v) nounwind readnone {
+define i16 @__float_to_half_uniform(float %v) nounwind readnone alwaysinline {
   %v1 = bitcast float %v to <1 x float>
   %vec = shufflevector <1 x float> %v1, <1 x float> undef,
         <4 x i32> <i32 0, i32 0, i32 0, i32 0>
@@ -70,7 +70,14 @@ define i16 @__float_to_half_uniform(float %v) nounwind readnone {
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;; math
 
-define void @__fastmath() nounwind {
+declare i32 @llvm.arm.get.fpscr() nounwind
+declare void @llvm.arm.set.fpscr(i32) nounwind
+
+define void @__fastmath() nounwind alwaysinline {
+  %x = call i32 @llvm.arm.get.fpscr()
+  ; Turn on FTZ (bit 24) and default NaN (bit 25)
+  %y = or i32 %x, 50331648
+  call void @llvm.arm.set.fpscr(i32 %y)
   ret void
 }
@@ -120,111 +127,111 @@ declare double @__ceil_uniform_double(double) nounwind readnone
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;; min/max
 
-define float @__max_uniform_float(float, float) nounwind readnone {
+define float @__max_uniform_float(float, float) nounwind readnone alwaysinline {
   %cmp = fcmp ugt float %0, %1
   %r = select i1 %cmp, float %0, float %1
   ret float %r
 }
 
-define float @__min_uniform_float(float, float) nounwind readnone {
+define float @__min_uniform_float(float, float) nounwind readnone alwaysinline {
   %cmp = fcmp ult float %0, %1
   %r = select i1 %cmp, float %0, float %1
   ret float %r
 }
 
-define i32 @__min_uniform_int32(i32, i32) nounwind readnone {
+define i32 @__min_uniform_int32(i32, i32) nounwind readnone alwaysinline {
   %cmp = icmp slt i32 %0, %1
   %r = select i1 %cmp, i32 %0, i32 %1
   ret i32 %r
 }
 
-define i32 @__max_uniform_int32(i32, i32) nounwind readnone {
+define i32 @__max_uniform_int32(i32, i32) nounwind readnone alwaysinline {
   %cmp = icmp sgt i32 %0, %1
   %r = select i1 %cmp, i32 %0, i32 %1
   ret i32 %r
 }
 
-define i32 @__min_uniform_uint32(i32, i32) nounwind readnone {
+define i32 @__min_uniform_uint32(i32, i32) nounwind readnone alwaysinline {
   %cmp = icmp ult i32 %0, %1
   %r = select i1 %cmp, i32 %0, i32 %1
   ret i32 %r
 }
 
-define i32 @__max_uniform_uint32(i32, i32) nounwind readnone {
+define i32 @__max_uniform_uint32(i32, i32) nounwind readnone alwaysinline {
   %cmp = icmp ugt i32 %0, %1
   %r = select i1 %cmp, i32 %0, i32 %1
   ret i32 %r
 }
 
-define i64 @__min_uniform_int64(i64, i64) nounwind readnone {
+define i64 @__min_uniform_int64(i64, i64) nounwind readnone alwaysinline {
   %cmp = icmp slt i64 %0, %1
   %r = select i1 %cmp, i64 %0, i64 %1
   ret i64 %r
 }
 
-define i64 @__max_uniform_int64(i64, i64) nounwind readnone {
+define i64 @__max_uniform_int64(i64, i64) nounwind readnone alwaysinline {
   %cmp = icmp sgt i64 %0, %1
   %r = select i1 %cmp, i64 %0, i64 %1
   ret i64 %r
 }
 
-define i64 @__min_uniform_uint64(i64, i64) nounwind readnone {
+define i64 @__min_uniform_uint64(i64, i64) nounwind readnone alwaysinline {
   %cmp = icmp ult i64 %0, %1
   %r = select i1 %cmp, i64 %0, i64 %1
   ret i64 %r
 }
 
-define i64 @__max_uniform_uint64(i64, i64) nounwind readnone {
+define i64 @__max_uniform_uint64(i64, i64) nounwind readnone alwaysinline {
   %cmp = icmp ugt i64 %0, %1
   %r = select i1 %cmp, i64 %0, i64 %1
   ret i64 %r
 }
 
-define double @__min_uniform_double(double, double) nounwind readnone {
+define double @__min_uniform_double(double, double) nounwind readnone alwaysinline {
   %cmp = fcmp olt double %0, %1
   %r = select i1 %cmp, double %0, double %1
   ret double %r
 }
 
-define double @__max_uniform_double(double, double) nounwind readnone {
+define double @__max_uniform_double(double, double) nounwind readnone alwaysinline {
   %cmp = fcmp ogt double %0, %1
   %r = select i1 %cmp, double %0, double %1
   ret double %r
 }
 
-define <WIDTH x i64> @__min_varying_int64(<WIDTH x i64>, <WIDTH x i64>) nounwind readnone {
+define <WIDTH x i64> @__min_varying_int64(<WIDTH x i64>, <WIDTH x i64>) nounwind readnone alwaysinline {
   %m = icmp slt <WIDTH x i64> %0, %1
   %r = select <WIDTH x i1> %m, <WIDTH x i64> %0, <WIDTH x i64> %1
   ret <WIDTH x i64> %r
 }
 
-define <WIDTH x i64> @__max_varying_int64(<WIDTH x i64>, <WIDTH x i64>) nounwind readnone {
+define <WIDTH x i64> @__max_varying_int64(<WIDTH x i64>, <WIDTH x i64>) nounwind readnone alwaysinline {
   %m = icmp sgt <WIDTH x i64> %0, %1
   %r = select <WIDTH x i1> %m, <WIDTH x i64> %0, <WIDTH x i64> %1
   ret <WIDTH x i64> %r
 }
 
-define <WIDTH x i64> @__min_varying_uint64(<WIDTH x i64>, <WIDTH x i64>) nounwind readnone {
+define <WIDTH x i64> @__min_varying_uint64(<WIDTH x i64>, <WIDTH x i64>) nounwind readnone alwaysinline {
   %m = icmp ult <WIDTH x i64> %0, %1
   %r = select <WIDTH x i1> %m, <WIDTH x i64> %0, <WIDTH x i64> %1
   ret <WIDTH x i64> %r
 }
 
-define <WIDTH x i64> @__max_varying_uint64(<WIDTH x i64>, <WIDTH x i64>) nounwind readnone {
+define <WIDTH x i64> @__max_varying_uint64(<WIDTH x i64>, <WIDTH x i64>) nounwind readnone alwaysinline {
   %m = icmp ugt <WIDTH x i64> %0, %1
   %r = select <WIDTH x i1> %m, <WIDTH x i64> %0, <WIDTH x i64> %1
   ret <WIDTH x i64> %r
 }
 
 define <WIDTH x double> @__min_varying_double(<WIDTH x double>,
-                                              <WIDTH x double>) nounwind readnone {
+                                              <WIDTH x double>) nounwind readnone alwaysinline {
   %m = fcmp olt <WIDTH x double> %0, %1
   %r = select <WIDTH x i1> %m, <WIDTH x double> %0, <WIDTH x double> %1
   ret <WIDTH x double> %r
 }
 
 define <WIDTH x double> @__max_varying_double(<WIDTH x double>,
-                                              <WIDTH x double>) nounwind readnone {
+                                              <WIDTH x double>) nounwind readnone alwaysinline {
   %m = fcmp ogt <WIDTH x double> %0, %1
   %r = select <WIDTH x i1> %m, <WIDTH x double> %0, <WIDTH x double> %1
   ret <WIDTH x double> %r
@@ -234,14 +241,14 @@ define <WIDTH x double> @__max_varying_double(<WIDTH x double>,
 
 declare float @llvm.sqrt.f32(float)
 
-define float @__sqrt_uniform_float(float) nounwind readnone {
+define float @__sqrt_uniform_float(float) nounwind readnone alwaysinline {
   %r = call float @llvm.sqrt.f32(float %0)
   ret float %r
 }
 
 declare double @llvm.sqrt.f64(double)
 
-define double @__sqrt_uniform_double(double) nounwind readnone {
+define double @__sqrt_uniform_double(double) nounwind readnone alwaysinline {
   %r = call double @llvm.sqrt.f64(double %0)
   ret double %r
 }
@@ -251,12 +258,12 @@ define double @__sqrt_uniform_double(double) nounwind readnone {
 declare i32 @llvm.ctpop.i32(i32) nounwind readnone
 declare i64 @llvm.ctpop.i64(i64) nounwind readnone
 
-define i32 @__popcnt_int32(i32) nounwind readnone {
+define i32 @__popcnt_int32(i32) nounwind readnone alwaysinline {
   %v = call i32 @llvm.ctpop.i32(i32 %0)
   ret i32 %v
 }
 
-define i64 @__popcnt_int64(i64) nounwind readnone {
+define i64 @__popcnt_int64(i64) nounwind readnone alwaysinline {
   %v = call i64 @llvm.ctpop.i64(i64 %0)
   ret i64 %v
 }
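
Note on the mechanics (a sketch for context, not part of the patch): `alwaysinline` makes LLVM's always-inliner substitute the function body at every call site, regardless of inline-cost heuristics or optimisation level. Each of these builtins wraps one or two NEON intrinsics, so an out-of-line call costs more than the work it does, and inlining also lets LLVM fold the intrinsic into surrounding vector code, which is the likely source of the ~2% win mentioned in the subject. A minimal illustration using one of the builtins above (the caller `@kernel` is hypothetical):

    define <4 x i32> @__min_varying_int32(<4 x i32>, <4 x i32>) nounwind readnone alwaysinline {
      %r = call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %0, <4 x i32> %1)
      ret <4 x i32> %r
    }

    define <4 x i32> @kernel(<4 x i32> %a, <4 x i32> %b) {
      ; After the always-inliner runs, this call collapses to a single
      ; @llvm.arm.neon.vmins.v4i32 intrinsic call, with no call overhead.
      %m = call <4 x i32> @__min_varying_int32(<4 x i32> %a, <4 x i32> %b)
      ret <4 x i32> %m
    }

Likewise, the `__fastmath` change is plain bit arithmetic on the ARM FPSCR: 50331648 = 0x03000000 = (1 << 24) | (1 << 25), i.e. the flush-to-zero and default-NaN control bits named in the comment.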