diff --git a/builtins/target-avx2-x2.ll b/builtins/target-avx2-x2.ll
index a2a4fd34..053fd078 100644
--- a/builtins/target-avx2-x2.ll
+++ b/builtins/target-avx2-x2.ll
@@ -29,6 +29,10 @@
 ;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 ;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+ifelse(LLVM_VERSION, `LLVM_3_0', `',
+       LLVM_VERSION, `LLVM_3_1', `',
+       `define(`HAVE_GATHER', `1')')
+
 include(`target-avx-x2.ll')
 
 ifelse(LLVM_VERSION, `LLVM_3_0', `rdrand_decls()',
@@ -128,9 +132,430 @@ define i16 @__float_to_half_uniform(float %v) nounwind readnone {
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;; gather
 
+declare void @llvm.trap() noreturn nounwind
+
+; $1: type
+; $2: var base name
+define(`extract_4s', `
+  %$2_1 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %$2_2 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %$2_3 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+  %$2_4 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+')
+
+; $1: type
+; $2: var base name
+define(`extract_8s', `
+  %$2_1 = shufflevector <16 x $1> %$2, <16 x $1> undef,
+          <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %$2_2 = shufflevector <16 x $1> %$2, <16 x $1> undef,
+          <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+')
+
+; $1: element type
+; $2: ret name
+; $3: v1
+; $4: v2
+define(`assemble_8s', `
+  %$2 = shufflevector <8 x $1> %$3, <8 x $1> %$4,
+        <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+')
+
+; $1: element type
+; $2: ret name
+; $3: v1
+; $4: v2
+; $5: v3
+; $6: v4
+define(`assemble_4s', `
+  %$2_1 = shufflevector <4 x $1> %$3, <4 x $1> %$4,
+          <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %$2_2 = shufflevector <4 x $1> %$5, <4 x $1> %$6,
+          <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  assemble_8s($1, $2, $2_1, $2_2)
+')
+
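+;; The four helper macros above only slice and rejoin vectors.  The AVX2
+;; gather intrinsics return at most 8 x 32-bit or 4 x 64-bit elements, so a
+;; 16-wide ispc vector has to be processed as two 8-wide or four 4-wide
+;; pieces and then reassembled.  For example, splitting a <16 x i32> %offsets
+;; value into halves expands to roughly:
+;;   %offsets_1 = shufflevector <16 x i32> %offsets, <16 x i32> undef,
+;;                <8 x i32> <i32 0, i32 1, ..., i32 7>
+;;   %offsets_2 = shufflevector <16 x i32> %offsets, <16 x i32> undef,
+;;                <8 x i32> <i32 8, i32 9, ..., i32 15>
+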
+ifelse(LLVM_VERSION, `LLVM_3_0', `
 gen_gather_factored(i8)
 gen_gather_factored(i16)
 gen_gather_factored(i32)
 gen_gather_factored(float)
 gen_gather_factored(i64)
-gen_gather_factored(double)
+gen_gather_factored(double)',
+LLVM_VERSION, `LLVM_3_1', `
+gen_gather_factored(i8)
+gen_gather_factored(i16)
+gen_gather_factored(i32)
+gen_gather_factored(float)
+gen_gather_factored(i64)
+gen_gather_factored(double)', `
+
+gen_gather(i8)
+gen_gather(i16)
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; int32 gathers
+
+declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %target, i8 * %ptr,
+                      <8 x i32> %indices, <8 x i32> %mask, i8 %scale) readonly nounwind
+declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %target, i8 * %ptr,
+                      <4 x i64> %indices, <4 x i32> %mask, i8 %scale) readonly nounwind
+
+define <16 x i32> @__gather_base_offsets32_i32(i8 * %ptr, i32 %scale, <16 x i32> %offsets,
+                      <16 x i32> %vecmask) nounwind readonly alwaysinline {
+  %scale8 = trunc i32 %scale to i8
+  extract_8s(i32, offsets)
+  extract_8s(i32, vecmask)
+
+  %v1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * %ptr,
+                      <8 x i32> %offsets_1, <8 x i32> %vecmask_1, i8 %scale8)
+  %v2 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * %ptr,
+                      <8 x i32> %offsets_2, <8 x i32> %vecmask_2, i8 %scale8)
+
+  assemble_8s(i32, v, v1, v2)
+
+  ret <16 x i32> %v
+}
+
+
+define <16 x i32> @__gather_base_offsets64_i32(i8 * %ptr,
+                      i32 %scale, <16 x i64> %offsets,
+                      <16 x i32> %vecmask) nounwind readonly alwaysinline {
+  %scale8 = trunc i32 %scale to i8
+
+  extract_4s(i32, vecmask)
+  extract_4s(i64, offsets)
+
+  %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr,
+                      <4 x i64> %offsets_1, <4 x i32> %vecmask_1, i8 %scale8)
+  %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr,
+                      <4 x i64> %offsets_2, <4 x i32> %vecmask_2, i8 %scale8)
+  %v3 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr,
+                      <4 x i64> %offsets_3, <4 x i32> %vecmask_3, i8 %scale8)
+  %v4 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr,
+                      <4 x i64> %offsets_4, <4 x i32> %vecmask_4, i8 %scale8)
+
+  assemble_4s(i32, v, v1, v2, v3, v4)
+
+  ret <16 x i32> %v
+}
+
+
+define <16 x i32> @__gather32_i32(<16 x i32> %ptrs,
+                      <16 x i32> %vecmask) nounwind readonly alwaysinline {
+  extract_8s(i32, ptrs)
+  extract_8s(i32, vecmask)
+
+  %v1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * null,
+                      <8 x i32> %ptrs_1, <8 x i32> %vecmask_1, i8 1)
+  %v2 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * null,
+                      <8 x i32> %ptrs_2, <8 x i32> %vecmask_2, i8 1)
+
+  assemble_8s(i32, v, v1, v2)
+
+  ret <16 x i32> %v
+}
+
+
+define <16 x i32> @__gather64_i32(<16 x i64> %ptrs,
+                      <16 x i32> %vecmask) nounwind readonly alwaysinline {
+  extract_4s(i64, ptrs)
+  extract_4s(i32, vecmask)
+
+  %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null,
+                      <4 x i64> %ptrs_1, <4 x i32> %vecmask_1, i8 1)
+  %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null,
+                      <4 x i64> %ptrs_2, <4 x i32> %vecmask_2, i8 1)
+  %v3 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null,
+                      <4 x i64> %ptrs_3, <4 x i32> %vecmask_3, i8 1)
+  %v4 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null,
+                      <4 x i64> %ptrs_4, <4 x i32> %vecmask_4, i8 1)
+
+  assemble_4s(i32, v, v1, v2, v3, v4)
+
+  ret <16 x i32> %v
+}
+
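+;; In the hardware gather instructions each lane is loaded only if the most
+;; significant bit of the corresponding mask element is set, and the mask
+;; register is cleared by the instruction.  The ispc execution mask is all
+;; ones or all zeros per lane, so it can be passed through unchanged.  A
+;; 16-wide i32 gather with 32-bit offsets therefore becomes two vpgatherdd
+;; instructions, while the 64-bit offset variant needs four vpgatherqd
+;; instructions that each return only four 32-bit results.
+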
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; float gathers
+
+declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %target, i8 * %ptr,
+                      <8 x i32> %indices, <8 x float> %mask, i8 %scale8) readonly nounwind
+declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %target, i8 * %ptr,
+                      <4 x i64> %indices, <4 x float> %mask, i8 %scale8) readonly nounwind
+
+define <16 x float> @__gather_base_offsets32_float(i8 * %ptr,
+                      i32 %scale, <16 x i32> %offsets,
+                      <16 x i32> %vecmask) nounwind readonly alwaysinline {
+  %scale8 = trunc i32 %scale to i8
+  %mask = bitcast <16 x i32> %vecmask to <16 x float>
+  extract_8s(i32, offsets)
+  extract_8s(float, mask)
+
+  %v1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * %ptr,
+                      <8 x i32> %offsets_1, <8 x float> %mask_1, i8 %scale8)
+  %v2 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * %ptr,
+                      <8 x i32> %offsets_2, <8 x float> %mask_2, i8 %scale8)
+
+  assemble_8s(float, v, v1, v2)
+
+  ret <16 x float> %v
+}
+
+
+define <16 x float> @__gather_base_offsets64_float(i8 * %ptr,
+                      i32 %scale, <16 x i64> %offsets,
+                      <16 x i32> %vecmask) nounwind readonly alwaysinline {
+  %scale8 = trunc i32 %scale to i8
+  %mask = bitcast <16 x i32> %vecmask to <16 x float>
+  extract_4s(i64, offsets)
+  extract_4s(float, mask)
+
+  %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr,
+                      <4 x i64> %offsets_1, <4 x float> %mask_1, i8 %scale8)
+  %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr,
+                      <4 x i64> %offsets_2, <4 x float> %mask_2, i8 %scale8)
+  %v3 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr,
+                      <4 x i64> %offsets_3, <4 x float> %mask_3, i8 %scale8)
+  %v4 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr,
+                      <4 x i64> %offsets_4, <4 x float> %mask_4, i8 %scale8)
+
+  assemble_4s(float, v, v1, v2, v3, v4)
+
+  ret <16 x float> %v
+}
+
+
+define <16 x float> @__gather32_float(<16 x i32> %ptrs,
+                      <16 x i32> %vecmask) nounwind readonly alwaysinline {
+  %mask = bitcast <16 x i32> %vecmask to <16 x float>
+  extract_8s(float, mask)
+  extract_8s(i32, ptrs)
+
+  %v1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * null,
+                      <8 x i32> %ptrs_1, <8 x float> %mask_1, i8 1)
+  %v2 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * null,
+                      <8 x i32> %ptrs_2, <8 x float> %mask_2, i8 1)
+
+  assemble_8s(float, v, v1, v2)
+
+  ret <16 x float> %v
+}
+
+
+define <16 x float> @__gather64_float(<16 x i64> %ptrs,
+                      <16 x i32> %vecmask) nounwind readonly alwaysinline {
+  %mask = bitcast <16 x i32> %vecmask to <16 x float>
+  extract_4s(i64, ptrs)
+  extract_4s(float, mask)
+
+  %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null,
+                      <4 x i64> %ptrs_1, <4 x float> %mask_1, i8 1)
+  %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null,
+                      <4 x i64> %ptrs_2, <4 x float> %mask_2, i8 1)
+  %v3 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null,
+                      <4 x i64> %ptrs_3, <4 x float> %mask_3, i8 1)
+  %v4 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null,
+                      <4 x i64> %ptrs_4, <4 x float> %mask_4, i8 1)
+
+  assemble_4s(float, v, v1, v2, v3, v4)
+
+  ret <16 x float> %v
+}
+
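+;; The float variants bitcast the i32 execution mask to <16 x float> before
+;; splitting it: the ps-typed gather intrinsics take a float mask and, as
+;; with the integer forms, only test the sign bit of each element, so the
+;; bitcast preserves exactly the information the instruction looks at.
+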
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; int64 gathers
+
+declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %target, i8 * %ptr,
+                      <4 x i32> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind
+declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %target, i8 * %ptr,
+                      <4 x i64> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind
+
+define <16 x i64> @__gather_base_offsets32_i64(i8 * %ptr,
+                      i32 %scale, <16 x i32> %offsets,
+                      <16 x i32> %mask32) nounwind readonly alwaysinline {
+  %scale8 = trunc i32 %scale to i8
+  %vecmask = sext <16 x i32> %mask32 to <16 x i64>
+  extract_4s(i32, offsets)
+  extract_4s(i64, vecmask)
+
+  %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr,
+                      <4 x i32> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8)
+  %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr,
+                      <4 x i32> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8)
+  %v3 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr,
+                      <4 x i32> %offsets_3, <4 x i64> %vecmask_3, i8 %scale8)
+  %v4 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr,
+                      <4 x i32> %offsets_4, <4 x i64> %vecmask_4, i8 %scale8)
+
+  assemble_4s(i64, v, v1, v2, v3, v4)
+
+  ret <16 x i64> %v
+}
+
+
+define <16 x i64> @__gather_base_offsets64_i64(i8 * %ptr,
+                      i32 %scale, <16 x i64> %offsets,
+                      <16 x i32> %mask32) nounwind readonly alwaysinline {
+  %scale8 = trunc i32 %scale to i8
+  %vecmask = sext <16 x i32> %mask32 to <16 x i64>
+  extract_4s(i64, offsets)
+  extract_4s(i64, vecmask)
+
+  %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr,
+                      <4 x i64> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8)
+  %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr,
+                      <4 x i64> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8)
+  %v3 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr,
+                      <4 x i64> %offsets_3, <4 x i64> %vecmask_3, i8 %scale8)
+  %v4 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr,
+                      <4 x i64> %offsets_4, <4 x i64> %vecmask_4, i8 %scale8)
+
+  assemble_4s(i64, v, v1, v2, v3, v4)
+
+  ret <16 x i64> %v
+}
+
+
+define <16 x i64> @__gather32_i64(<16 x i32> %ptrs,
+                      <16 x i32> %mask32) nounwind readonly alwaysinline {
+  %vecmask = sext <16 x i32> %mask32 to <16 x i64>
+  extract_4s(i32, ptrs)
+  extract_4s(i64, vecmask)
+
+  %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null,
+                      <4 x i32> %ptrs_1, <4 x i64> %vecmask_1, i8 1)
+  %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null,
+                      <4 x i32> %ptrs_2, <4 x i64> %vecmask_2, i8 1)
+  %v3 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null,
+                      <4 x i32> %ptrs_3, <4 x i64> %vecmask_3, i8 1)
+  %v4 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null,
+                      <4 x i32> %ptrs_4, <4 x i64> %vecmask_4, i8 1)
+
+  assemble_4s(i64, v, v1, v2, v3, v4)
+
+  ret <16 x i64> %v
+}
+
+define <16 x i64> @__gather64_i64(<16 x i64> %ptrs,
+                      <16 x i32> %mask32) nounwind readonly alwaysinline {
+  %vecmask = sext <16 x i32> %mask32 to <16 x i64>
+  extract_4s(i64, ptrs)
+  extract_4s(i64, vecmask)
+
+  %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null,
+                      <4 x i64> %ptrs_1, <4 x i64> %vecmask_1, i8 1)
+  %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null,
+                      <4 x i64> %ptrs_2, <4 x i64> %vecmask_2, i8 1)
+  %v3 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null,
+                      <4 x i64> %ptrs_3, <4 x i64> %vecmask_3, i8 1)
+  %v4 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null,
+                      <4 x i64> %ptrs_4, <4 x i64> %vecmask_4, i8 1)
+
+  assemble_4s(i64, v, v1, v2, v3, v4)
+
+  ret <16 x i64> %v
+}
+
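+;; The 64-bit element gathers widen the 32-bit execution mask with sext so
+;; that an active lane becomes all ones in the corresponding 64-bit mask
+;; element and keeps its sign bit set, which is what the gather forms with
+;; 64-bit elements test.
+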
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; double gathers
+
+declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %target, i8 * %ptr,
+                      <4 x i64> %indices, <4 x double> %mask, i8 %scale) readonly nounwind
+declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %target, i8 * %ptr,
+                      <4 x i32> %indices, <4 x double> %mask, i8 %scale) readonly nounwind
+
+define <16 x double> @__gather_base_offsets32_double(i8 * %ptr,
+                      i32 %scale, <16 x i32> %offsets,
+                      <16 x i32> %mask32) nounwind readonly alwaysinline {
+  %scale8 = trunc i32 %scale to i8
+  %vecmask64 = sext <16 x i32> %mask32 to <16 x i64>
+  %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double>
+  extract_4s(i32, offsets)
+  extract_4s(double, vecmask)
+
+  %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr,
+                      <4 x i32> %offsets_1, <4 x double> %vecmask_1, i8 %scale8)
+  %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr,
+                      <4 x i32> %offsets_2, <4 x double> %vecmask_2, i8 %scale8)
+  %v3 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr,
+                      <4 x i32> %offsets_3, <4 x double> %vecmask_3, i8 %scale8)
+  %v4 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr,
+                      <4 x i32> %offsets_4, <4 x double> %vecmask_4, i8 %scale8)
+
+  assemble_4s(double, v, v1, v2, v3, v4)
+
+  ret <16 x double> %v
+}
+
+
+define <16 x double> @__gather_base_offsets64_double(i8 * %ptr,
+                      i32 %scale, <16 x i64> %offsets,
+                      <16 x i32> %mask32) nounwind readonly alwaysinline {
+  %scale8 = trunc i32 %scale to i8
+  %vecmask64 = sext <16 x i32> %mask32 to <16 x i64>
+  %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double>
+  extract_4s(i64, offsets)
+  extract_4s(double, vecmask)
+
+  %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr,
+                      <4 x i64> %offsets_1, <4 x double> %vecmask_1, i8 %scale8)
+  %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr,
+                      <4 x i64> %offsets_2, <4 x double> %vecmask_2, i8 %scale8)
+  %v3 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr,
+                      <4 x i64> %offsets_3, <4 x double> %vecmask_3, i8 %scale8)
+  %v4 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr,
+                      <4 x i64> %offsets_4, <4 x double> %vecmask_4, i8 %scale8)
+
+  assemble_4s(double, v, v1, v2, v3, v4)
+
+  ret <16 x double> %v
+}
+
+
+define <16 x double> @__gather32_double(<16 x i32> %ptrs,
+                      <16 x i32> %mask32) nounwind readonly alwaysinline {
+  %vecmask64 = sext <16 x i32> %mask32 to <16 x i64>
+  %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double>
+  extract_4s(i32, ptrs)
+  extract_4s(double, vecmask)
+
+  %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null,
+                      <4 x i32> %ptrs_1, <4 x double> %vecmask_1, i8 1)
+  %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null,
+                      <4 x i32> %ptrs_2, <4 x double> %vecmask_2, i8 1)
+  %v3 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null,
+                      <4 x i32> %ptrs_3, <4 x double> %vecmask_3, i8 1)
+  %v4 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null,
+                      <4 x i32> %ptrs_4, <4 x double> %vecmask_4, i8 1)
+
+  assemble_4s(double, v, v1, v2, v3, v4)
+
+  ret <16 x double> %v
+}
+
+
+define <16 x double> @__gather64_double(<16 x i64> %ptrs,
+                      <16 x i32> %mask32) nounwind readonly alwaysinline {
+  %vecmask64 = sext <16 x i32> %mask32 to <16 x i64>
+  %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double>
+  extract_4s(i64, ptrs)
+  extract_4s(double, vecmask)
+
+  %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
+                      <4 x i64> %ptrs_1, <4 x double> %vecmask_1, i8 1)
+  %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
+                      <4 x i64> %ptrs_2, <4 x double> %vecmask_2, i8 1)
+  %v3 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
+                      <4 x i64> %ptrs_3, <4 x double> %vecmask_3, i8 1)
+  %v4 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
+                      <4 x i64> %ptrs_4, <4 x double> %vecmask_4, i8 1)
+
+  assemble_4s(double, v, v1, v2, v3, v4)
+
+  ret <16 x double> %v
+}
+
+')
diff --git a/builtins/target-avx2.ll b/builtins/target-avx2.ll
index 4b4b38c5..f4a0ee07 100644
--- a/builtins/target-avx2.ll
+++ b/builtins/target-avx2.ll
@@ -29,6 +29,10 @@
 ;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 ;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+ifelse(LLVM_VERSION, `LLVM_3_0', `',
+       LLVM_VERSION, `LLVM_3_1', `',
+       `define(`HAVE_GATHER', `1')')
+
 include(`target-avx.ll')
 
 ifelse(LLVM_VERSION, `LLVM_3_0', `rdrand_decls()',
@@ -112,9 +116,318 @@ define i16 @__float_to_half_uniform(float %v) nounwind readnone {
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;; gather
 
+declare void @llvm.trap() noreturn nounwind
+
+define(`extract_4s', `
+  %$2_1 = shufflevector <8 x $1> %$2, <8 x $1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %$2_2 = shufflevector <8 x $1> %$2, <8 x $1> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+')
+
+ifelse(LLVM_VERSION, `LLVM_3_0', `
 gen_gather_factored(i8)
 gen_gather_factored(i16)
 gen_gather_factored(i32)
 gen_gather_factored(float)
 gen_gather_factored(i64)
-gen_gather_factored(double)
+gen_gather_factored(double)',
+LLVM_VERSION, `LLVM_3_1', `
+gen_gather_factored(i8)
+gen_gather_factored(i16)
+gen_gather_factored(i32)
+gen_gather_factored(float)
+gen_gather_factored(i64)
+gen_gather_factored(double)', `
+
+gen_gather(i8)
+gen_gather(i16)
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; int32 gathers
+
+declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %target, i8 * %ptr,
+                      <8 x i32> %indices, <8 x i32> %mask, i8 %scale) readonly nounwind
+declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %target, i8 * %ptr,
+                      <4 x i64> %indices, <4 x i32> %mask, i8 %scale) readonly nounwind
+
+define <8 x i32> @__gather_base_offsets32_i32(i8 * %ptr,
+                      i32 %scale, <8 x i32> %offsets,
+                      <8 x i32> %vecmask) nounwind readonly alwaysinline {
+  %scale8 = trunc i32 %scale to i8
+
+  %v = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * %ptr,
+                      <8 x i32> %offsets, <8 x i32> %vecmask, i8 %scale8)
+
+  ret <8 x i32> %v
+}
+
+
+define <8 x i32> @__gather_base_offsets64_i32(i8 * %ptr,
+                      i32 %scale, <8 x i64> %offsets,
+                      <8 x i32> %vecmask) nounwind readonly alwaysinline {
+  %scale8 = trunc i32 %scale to i8
+  extract_4s(i32, vecmask)
+  extract_4s(i64, offsets)
+
+  %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr,
+                      <4 x i64> %offsets_1, <4 x i32> %vecmask_1, i8 %scale8)
+  %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr,
+                      <4 x i64> %offsets_2, <4 x i32> %vecmask_2, i8 %scale8)
+
+  %v = shufflevector <4 x i32> %v1, <4 x i32> %v2,
+       <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i32> %v
+}
+
+
+define <8 x i32> @__gather32_i32(<8 x i32> %ptrs,
+                      <8 x i32> %vecmask) nounwind readonly alwaysinline {
+  %v = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * null,
+                      <8 x i32> %ptrs, <8 x i32> %vecmask, i8 1)
+  ret <8 x i32> %v
+}
+
+
+define <8 x i32> @__gather64_i32(<8 x i64> %ptrs,
+                      <8 x i32> %vecmask) nounwind readonly alwaysinline {
+  extract_4s(i64, ptrs)
+  extract_4s(i32, vecmask)
+
+  %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null,
+                      <4 x i64> %ptrs_1, <4 x i32> %vecmask_1, i8 1)
+  %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null,
+                      <4 x i64> %ptrs_2, <4 x i32> %vecmask_2, i8 1)
+
+  %v = shufflevector <4 x i32> %v1, <4 x i32> %v2,
+       <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i32> %v
+}
+
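+;; In the 8-wide target the 32-bit offset and 32-bit element cases line up
+;; exactly with one vpgatherdd, so no splitting is needed; only the variants
+;; with 64-bit offsets or 64-bit elements have to be broken into two 4-wide
+;; intrinsic calls and recombined with a shufflevector.
+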
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; float gathers
+
+declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %target, i8 * %ptr,
+                      <8 x i32> %indices, <8 x float> %mask, i8 %scale8) readonly nounwind
+declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %target, i8 * %ptr,
+                      <4 x i64> %indices, <4 x float> %mask, i8 %scale8) readonly nounwind
+
+define <8 x float> @__gather_base_offsets32_float(i8 * %ptr,
+                      i32 %scale, <8 x i32> %offsets,
+                      <8 x i32> %vecmask) nounwind readonly alwaysinline {
+  %scale8 = trunc i32 %scale to i8
+  %mask = bitcast <8 x i32> %vecmask to <8 x float>
+
+  %v = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * %ptr,
+                      <8 x i32> %offsets, <8 x float> %mask, i8 %scale8)
+
+  ret <8 x float> %v
+}
+
+
+define <8 x float> @__gather_base_offsets64_float(i8 * %ptr,
+                      i32 %scale, <8 x i64> %offsets,
+                      <8 x i32> %vecmask) nounwind readonly alwaysinline {
+  %scale8 = trunc i32 %scale to i8
+  %mask = bitcast <8 x i32> %vecmask to <8 x float>
+  extract_4s(i64, offsets)
+  extract_4s(float, mask)
+
+  %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr,
+                      <4 x i64> %offsets_1, <4 x float> %mask_1, i8 %scale8)
+  %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr,
+                      <4 x i64> %offsets_2, <4 x float> %mask_2, i8 %scale8)
+
+  %v = shufflevector <4 x float> %v1, <4 x float> %v2,
+       <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x float> %v
+}
+
+
+define <8 x float> @__gather32_float(<8 x i32> %ptrs,
+                      <8 x i32> %vecmask) nounwind readonly alwaysinline {
+  %mask = bitcast <8 x i32> %vecmask to <8 x float>
+
+  %v = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * null,
+                      <8 x i32> %ptrs, <8 x float> %mask, i8 1)
+
+  ret <8 x float> %v
+}
+
+
+define <8 x float> @__gather64_float(<8 x i64> %ptrs,
+                      <8 x i32> %vecmask) nounwind readonly alwaysinline {
+  %mask = bitcast <8 x i32> %vecmask to <8 x float>
+  extract_4s(i64, ptrs)
+  extract_4s(float, mask)
+
+  %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null,
+                      <4 x i64> %ptrs_1, <4 x float> %mask_1, i8 1)
+  %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null,
+                      <4 x i64> %ptrs_2, <4 x float> %mask_2, i8 1)
+
+  %v = shufflevector <4 x float> %v1, <4 x float> %v2,
+       <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x float> %v
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; int64 gathers
+
+declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %target, i8 * %ptr,
+                      <4 x i32> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind
+declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %target, i8 * %ptr,
+                      <4 x i64> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind
+
+define <8 x i64> @__gather_base_offsets32_i64(i8 * %ptr,
+                      i32 %scale, <8 x i32> %offsets,
+                      <8 x i32> %mask32) nounwind readonly alwaysinline {
+  %scale8 = trunc i32 %scale to i8
+  %vecmask = sext <8 x i32> %mask32 to <8 x i64>
+  extract_4s(i32, offsets)
+  extract_4s(i64, vecmask)
+
+  %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr,
+                      <4 x i32> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8)
+  %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr,
+                      <4 x i32> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8)
+
+  %v = shufflevector <4 x i64> %v1, <4 x i64> %v2,
+       <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i64> %v
+}
+
+
+define <8 x i64> @__gather_base_offsets64_i64(i8 * %ptr,
+                      i32 %scale, <8 x i64> %offsets,
+                      <8 x i32> %mask32) nounwind readonly alwaysinline {
+  %scale8 = trunc i32 %scale to i8
+  %vecmask = sext <8 x i32> %mask32 to <8 x i64>
+  extract_4s(i64, offsets)
+  extract_4s(i64, vecmask)
+
+  %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr,
+                      <4 x i64> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8)
+  %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr,
+                      <4 x i64> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8)
+
+  %v = shufflevector <4 x i64> %v1, <4 x i64> %v2,
+       <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i64> %v
+}
+
+
+define <8 x i64> @__gather32_i64(<8 x i32> %ptrs,
+                      <8 x i32> %mask32) nounwind readonly alwaysinline {
+  %vecmask = sext <8 x i32> %mask32 to <8 x i64>
+
+  extract_4s(i32, ptrs)
+  extract_4s(i64, vecmask)
+
+  %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null,
+                      <4 x i32> %ptrs_1, <4 x i64> %vecmask_1, i8 1)
+  %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null,
+                      <4 x i32> %ptrs_2, <4 x i64> %vecmask_2, i8 1)
+  %v = shufflevector <4 x i64> %v1, <4 x i64> %v2,
+       <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i64> %v
+}
+
+
+define <8 x i64> @__gather64_i64(<8 x i64> %ptrs,
+                      <8 x i32> %mask32) nounwind readonly alwaysinline {
+  %vecmask = sext <8 x i32> %mask32 to <8 x i64>
+  extract_4s(i64, ptrs)
+  extract_4s(i64, vecmask)
+
+  %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null,
+                      <4 x i64> %ptrs_1, <4 x i64> %vecmask_1, i8 1)
+  %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null,
+                      <4 x i64> %ptrs_2, <4 x i64> %vecmask_2, i8 1)
+
+  %v = shufflevector <4 x i64> %v1, <4 x i64> %v2,
+       <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i64> %v
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; double gathers
+
+declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %target, i8 * %ptr,
+                      <4 x i64> %indices, <4 x double> %mask, i8 %scale) readonly nounwind
+declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %target, i8 * %ptr,
+                      <4 x i32> %indices, <4 x double> %mask, i8 %scale) readonly nounwind
+
+define <8 x double> @__gather_base_offsets32_double(i8 * %ptr,
+                      i32 %scale, <8 x i32> %offsets,
+                      <8 x i32> %mask32) nounwind readonly alwaysinline {
+  %scale8 = trunc i32 %scale to i8
+  %vecmask64 = sext <8 x i32> %mask32 to <8 x i64>
+  %vecmask = bitcast <8 x i64> %vecmask64 to <8 x double>
+  extract_4s(i32, offsets)
+  extract_4s(double, vecmask)
+
+  %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr,
+                      <4 x i32> %offsets_1, <4 x double> %vecmask_1, i8 %scale8)
+  %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr,
+                      <4 x i32> %offsets_2, <4 x double> %vecmask_2, i8 %scale8)
+
+  %v = shufflevector <4 x double> %v1, <4 x double> %v2,
+       <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x double> %v
+}
+
+define <8 x double> @__gather_base_offsets64_double(i8 * %ptr,
+                      i32 %scale, <8 x i64> %offsets,
+                      <8 x i32> %mask32) nounwind readonly alwaysinline {
+  %scale8 = trunc i32 %scale to i8
+  %vecmask64 = sext <8 x i32> %mask32 to <8 x i64>
+  %vecmask = bitcast <8 x i64> %vecmask64 to <8 x double>
+  extract_4s(i64, offsets)
+  extract_4s(double, vecmask)
+
+  %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr,
+                      <4 x i64> %offsets_1, <4 x double> %vecmask_1, i8 %scale8)
+  %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr,
+                      <4 x i64> %offsets_2, <4 x double> %vecmask_2, i8 %scale8)
+
+  %v = shufflevector <4 x double> %v1, <4 x double> %v2,
+       <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x double> %v
+}
+
+define <8 x double> @__gather32_double(<8 x i32> %ptrs,
+                      <8 x i32> %mask32) nounwind readonly alwaysinline {
+  %vecmask64 = sext <8 x i32> %mask32 to <8 x i64>
+  %vecmask = bitcast <8 x i64> %vecmask64 to <8 x double>
+  extract_4s(i32, ptrs)
+  extract_4s(double, vecmask)
+
+  %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null,
+                      <4 x i32> %ptrs_1, <4 x double> %vecmask_1, i8 1)
+  %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null,
+                      <4 x i32> %ptrs_2, <4 x double> %vecmask_2, i8 1)
+
+  %v = shufflevector <4 x double> %v1, <4 x double> %v2,
+       <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x double> %v
+}
+
+define <8 x double> @__gather64_double(<8 x i64> %ptrs,
+                      <8 x i32> %mask32) nounwind readonly alwaysinline {
+  %vecmask64 = sext <8 x i32> %mask32 to <8 x i64>
+  %vecmask = bitcast <8 x i64> %vecmask64 to <8 x double>
+  extract_4s(i64, ptrs)
+  extract_4s(double, vecmask)
+
+  %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
+                      <4 x i64> %ptrs_1, <4 x double> %vecmask_1, i8 1)
+  %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null,
+                      <4 x i64> %ptrs_2, <4 x double> %vecmask_2, i8 1)
+
+  %v = shufflevector <4 x double> %v1, <4 x double> %v2,
+       <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+
+  ret <8 x double> %v
+}
+
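+;; The __gather32/__gather64 entry points receive absolute lane addresses
+;; rather than base plus offsets, so they pass a null base pointer and a
+;; unit scale to the intrinsics and let the address vector itself serve as
+;; the indices.
+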
+')
diff --git a/builtins/util.m4 b/builtins/util.m4
index d29bcbca..a97336a7 100644
--- a/builtins/util.m4
+++ b/builtins/util.m4
@@ -3471,6 +3505,40 @@ pl_done:
 ;;
 ;; $1: scalar type for which to generate functions to do gathers
 
+define(`gen_gather_general', `
+; fully general 32-bit gather, takes array of pointers encoded as vector of i32s
+define <WIDTH x $1> @__gather32_$1(<WIDTH x i32> %ptrs,
+                                   <WIDTH x i32> %vecmask) nounwind readonly alwaysinline {
+  %ret_ptr = alloca <WIDTH x $1>
+  per_lane(WIDTH, <WIDTH x i32> %vecmask, `
+  %iptr_LANE_ID = extractelement <WIDTH x i32> %ptrs, i32 LANE
+  %ptr_LANE_ID = inttoptr i32 %iptr_LANE_ID to $1 *
+  %val_LANE_ID = load $1 * %ptr_LANE_ID
+  %store_ptr_LANE_ID = getelementptr <WIDTH x $1> * %ret_ptr, i32 0, i32 LANE
+  store $1 %val_LANE_ID, $1 * %store_ptr_LANE_ID
+ ')
+
+  %ret = load <WIDTH x $1> * %ret_ptr
+  ret <WIDTH x $1> %ret
+}
+
+; fully general 64-bit gather, takes array of pointers encoded as vector of i64s
+define <WIDTH x $1> @__gather64_$1(<WIDTH x i64> %ptrs,
+                                   <WIDTH x i32> %vecmask) nounwind readonly alwaysinline {
+  %ret_ptr = alloca <WIDTH x $1>
+  per_lane(WIDTH, <WIDTH x i32> %vecmask, `
+  %iptr_LANE_ID = extractelement <WIDTH x i64> %ptrs, i32 LANE
+  %ptr_LANE_ID = inttoptr i64 %iptr_LANE_ID to $1 *
+  %val_LANE_ID = load $1 * %ptr_LANE_ID
+  %store_ptr_LANE_ID = getelementptr <WIDTH x $1> * %ret_ptr, i32 0, i32 LANE
+  store $1 %val_LANE_ID, $1 * %store_ptr_LANE_ID
+ ')
+
+  %ret = load <WIDTH x $1> * %ret_ptr
+  ret <WIDTH x $1> %ret
+}
+')
+
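+; The general gathers above are the fallback used when no native gather
+; instruction is available (and for i8/i16, which AVX2 has no gather form
+; for): each active lane converts its pointer with inttoptr, does a scalar
+; load, and writes the value into a stack temporary that is reloaded as a
+; vector at the end.
+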
 ; vec width, type
 define(`gen_gather_factored', `
 ;; Define the utility function to do the gather operation for a single element
@@ -3582,37 +3616,42 @@ define @__gather_factored_base_offsets64_$1(i8 * %ptr,
   ret <WIDTH x $1> %ret`'eval(WIDTH-1)
 }
 
-; fully general 32-bit gather, takes array of pointers encoded as vector of i32s
-define <WIDTH x $1> @__gather32_$1(<WIDTH x i32> %ptrs,
-                                   <WIDTH x i32> %vecmask) nounwind readonly alwaysinline {
-  %ret_ptr = alloca <WIDTH x $1>
-  per_lane(WIDTH, <WIDTH x i32> %vecmask, `
-  %iptr_LANE_ID = extractelement <WIDTH x i32> %ptrs, i32 LANE
-  %ptr_LANE_ID = inttoptr i32 %iptr_LANE_ID to $1 *
-  %val_LANE_ID = load $1 * %ptr_LANE_ID
-  %store_ptr_LANE_ID = getelementptr <WIDTH x $1> * %ret_ptr, i32 0, i32 LANE
-  store $1 %val_LANE_ID, $1 * %store_ptr_LANE_ID
- ')
+
+gen_gather_general($1)
+'
+)
 
-  %ret = load <WIDTH x $1> * %ret_ptr
-  ret <WIDTH x $1> %ret
+; vec width, type
+define(`gen_gather', `
+
+gen_gather_factored($1)
+
+define <WIDTH x $1>
+@__gather_base_offsets32_$1(i8 * %ptr, i32 %offset_scale,
+                            <WIDTH x i32> %offsets,
+                            <WIDTH x i32> %vecmask) nounwind readonly alwaysinline {
+  %scale_vec = bitcast i32 %offset_scale to <1 x i32>
+  %smear_scale = shufflevector <1 x i32> %scale_vec, <1 x i32> undef,
+     <WIDTH x i32> < forloop(i, 1, eval(WIDTH-1), `i32 0, ') i32 0 >
+  %scaled_offsets = mul <WIDTH x i32> %smear_scale, %offsets
+  %v = call <WIDTH x $1> @__gather_factored_base_offsets32_$1(i8 * %ptr, <WIDTH x i32> %scaled_offsets, i32 1,
+                            <WIDTH x i32> zeroinitializer, <WIDTH x i32> %vecmask)
+  ret <WIDTH x $1> %v
 }
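+; These wrappers provide the plain base+offset gather entry points on top of
+; the factored implementation, for types that still have no hardware gather
+; on an AVX2 target (i8 and i16): the uniform scale is smeared across a
+; vector, multiplied into the offsets up front, and the factored version is
+; then called with a scale of 1 and a zero offset delta.  The 64-bit variant
+; below does the same arithmetic in 64-bit.
+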
-; fully general 64-bit gather, takes array of pointers encoded as vector of i32s
-define <WIDTH x $1> @__gather64_$1(<WIDTH x i64> %ptrs,
-                                   <WIDTH x i32> %vecmask) nounwind readonly alwaysinline {
-  %ret_ptr = alloca <WIDTH x $1>
-  per_lane(WIDTH, <WIDTH x i32> %vecmask, `
-  %iptr_LANE_ID = extractelement <WIDTH x i64> %ptrs, i32 LANE
-  %ptr_LANE_ID = inttoptr i64 %iptr_LANE_ID to $1 *
-  %val_LANE_ID = load $1 * %ptr_LANE_ID
-  %store_ptr_LANE_ID = getelementptr <WIDTH x $1> * %ret_ptr, i32 0, i32 LANE
-  store $1 %val_LANE_ID, $1 * %store_ptr_LANE_ID
- ')
-
-  %ret = load <WIDTH x $1> * %ret_ptr
-  ret <WIDTH x $1> %ret
+define <WIDTH x $1>
+@__gather_base_offsets64_$1(i8 * %ptr, i32 %offset_scale,
+                            <WIDTH x i64> %offsets,
+                            <WIDTH x i32> %vecmask) nounwind readonly alwaysinline {
+  %scale64 = zext i32 %offset_scale to i64
+  %scale_vec = bitcast i64 %scale64 to <1 x i64>
+  %smear_scale = shufflevector <1 x i64> %scale_vec, <1 x i64> undef,
+     <WIDTH x i32> < forloop(i, 1, eval(WIDTH-1), `i32 0, ') i32 0 >
+  %scaled_offsets = mul <WIDTH x i64> %smear_scale, %offsets
+  %v = call <WIDTH x $1> @__gather_factored_base_offsets64_$1(i8 * %ptr, <WIDTH x i64> %scaled_offsets,
+                            i32 1, <WIDTH x i64> zeroinitializer, <WIDTH x i32> %vecmask)
+  ret <WIDTH x $1> %v
 }
+
 '
 )
diff --git a/ispc.cpp b/ispc.cpp
index 636dfdd4..fac83bbe 100644
--- a/ispc.cpp
+++ b/ispc.cpp
@@ -365,6 +365,7 @@ Target::GetTarget(const char *arch, const char *cpu, const char *isa,
 #if !defined(LLVM_3_1) // LLVM 3.2+ only
         t->hasRand = true;
+        t->hasGather = true;
 #endif
     }
     else if (!strcasecmp(isa, "avx2-x2")) {
@@ -378,6 +379,7 @@ Target::GetTarget(const char *arch, const char *cpu, const char *isa,
 #if !defined(LLVM_3_1) // LLVM 3.2+ only
         t->hasRand = true;
+        t->hasGather = true;
 #endif
     }
 #endif // !LLVM_3_0