From 35222694e5b81b5b199ba60621ad58cea01bad92 Mon Sep 17 00:00:00 2001 From: Vsevolod Livinskiy Date: Fri, 24 Apr 2015 09:44:23 +0300 Subject: [PATCH 01/23] [AVX512]: knl target was added --- builtins/dispatch.ll | 2 +- check_isa.cpp | 2 +- ispc.cpp | 54 ++++++++++++++++++++++++++++++++++++++++---- ispc.h | 16 ++++++------- 4 files changed, 59 insertions(+), 15 deletions(-) diff --git a/builtins/dispatch.ll b/builtins/dispatch.ll index 055ce705..e0bab120 100644 --- a/builtins/dispatch.ll +++ b/builtins/dispatch.ll @@ -112,7 +112,7 @@ ;; else if ((info2[1] & (1 << 26)) != 0 && // AVX512 PF ;; (info2[1] & (1 << 27)) != 0 && // AVX512 ER ;; (info2[1] & (1 << 28)) != 0) { // AVX512 CDI -;; return 5; // KNL +;; return 5; // KNL_AVX512 ;; } ;; // If it's unknown AVX512 target, fall through and use AVX2 ;; // or whatever is available in the machine. diff --git a/check_isa.cpp b/check_isa.cpp index 6aff860b..8ef3499c 100644 --- a/check_isa.cpp +++ b/check_isa.cpp @@ -126,7 +126,7 @@ lGetSystemISA() { else if ((info2[1] & (1 << 26)) != 0 && // AVX512 PF (info2[1] & (1 << 27)) != 0 && // AVX512 ER (info2[1] & (1 << 28)) != 0) { // AVX512 CDI - return "KNL"; + return "KNL_AVX512"; } // If it's unknown AVX512 target, fall through and use AVX2 // or whatever is available in the machine. diff --git a/ispc.cpp b/ispc.cpp index 8e62191a..9e89b6cf 100644 --- a/ispc.cpp +++ b/ispc.cpp @@ -170,7 +170,7 @@ lGetSystemISA() { else if ((info2[1] & (1 << 26)) != 0 && // AVX512 PF (info2[1] & (1 << 27)) != 0 && // AVX512 ER (info2[1] & (1 << 28)) != 0) { // AVX512 CDI - return "knl"; + return "knl-avx512"; } // If it's unknown AVX512 target, fall through and use AVX2 // or whatever is available in the machine. @@ -238,6 +238,11 @@ typedef enum { CPU_Broadwell, #endif +#if !defined(LLVM_3_2) && !defined(LLVM_3_3) && !defined(LLVM_3_4) && !defined(LLVM_3_5) && !defined(LLVM_3_6)// LLVM 3.7+ + // KNL. Supports AVX512. + CPU_KNL, +#endif + #if !defined(LLVM_3_2) && !defined(LLVM_3_3) // LLVM 3.4+ // Late Atom-like design. Supports SSE 4.2 + POPCNT/LZCNT. CPU_Silvermont, @@ -318,6 +323,10 @@ public: names[CPU_Broadwell].push_back("broadwell"); #endif +#if !defined(LLVM_3_2) && !defined(LLVM_3_3) && !defined(LLVM_3_4) && !defined(LLVM_3_5) && !defined(LLVM_3_6)// LLVM 3.7+ + names[CPU_KNL].push_back("knl"); +#endif + #ifdef ISPC_ARM_ENABLED names[CPU_CortexA15].push_back("cortex-a15"); @@ -336,6 +345,14 @@ public: CPU_Core2, CPU_Nehalem, CPU_Silvermont, CPU_None); #endif + +#if !defined(LLVM_3_2) && !defined(LLVM_3_3) && !defined(LLVM_3_4) && !defined(LLVM_3_5) && !defined(LLVM_3_6)// LLVM 3.7+ + compat[CPU_KNL] = Set(CPU_Generic, CPU_Bonnell, CPU_Penryn, + CPU_Core2, CPU_Nehalem, CPU_Silvermont, + CPU_SandyBridge, CPU_IvyBridge, + CPU_Haswell, CPU_Broadwell, CPU_None); +#endif + #if defined(LLVM_3_2) || defined(LLVM_3_3) || defined(LLVM_3_4) || defined(LLVM_3_5) // LLVM 3.6+ #define CPU_Broadwell CPU_Haswell #else @@ -490,6 +507,12 @@ Target::Target(const char *arch, const char *cpu, const char *isa, bool pic, boo break; #endif +#if !defined(LLVM_3_2) && !defined(LLVM_3_3) && !defined(LLVM_3_4) && !defined(LLVM_3_5) && !defined(LLVM_3_6)// LLVM 3.7+ + case CPU_KNL: + isa = "knl-avx512"; + break; +#endif + #if !defined(LLVM_3_2) && !defined(LLVM_3_3) && !defined(LLVM_3_4) && !defined(LLVM_3_5) case CPU_Broadwell: #endif @@ -822,7 +845,7 @@ Target::Target(const char *arch, const char *cpu, const char *isa, bool pic, boo // TODO: enable knl and skx support // They are downconverted to avx2 for code generation. 
!strcasecmp(isa, "skx") || - !strcasecmp(isa, "knl")) { + !strcasecmp(isa, "knl-avx512")) { this->m_isa = Target::AVX2; this->m_nativeVectorWidth = 8; this->m_nativeVectorAlignment = 32; @@ -862,6 +885,27 @@ Target::Target(const char *arch, const char *cpu, const char *isa, bool pic, boo this->m_hasGather = true; CPUfromISA = CPU_Haswell; } +#if !defined(LLVM_3_2) && !defined(LLVM_3_3) && !defined(LLVM_3_4) && !defined(LLVM_3_5) && !defined(LLVM_3_6)// LLVM 3.7+ + else if (!strcasecmp(isa, "knl-avx512")) { + this->m_isa = Target::KNL_AVX512; + this->m_nativeVectorWidth = 16; + this->m_nativeVectorAlignment = 64; + // ?? this->m_dataTypeWidth = 32; + this->m_vectorWidth = 16; + this->m_maskingIsFree = true; + this->m_maskBitCount = 1; + this->m_hasHalf = true; + this->m_hasRand = true; + this->m_hasGather = this->m_hasScatter = true; + this->m_hasTranscendentals = true; + // For MIC it is set to true due to performance reasons. The option should be tested. + this->m_hasTrigonometry = true; + this->m_hasRsqrtd = this->m_hasRcpd = true; + this->m_hasVecPrefetch = true; + CPUfromISA = CPU_KNL; + } +#endif + #ifdef ISPC_ARM_ENABLED else if (!strcasecmp(isa, "neon-i8x16")) { this->m_isa = Target::NEON8; @@ -1155,8 +1199,8 @@ Target::ISAToString(ISA isa) { return "avx11"; case Target::AVX2: return "avx2"; - case Target::KNL: - return "knl"; + case Target::KNL_AVX512: + return "knl-avx512"; case Target::SKX: return "skx"; case Target::GENERIC: @@ -1203,7 +1247,7 @@ Target::ISAToTargetString(ISA isa) { return "avx2-i32x8"; // TODO: enable knl and skx support. // They are downconverted to avx2 for code generation. - case Target::KNL: + case Target::KNL_AVX512: return "avx2"; case Target::SKX: return "avx2"; diff --git a/ispc.h b/ispc.h index cec6e4d6..99fba1b3 100644 --- a/ispc.h +++ b/ispc.h @@ -187,14 +187,14 @@ public: also that __best_available_isa() needs to be updated if ISAs are added or the enumerant values are reordered. */ enum ISA { - SSE2 = 0, - SSE4 = 1, - AVX = 2, - AVX11 = 3, - AVX2 = 4, - KNL = 5, - SKX = 6, - GENERIC = 7, + SSE2 = 0, + SSE4 = 1, + AVX = 2, + AVX11 = 3, + AVX2 = 4, + KNL_AVX512 = 5, + SKX = 6, + GENERIC = 7, #ifdef ISPC_NVPTX_ENABLED NVPTX, #endif From e0eac74f83d2f79264956d593ccc4d8e6253fcb2 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 30 Apr 2015 14:46:09 +0300 Subject: [PATCH 02/23] [AVX512]: separated knl from avx2 --- ispc.cpp | 15 ++++----------- module.cpp | 2 +- 2 files changed, 5 insertions(+), 12 deletions(-) diff --git a/ispc.cpp b/ispc.cpp index 9e89b6cf..a6d6d2f5 100644 --- a/ispc.cpp +++ b/ispc.cpp @@ -347,7 +347,7 @@ public: #endif #if !defined(LLVM_3_2) && !defined(LLVM_3_3) && !defined(LLVM_3_4) && !defined(LLVM_3_5) && !defined(LLVM_3_6)// LLVM 3.7+ - compat[CPU_KNL] = Set(CPU_Generic, CPU_Bonnell, CPU_Penryn, + compat[CPU_KNL] = Set(CPU_Generic, CPU_Bonnell, CPU_Penryn, CPU_Core2, CPU_Nehalem, CPU_Silvermont, CPU_SandyBridge, CPU_IvyBridge, CPU_Haswell, CPU_Broadwell, CPU_None); @@ -841,11 +841,7 @@ Target::Target(const char *arch, const char *cpu, const char *isa, bool pic, boo CPUfromISA = CPU_IvyBridge; } else if (!strcasecmp(isa, "avx2") || - !strcasecmp(isa, "avx2-i32x8") || - // TODO: enable knl and skx support - // They are downconverted to avx2 for code generation. 
- !strcasecmp(isa, "skx") || - !strcasecmp(isa, "knl-avx512")) { + !strcasecmp(isa, "avx2-i32x8")) { this->m_isa = Target::AVX2; this->m_nativeVectorWidth = 8; this->m_nativeVectorAlignment = 32; @@ -943,8 +939,7 @@ Target::Target(const char *arch, const char *cpu, const char *isa, bool pic, boo } #endif #ifdef ISPC_NVPTX_ENABLED - else if (!strcasecmp(isa, "nvptx")) - { + else if (!strcasecmp(isa, "nvptx")) { this->m_isa = Target::NVPTX; this->m_cpu = "sm_35"; this->m_nativeVectorWidth = 32; @@ -1245,10 +1240,8 @@ Target::ISAToTargetString(ISA isa) { return "avx1.1-i32x8"; case Target::AVX2: return "avx2-i32x8"; - // TODO: enable knl and skx support. - // They are downconverted to avx2 for code generation. case Target::KNL_AVX512: - return "avx2"; + return "knl-avx512"; case Target::SKX: return "avx2"; case Target::GENERIC: diff --git a/module.cpp b/module.cpp index 6dc80747..112f66a1 100644 --- a/module.cpp +++ b/module.cpp @@ -2896,7 +2896,7 @@ lCreateDispatchFunction(llvm::Module *module, llvm::Function *setISAFunc, if ((Target::ISA)(i == Target::GENERIC) && !g->target->getTreatGenericAsSmth().empty()) { if (g->target->getTreatGenericAsSmth() == "knl_generic") - dispatchNum = Target::KNL; + dispatchNum = Target::KNL_AVX512; else if (g->target->getTreatGenericAsSmth() == "skx_generic") dispatchNum = Target::SKX; else { From 92650bdff060ccfe00cd81752e526c9733889ccc Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 30 Apr 2015 15:38:38 +0300 Subject: [PATCH 03/23] [AVX512]: add knl cpu to the list of knl backwards compatibility list --- ispc.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ispc.cpp b/ispc.cpp index a6d6d2f5..3596e171 100644 --- a/ispc.cpp +++ b/ispc.cpp @@ -347,7 +347,7 @@ public: #endif #if !defined(LLVM_3_2) && !defined(LLVM_3_3) && !defined(LLVM_3_4) && !defined(LLVM_3_5) && !defined(LLVM_3_6)// LLVM 3.7+ - compat[CPU_KNL] = Set(CPU_Generic, CPU_Bonnell, CPU_Penryn, + compat[CPU_KNL] = Set(CPU_KNL, CPU_Generic, CPU_Bonnell, CPU_Penryn, CPU_Core2, CPU_Nehalem, CPU_Silvermont, CPU_SandyBridge, CPU_IvyBridge, CPU_Haswell, CPU_Broadwell, CPU_None); @@ -1125,7 +1125,8 @@ Target::SupportedTargets() { "avx1.1-i32x8, avx1.1-i32x16, avx1.1-i64x4 " "avx2-i32x8, avx2-i32x16, avx2-i64x4, " "generic-x1, generic-x4, generic-x8, generic-x16, " - "generic-x32, generic-x64, *-generic-x16" + "generic-x32, generic-x64, *-generic-x16, " + "knl-avx512" #ifdef ISPC_ARM_ENABLED ", neon-i8x16, neon-i16x8, neon-i32x4" #endif From 3eccce5e4f0c1078443167cc68ee1c6bdf396957 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 30 Apr 2015 17:03:35 +0300 Subject: [PATCH 04/23] [AVX512]: new .ll file for knl target --- Makefile | 2 +- builtins.cpp | 15 ++ builtins/target-knl.ll | 538 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 554 insertions(+), 1 deletion(-) create mode 100644 builtins/target-knl.ll diff --git a/Makefile b/Makefile index 28d83695..b551a49a 100644 --- a/Makefile +++ b/Makefile @@ -201,7 +201,7 @@ HEADERS=ast.h builtins.h ctx.h decl.h expr.h func.h ispc.h llvmutil.h module.h \ opt.h stmt.h sym.h type.h util.h TARGETS=avx2-i64x4 avx11-i64x4 avx1-i64x4 avx1 avx1-x2 avx11 avx11-x2 avx2 avx2-x2 \ sse2 sse2-x2 sse4-8 sse4-16 sse4 sse4-x2 \ - generic-4 generic-8 generic-16 generic-32 generic-64 generic-1 + generic-4 generic-8 generic-16 generic-32 generic-64 generic-1 knl ifneq ($(ARM_ENABLED), 0) TARGETS+=neon-32 neon-16 neon-8 endif diff --git a/builtins.cpp b/builtins.cpp index 777d0671..ab526b64 100644 --- a/builtins.cpp +++ 
b/builtins.cpp @@ -1328,6 +1328,21 @@ DefineStdlib(SymbolTable *symbolTable, llvm::LLVMContext *ctx, llvm::Module *mod } break; } + case Target::KNL_AVX512: { + switch (g->target->getVectorWidth()) { + case 16: + if (runtime32) { + EXPORT_MODULE(builtins_bitcode_knl_32bit); + } + else { + EXPORT_MODULE(builtins_bitcode_knl_64bit); + } + break; + default: + FATAL("logic error in DefineStdlib"); + } + break; + } case Target::GENERIC: { switch (g->target->getVectorWidth()) { case 4: diff --git a/builtins/target-knl.ll b/builtins/target-knl.ll new file mode 100644 index 00000000..abc39885 --- /dev/null +++ b/builtins/target-knl.ll @@ -0,0 +1,538 @@ +;; Copyright (c) 2010-2013, Intel Corporation +;; All rights reserved. +;; +;; Redistribution and use in source and binary forms, with or without +;; modification, are permitted provided that the following conditions are +;; met: +;; +;; * Redistributions of source code must retain the above copyright +;; notice, this list of conditions and the following disclaimer. +;; +;; * Redistributions in binary form must reproduce the above copyright +;; notice, this list of conditions and the following disclaimer in the +;; documentation and/or other materials provided with the distribution. +;; +;; * Neither the name of Intel Corporation nor the names of its +;; contributors may be used to endorse or promote products derived from +;; this software without specific prior written permission. +;; +;; +;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER +;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
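+;; KNL (AVX-512) target, first cut.
+;;
+;; Note that this initial implementation does not use AVX-512 intrinsics yet:
+;; it sets HAVE_GATHER, pulls in the 16-wide AVX2 builtins from
+;; target-avx-x2.ll, and implements the 16-wide entry points below -- int32
+;; min/max, half<->float conversion, and the gather variants -- by splitting
+;; each 16-wide vector into 8-wide or 4-wide pieces and calling the
+;; corresponding llvm.x86.avx2.* intrinsics on each piece.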
+ +define(`HAVE_GATHER', `1') + +include(`target-avx-x2.ll') + +rdrand_definition() + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; int min/max + +declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readonly +declare <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32>, <8 x i32>) nounwind readonly + +define <16 x i32> @__min_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { + binary8to16(m, i32, @llvm.x86.avx2.pmins.d, %0, %1) + ret <16 x i32> %m +} + +define <16 x i32> @__max_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { + binary8to16(m, i32, @llvm.x86.avx2.pmaxs.d, %0, %1) + ret <16 x i32> %m +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; unsigned int min/max + +declare <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32>, <8 x i32>) nounwind readonly +declare <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32>, <8 x i32>) nounwind readonly + +define <16 x i32> @__min_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { + binary8to16(m, i32, @llvm.x86.avx2.pminu.d, %0, %1) + ret <16 x i32> %m +} + +define <16 x i32> @__max_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { + binary8to16(m, i32, @llvm.x86.avx2.pmaxu.d, %0, %1) + ret <16 x i32> %m +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; float/half conversions + +declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readnone +; 0 is round nearest even +declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readnone + +define <16 x float> @__half_to_float_varying(<16 x i16> %v) nounwind readnone { + %r_0 = shufflevector <16 x i16> %v, <16 x i16> undef, + <8 x i32> + %vr_0 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %r_0) + %r_1 = shufflevector <16 x i16> %v, <16 x i16> undef, + <8 x i32> + %vr_1 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %r_1) + %r = shufflevector <8 x float> %vr_0, <8 x float> %vr_1, + <16 x i32> + ret <16 x float> %r +} + +define <16 x i16> @__float_to_half_varying(<16 x float> %v) nounwind readnone { + %r_0 = shufflevector <16 x float> %v, <16 x float> undef, + <8 x i32> + %vr_0 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %r_0, i32 0) + %r_1 = shufflevector <16 x float> %v, <16 x float> undef, + <8 x i32> + %vr_1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %r_1, i32 0) + %r = shufflevector <8 x i16> %vr_0, <8 x i16> %vr_1, + <16 x i32> + ret <16 x i16> %r +} + +define float @__half_to_float_uniform(i16 %v) nounwind readnone { + %v1 = bitcast i16 %v to <1 x i16> + %vv = shufflevector <1 x i16> %v1, <1 x i16> undef, + <8 x i32> + %rv = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %vv) + %r = extractelement <8 x float> %rv, i32 0 + ret float %r +} + +define i16 @__float_to_half_uniform(float %v) nounwind readnone { + %v1 = bitcast float %v to <1 x float> + %vv = shufflevector <1 x float> %v1, <1 x float> undef, + <8 x i32> + ; round to nearest even + %rv = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %vv, i32 0) + %r = extractelement <8 x i16> %rv, i32 0 + ret i16 %r +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; gather + +declare void @llvm.trap() noreturn nounwind + +; $1: type +; $2: var base name +define(`extract_4s', ` + %$2_1 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> + %$2_2 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> + %$2_3 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> 
+ %$2_4 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> +') + +; $1: type +; $2: var base name +define(`extract_8s', ` + %$2_1 = shufflevector <16 x $1> %$2, <16 x $1> undef, + <8 x i32> + %$2_2 = shufflevector <16 x $1> %$2, <16 x $1> undef, + <8 x i32> +') + +; $1: element type +; $2: ret name +; $3: v1 +; $4: v2 +define(`assemble_8s', ` + %$2 = shufflevector <8 x $1> %$3, <8 x $1> %$4, + <16 x i32> +') + +; $1: element type +; $2: ret name +; $3: v1 +; $4: v2 +; $5: v3 +; $6: v4 +define(`assemble_4s', ` + %$2_1 = shufflevector <4 x $1> %$3, <4 x $1> %$4, + <8 x i32> + %$2_2 = shufflevector <4 x $1> %$5, <4 x $1> %$6, + <8 x i32> + assemble_8s($1, $2, $2_1, $2_2) +') + + +gen_gather(i8) +gen_gather(i16) + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; int32 gathers + +declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %target, i8 * %ptr, + <8 x i32> %indices, <8 x i32> %mask, i8 %scale) readonly nounwind +declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %target, i8 * %ptr, + <4 x i64> %indices, <4 x i32> %mask, i8 %scale) readonly nounwind + +define <16 x i32> @__gather_base_offsets32_i32(i8 * %ptr, i32 %scale, <16 x i32> %offsets, + <16 x i32> %vecmask) nounwind readonly alwaysinline { + %scale8 = trunc i32 %scale to i8 + extract_8s(i32, offsets) + extract_8s(i32, vecmask) + + %v1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * %ptr, + <8 x i32> %offsets_1, <8 x i32> %vecmask_1, i8 %scale8) + %v2 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * %ptr, + <8 x i32> %offsets_2, <8 x i32> %vecmask_2, i8 %scale8) + + assemble_8s(i32, v, v1, v2) + + ret <16 x i32> %v +} + + +define <16 x i32> @__gather_base_offsets64_i32(i8 * %ptr, + i32 %scale, <16 x i64> %offsets, + <16 x i32> %vecmask) nounwind readonly alwaysinline { + %scale8 = trunc i32 %scale to i8 + + extract_4s(i32, vecmask) + extract_4s(i64, offsets) + + %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, + <4 x i64> %offsets_1, <4 x i32> %vecmask_1, i8 %scale8) + %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, + <4 x i64> %offsets_2, <4 x i32> %vecmask_2, i8 %scale8) + %v3 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, + <4 x i64> %offsets_3, <4 x i32> %vecmask_3, i8 %scale8) + %v4 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, + <4 x i64> %offsets_4, <4 x i32> %vecmask_4, i8 %scale8) + + assemble_4s(i32, v, v1, v2, v3, v4) + + ret <16 x i32> %v +} + + +define <16 x i32> @__gather32_i32(<16 x i32> %ptrs, + <16 x i32> %vecmask) nounwind readonly alwaysinline { + extract_8s(i32, ptrs) + extract_8s(i32, vecmask) + + %v1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * null, + <8 x i32> %ptrs_1, <8 x i32> %vecmask_1, i8 1) + %v2 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * null, + <8 x i32> %ptrs_2, <8 x i32> %vecmask_2, i8 1) + + assemble_8s(i32, v, v1, v2) + + ret <16 x i32> %v +} + + +define <16 x i32> @__gather64_i32(<16 x i64> %ptrs, + <16 x i32> %vecmask) nounwind readonly alwaysinline { + extract_4s(i64, ptrs) + extract_4s(i32, vecmask) + + %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, + <4 x i64> %ptrs_1, <4 x i32> %vecmask_1, i8 1) + %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, + <4 x i64> %ptrs_2, <4 x i32> %vecmask_2, i8 1) + %v3 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, + 
<4 x i64> %ptrs_3, <4 x i32> %vecmask_3, i8 1) + %v4 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, + <4 x i64> %ptrs_4, <4 x i32> %vecmask_4, i8 1) + + assemble_4s(i32, v, v1, v2, v3, v4) + + ret <16 x i32> %v +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; float gathers + +declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %target, i8 * %ptr, + <8 x i32> %indices, <8 x float> %mask, i8 %scale8) readonly nounwind +declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %target, i8 * %ptr, + <4 x i64> %indices, <4 x float> %mask, i8 %scale8) readonly nounwind + +define <16 x float> @__gather_base_offsets32_float(i8 * %ptr, + i32 %scale, <16 x i32> %offsets, + <16 x i32> %vecmask) nounwind readonly alwaysinline { + %scale8 = trunc i32 %scale to i8 + %mask = bitcast <16 x i32> %vecmask to <16 x float> + extract_8s(i32, offsets) + extract_8s(float, mask) + + %v1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * %ptr, + <8 x i32> %offsets_1, <8 x float> %mask_1, i8 %scale8) + %v2 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * %ptr, + <8 x i32> %offsets_2, <8 x float> %mask_2, i8 %scale8) + + assemble_8s(float, v, v1, v2) + + ret <16 x float> %v +} + + +define <16 x float> @__gather_base_offsets64_float(i8 * %ptr, + i32 %scale, <16 x i64> %offsets, + <16 x i32> %vecmask) nounwind readonly alwaysinline { + %scale8 = trunc i32 %scale to i8 + %mask = bitcast <16 x i32> %vecmask to <16 x float> + extract_4s(i64, offsets) + extract_4s(float, mask) + + %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, + <4 x i64> %offsets_1, <4 x float> %mask_1, i8 %scale8) + %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, + <4 x i64> %offsets_2, <4 x float> %mask_2, i8 %scale8) + %v3 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, + <4 x i64> %offsets_3, <4 x float> %mask_3, i8 %scale8) + %v4 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, + <4 x i64> %offsets_4, <4 x float> %mask_4, i8 %scale8) + + assemble_4s(float, v, v1, v2, v3, v4) + + ret <16 x float> %v +} + + +define <16 x float> @__gather32_float(<16 x i32> %ptrs, + <16 x i32> %vecmask) nounwind readonly alwaysinline { + %mask = bitcast <16 x i32> %vecmask to <16 x float> + extract_8s(float, mask) + extract_8s(i32, ptrs) + + %v1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * null, + <8 x i32> %ptrs_1, <8 x float> %mask_1, i8 1) + %v2 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * null, + <8 x i32> %ptrs_2, <8 x float> %mask_2, i8 1) + + assemble_8s(float, v, v1, v2) + + ret <16 x float> %v +} + + +define <16 x float> @__gather64_float(<16 x i64> %ptrs, + <16 x i32> %vecmask) nounwind readonly alwaysinline { + %mask = bitcast <16 x i32> %vecmask to <16 x float> + extract_4s(i64, ptrs) + extract_4s(float, mask) + + %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, + <4 x i64> %ptrs_1, <4 x float> %mask_1, i8 1) + %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, + <4 x i64> %ptrs_2, <4 x float> %mask_2, i8 1) + %v3 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, + <4 x i64> %ptrs_3, <4 x float> %mask_3, i8 1) + %v4 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, + <4 x i64> %ptrs_4, <4 x float> %mask_4, i8 1) 
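+  ;; assemble_4s() (defined above) shuffles the four 4-wide partial gathers
+  ;; back together into the single 16-wide result vector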
+ + assemble_4s(float, v, v1, v2, v3, v4) + + ret <16 x float> %v +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; int64 gathers + +declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %target, i8 * %ptr, + <4 x i32> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind +declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %target, i8 * %ptr, + <4 x i64> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind + +define <16 x i64> @__gather_base_offsets32_i64(i8 * %ptr, + i32 %scale, <16 x i32> %offsets, + <16 x i32> %mask32) nounwind readonly alwaysinline { + %scale8 = trunc i32 %scale to i8 + %vecmask = sext <16 x i32> %mask32 to <16 x i64> + extract_4s(i32, offsets) + extract_4s(i64, vecmask) + + %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, + <4 x i32> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8) + %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, + <4 x i32> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8) + %v3 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, + <4 x i32> %offsets_3, <4 x i64> %vecmask_3, i8 %scale8) + %v4 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, + <4 x i32> %offsets_4, <4 x i64> %vecmask_4, i8 %scale8) + + assemble_4s(i64, v, v1, v2, v3, v4) + + ret <16 x i64> %v +} + + +define <16 x i64> @__gather_base_offsets64_i64(i8 * %ptr, + i32 %scale, <16 x i64> %offsets, + <16 x i32> %mask32) nounwind readonly alwaysinline { + %scale8 = trunc i32 %scale to i8 + %vecmask = sext <16 x i32> %mask32 to <16 x i64> + extract_4s(i64, offsets) + extract_4s(i64, vecmask) + + %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, + <4 x i64> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8) + %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, + <4 x i64> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8) + %v3 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, + <4 x i64> %offsets_3, <4 x i64> %vecmask_3, i8 %scale8) + %v4 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, + <4 x i64> %offsets_4, <4 x i64> %vecmask_4, i8 %scale8) + + assemble_4s(i64, v, v1, v2, v3, v4) + + ret <16 x i64> %v +} + + +define <16 x i64> @__gather32_i64(<16 x i32> %ptrs, + <16 x i32> %mask32) nounwind readonly alwaysinline { + %vecmask = sext <16 x i32> %mask32 to <16 x i64> + extract_4s(i32, ptrs) + extract_4s(i64, vecmask) + + %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, + <4 x i32> %ptrs_1, <4 x i64> %vecmask_1, i8 1) + %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, + <4 x i32> %ptrs_2, <4 x i64> %vecmask_2, i8 1) + %v3 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, + <4 x i32> %ptrs_3, <4 x i64> %vecmask_3, i8 1) + %v4 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, + <4 x i32> %ptrs_4, <4 x i64> %vecmask_4, i8 1) + + assemble_4s(i64, v, v1, v2, v3, v4) + + ret <16 x i64> %v +} + +define <16 x i64> @__gather64_i64(<16 x i64> %ptrs, + <16 x i32> %mask32) nounwind readonly alwaysinline { + %vecmask = sext <16 x i32> %mask32 to <16 x i64> + extract_4s(i64, ptrs) + extract_4s(i64, vecmask) + + %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, + <4 x i64> %ptrs_1, <4 x i64> %vecmask_1, i8 1) + %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, + <4 x i64> %ptrs_2, <4 x i64> 
%vecmask_2, i8 1) + %v3 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, + <4 x i64> %ptrs_3, <4 x i64> %vecmask_3, i8 1) + %v4 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, + <4 x i64> %ptrs_4, <4 x i64> %vecmask_4, i8 1) + + assemble_4s(i64, v, v1, v2, v3, v4) + + ret <16 x i64> %v +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; double gathers + +declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %target, i8 * %ptr, + <4 x i64> %indices, <4 x double> %mask, i8 %scale) readonly nounwind +declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %target, i8 * %ptr, + <4 x i32> %indices, <4 x double> %mask, i8 %scale) readonly nounwind + +define <16 x double> @__gather_base_offsets32_double(i8 * %ptr, + i32 %scale, <16 x i32> %offsets, + <16 x i32> %mask32) nounwind readonly alwaysinline { + %scale8 = trunc i32 %scale to i8 + %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> + %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> + extract_4s(i32, offsets) + extract_4s(double, vecmask) + + %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, + <4 x i32> %offsets_1, <4 x double> %vecmask_1, i8 %scale8) + %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, + <4 x i32> %offsets_2, <4 x double> %vecmask_2, i8 %scale8) + %v3 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, + <4 x i32> %offsets_3, <4 x double> %vecmask_3, i8 %scale8) + %v4 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, + <4 x i32> %offsets_4, <4 x double> %vecmask_4, i8 %scale8) + + assemble_4s(double, v, v1, v2, v3, v4) + + ret <16 x double> %v +} + + +define <16 x double> @__gather_base_offsets64_double(i8 * %ptr, + i32 %scale, <16 x i64> %offsets, + <16 x i32> %mask32) nounwind readonly alwaysinline { + %scale8 = trunc i32 %scale to i8 + %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> + %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> + extract_4s(i64, offsets) + extract_4s(double, vecmask) + + %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, + <4 x i64> %offsets_1, <4 x double> %vecmask_1, i8 %scale8) + %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, + <4 x i64> %offsets_2, <4 x double> %vecmask_2, i8 %scale8) + %v3 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, + <4 x i64> %offsets_3, <4 x double> %vecmask_3, i8 %scale8) + %v4 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, + <4 x i64> %offsets_4, <4 x double> %vecmask_4, i8 %scale8) + + assemble_4s(double, v, v1, v2, v3, v4) + + ret <16 x double> %v +} + + +define <16 x double> @__gather32_double(<16 x i32> %ptrs, + <16 x i32> %mask32) nounwind readonly alwaysinline { + %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> + %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> + extract_4s(i32, ptrs) + extract_4s(double, vecmask) + + %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, + <4 x i32> %ptrs_1, <4 x double> %vecmask_1, i8 1) + %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, + <4 x i32> %ptrs_2, <4 x double> %vecmask_2, i8 1) + %v3 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, + <4 x i32> %ptrs_3, <4 x double> %vecmask_3, i8 1) + %v4 = call <4 x double> 
@llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, + <4 x i32> %ptrs_4, <4 x double> %vecmask_4, i8 1) + + assemble_4s(double, v, v1, v2, v3, v4) + + ret <16 x double> %v +} + + +define <16 x double> @__gather64_double(<16 x i64> %ptrs, + <16 x i32> %mask32) nounwind readonly alwaysinline { + %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> + %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> + extract_4s(i64, ptrs) + extract_4s(double, vecmask) + + %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null, + <4 x i64> %ptrs_1, <4 x double> %vecmask_1, i8 1) + %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null, + <4 x i64> %ptrs_2, <4 x double> %vecmask_2, i8 1) + %v3 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null, + <4 x i64> %ptrs_3, <4 x double> %vecmask_3, i8 1) + %v4 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null, + <4 x i64> %ptrs_4, <4 x double> %vecmask_4, i8 1) + + assemble_4s(double, v, v1, v2, v3, v4) + + ret <16 x double> %v +} From d01718aa91ed8dc3520c5ad620a80d9266c5248c Mon Sep 17 00:00:00 2001 From: Vsevolod Livinskiy Date: Thu, 30 Apr 2015 17:16:00 +0300 Subject: [PATCH 05/23] [AVX512]: avx512 common file was added --- builtins/target-avx512-common.ll | 293 +++++++++++++++++++++++++++++++ 1 file changed, 293 insertions(+) create mode 100644 builtins/target-avx512-common.ll diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll new file mode 100644 index 00000000..10100098 --- /dev/null +++ b/builtins/target-avx512-common.ll @@ -0,0 +1,293 @@ +;; Copyright (c) 2010-2015, Intel Corporation +;; All rights reserved. +;; +;; Redistribution and use in source and binary forms, with or without +;; modification, are permitted provided that the following conditions are +;; met: +;; +;; * Redistributions of source code must retain the above copyright +;; notice, this list of conditions and the following disclaimer. +;; +;; * Redistributions in binary form must reproduce the above copyright +;; notice, this list of conditions and the following disclaimer in the +;; documentation and/or other materials provided with the distribution. +;; +;; * Neither the name of Intel Corporation nor the names of its +;; contributors may be used to endorse or promote products derived from +;; this software without specific prior written permission. +;; +;; +;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER +;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; AVX target implementation. +;; +;; Please note that this file uses SSE intrinsics, but LLVM generates AVX +;; instructions, so it doesn't makes sense to change this implemenation. 
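+;;
+;; What this common file provides so far is the set of uniform (scalar)
+;; helpers intended to be shared by the AVX-512 targets: round/floor/ceil for
+;; float and double via the SSE4.1 round.ss/round.sd intrinsics; rcp and
+;; rsqrt from the SSE rcp.ss/rsqrt.ss estimates plus one Newton-Raphson
+;; refinement step (rcp: iv*(2-v*iv), rsqrt: 0.5*is*(3-(v*is)*is)); scalar
+;; sqrt; __fastmath() (sets the DAZ/FTZ bits in MXCSR); scalar
+;; int/uint/float/double min/max; 32/64-bit popcount; and the standard
+;; prefetch/shuffle/aos-soa helper macros.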
+ + +ctlztz() +define_prefetches() +define_shuffles() +aossoa() + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; rounding floats + +declare <4 x float> @llvm.x86.sse41.round.ss(<4 x float>, <4 x float>, i32) nounwind readnone + +define float @__round_uniform_float(float) nounwind readonly alwaysinline { + ; roundss, round mode nearest 0b00 | don't signal precision exceptions 0b1000 = 8 + ; the roundss intrinsic is a total mess--docs say: + ; + ; __m128 _mm_round_ss (__m128 a, __m128 b, const int c) + ; + ; b is a 128-bit parameter. The lowest 32 bits are the result of the rounding function + ; on b0. The higher order 96 bits are copied directly from input parameter a. The + ; return value is described by the following equations: + ; + ; r0 = RND(b0) + ; r1 = a1 + ; r2 = a2 + ; r3 = a3 + ; + ; It doesn't matter what we pass as a, since we only need the r0 value + ; here. So we pass the same register for both. Further, only the 0th + ; element of the b parameter matters + %xi = insertelement <4 x float> undef, float %0, i32 0 + %xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 8) + %rs = extractelement <4 x float> %xr, i32 0 + ret float %rs +} + +define float @__floor_uniform_float(float) nounwind readonly alwaysinline { + ; see above for round_ss instrinsic discussion... + %xi = insertelement <4 x float> undef, float %0, i32 0 + ; roundps, round down 0b01 | don't signal precision exceptions 0b1001 = 9 + %xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 9) + %rs = extractelement <4 x float> %xr, i32 0 + ret float %rs +} + +define float @__ceil_uniform_float(float) nounwind readonly alwaysinline { + ; see above for round_ss instrinsic discussion... + %xi = insertelement <4 x float> undef, float %0, i32 0 + ; roundps, round up 0b10 | don't signal precision exceptions 0b1010 = 10 + %xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 10) + %rs = extractelement <4 x float> %xr, i32 0 + ret float %rs +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; rounding doubles + +declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) nounwind readnone + +define double @__round_uniform_double(double) nounwind readonly alwaysinline { + %xi = insertelement <2 x double> undef, double %0, i32 0 + %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 8) + %rs = extractelement <2 x double> %xr, i32 0 + ret double %rs +} + +define double @__floor_uniform_double(double) nounwind readonly alwaysinline { + ; see above for round_ss instrinsic discussion... + %xi = insertelement <2 x double> undef, double %0, i32 0 + ; roundsd, round down 0b01 | don't signal precision exceptions 0b1001 = 9 + %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 9) + %rs = extractelement <2 x double> %xr, i32 0 + ret double %rs +} + +define double @__ceil_uniform_double(double) nounwind readonly alwaysinline { + ; see above for round_ss instrinsic discussion... 
+ %xi = insertelement <2 x double> undef, double %0, i32 0 + ; roundsd, round up 0b10 | don't signal precision exceptions 0b1010 = 10 + %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 10) + %rs = extractelement <2 x double> %xr, i32 0 + ret double %rs +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; rcp + +declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone + +define float @__rcp_uniform_float(float) nounwind readonly alwaysinline { + ; do the rcpss call + ; uniform float iv = extract(__rcp_u(v), 0); + ; return iv * (2. - v * iv); + %vecval = insertelement <4 x float> undef, float %0, i32 0 + %call = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %vecval) + %scall = extractelement <4 x float> %call, i32 0 + + ; do one N-R iteration to improve precision, as above + %v_iv = fmul float %0, %scall + %two_minus = fsub float 2., %v_iv + %iv_mul = fmul float %scall, %two_minus + ret float %iv_mul +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; rsqrt + +declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone + +define float @__rsqrt_uniform_float(float) nounwind readonly alwaysinline { + ; uniform float is = extract(__rsqrt_u(v), 0); + %v = insertelement <4 x float> undef, float %0, i32 0 + %vis = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %v) + %is = extractelement <4 x float> %vis, i32 0 + + ; Newton-Raphson iteration to improve precision + ; return 0.5 * is * (3. - (v * is) * is); + %v_is = fmul float %0, %is + %v_is_is = fmul float %v_is, %is + %three_sub = fsub float 3., %v_is_is + %is_mul = fmul float %is, %three_sub + %half_scale = fmul float 0.5, %is_mul + ret float %half_scale +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; sqrt + +declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone + +define float @__sqrt_uniform_float(float) nounwind readonly alwaysinline { + sse_unary_scalar(ret, 4, float, @llvm.x86.sse.sqrt.ss, %0) + ret float %ret +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; double precision sqrt + +declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone + +define double @__sqrt_uniform_double(double) nounwind alwaysinline { + sse_unary_scalar(ret, 2, double, @llvm.x86.sse2.sqrt.sd, %0) + ret double %ret +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; fast math mode + +declare void @llvm.x86.sse.stmxcsr(i8 *) nounwind +declare void @llvm.x86.sse.ldmxcsr(i8 *) nounwind + +define void @__fastmath() nounwind alwaysinline { + %ptr = alloca i32 + %ptr8 = bitcast i32 * %ptr to i8 * + call void @llvm.x86.sse.stmxcsr(i8 * %ptr8) + %oldval = load PTR_OP_ARGS(`i32 ') %ptr + + ; turn on DAZ (64)/FTZ (32768) -> 32832 + %update = or i32 %oldval, 32832 + store i32 %update, i32 *%ptr + call void @llvm.x86.sse.ldmxcsr(i8 * %ptr8) + ret void +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; float min/max + +define float @__max_uniform_float(float, float) nounwind readonly alwaysinline { + %cmp = fcmp ogt float %1, %0 + %ret = select i1 %cmp, float %1, float %0 + ret float %ret +} + +define float @__min_uniform_float(float, float) nounwind readonly alwaysinline { + %cmp = fcmp ogt float %1, %0 + %ret = select i1 %cmp, float %0, float %1 + ret float %ret +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; double 
precision min/max + +define double @__min_uniform_double(double, double) nounwind readnone alwaysinline { + %cmp = fcmp ogt double %1, %0 + %ret = select i1 %cmp, double %0, double %1 + ret double %ret +} + +define double @__max_uniform_double(double, double) nounwind readnone alwaysinline { + %cmp = fcmp ogt double %1, %0 + %ret = select i1 %cmp, double %1, double %0 + ret double %ret +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone +declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone +declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone +declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; int min/max + +define i32 @__min_uniform_int32(i32, i32) nounwind readonly alwaysinline { + %cmp = icmp sgt i32 %1, %0 + %ret = select i1 %cmp, i32 %0, i32 %1 + ret i32 %ret +} + +define i32 @__max_uniform_int32(i32, i32) nounwind readonly alwaysinline { + %cmp = icmp sgt i32 %1, %0 + %ret = select i1 %cmp, i32 %1, i32 %0 + ret i32 %ret +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; unsigned int min/max + +define i32 @__min_uniform_uint32(i32, i32) nounwind readonly alwaysinline { + %cmp = icmp ugt i32 %1, %0 + %ret = select i1 %cmp, i32 %0, i32 %1 + ret i32 %ret +} + +define i32 @__max_uniform_uint32(i32, i32) nounwind readonly alwaysinline { + %cmp = icmp ugt i32 %1, %0 + %ret = select i1 %cmp, i32 %1, i32 %0 + ret i32 %ret +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; horizontal ops / reductions + +declare i32 @llvm.ctpop.i32(i32) nounwind readnone + +define i32 @__popcnt_int32(i32) nounwind readonly alwaysinline { + %call = call i32 @llvm.ctpop.i32(i32 %0) + ret i32 %call +} + +declare i64 @llvm.ctpop.i64(i64) nounwind readnone + +define i64 @__popcnt_int64(i64) nounwind readonly alwaysinline { + %call = call i64 @llvm.ctpop.i64(i64 %0) + ret i64 %call +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; int8/int16 builtins + +define_avgs() +declare_nvptx() + From 7628f2a6c908e26f0edc84345cf36283c6ee56c2 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Wed, 6 May 2015 14:59:02 +0300 Subject: [PATCH 06/23] [AVX512]: try gemeric-16 like builtins --- builtins/target-avx512-common.ll | 519 ++++++++++++++++++------------- builtins/target-knl.ll | 512 +----------------------------- ispc.cpp | 6 + 3 files changed, 320 insertions(+), 717 deletions(-) diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index 10100098..d402e927 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -29,265 +29,366 @@ ;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; AVX target implementation. -;; -;; Please note that this file uses SSE intrinsics, but LLVM generates AVX -;; instructions, so it doesn't makes sense to change this implemenation. 
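+;; This version follows the generic-16 style of builtins: the per-lane mask
+;; is a 1-bit predicate (MASK = i1, matching m_maskBitCount = 1 for the
+;; knl-avx512 target in ispc.cpp), HAVE_GATHER/HAVE_SCATTER are enabled, and
+;; most of the vector operations are left as external declarations
+;; (presumably to be filled in by later patches); only the
+;; __masked_store_blend_* helpers are defined here, as a load/select/store
+;; sequence.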
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128-v16:16:16-v32:32:32-v4:128:128"; +define(`MASK',`i1') +define(`HAVE_GATHER',`1') +define(`HAVE_SCATTER',`1') -ctlztz() -define_prefetches() -define_shuffles() -aossoa() +include(`util.m4') +stdlib_core() +scans() +reduce_equal(WIDTH) +rdrand_decls() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; rounding floats +;; broadcast/rotate/shuffle -declare <4 x float> @llvm.x86.sse41.round.ss(<4 x float>, <4 x float>, i32) nounwind readnone +declare @__smear_float(float) nounwind readnone +declare @__smear_double(double) nounwind readnone +declare @__smear_i8(i8) nounwind readnone +declare @__smear_i16(i16) nounwind readnone +declare @__smear_i32(i32) nounwind readnone +declare @__smear_i64(i64) nounwind readnone -define float @__round_uniform_float(float) nounwind readonly alwaysinline { - ; roundss, round mode nearest 0b00 | don't signal precision exceptions 0b1000 = 8 - ; the roundss intrinsic is a total mess--docs say: - ; - ; __m128 _mm_round_ss (__m128 a, __m128 b, const int c) - ; - ; b is a 128-bit parameter. The lowest 32 bits are the result of the rounding function - ; on b0. The higher order 96 bits are copied directly from input parameter a. The - ; return value is described by the following equations: - ; - ; r0 = RND(b0) - ; r1 = a1 - ; r2 = a2 - ; r3 = a3 - ; - ; It doesn't matter what we pass as a, since we only need the r0 value - ; here. So we pass the same register for both. Further, only the 0th - ; element of the b parameter matters - %xi = insertelement <4 x float> undef, float %0, i32 0 - %xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 8) - %rs = extractelement <4 x float> %xr, i32 0 - ret float %rs -} +declare @__setzero_float() nounwind readnone +declare @__setzero_double() nounwind readnone +declare @__setzero_i8() nounwind readnone +declare @__setzero_i16() nounwind readnone +declare @__setzero_i32() nounwind readnone +declare @__setzero_i64() nounwind readnone -define float @__floor_uniform_float(float) nounwind readonly alwaysinline { - ; see above for round_ss instrinsic discussion... - %xi = insertelement <4 x float> undef, float %0, i32 0 - ; roundps, round down 0b01 | don't signal precision exceptions 0b1001 = 9 - %xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 9) - %rs = extractelement <4 x float> %xr, i32 0 - ret float %rs -} +declare @__undef_float() nounwind readnone +declare @__undef_double() nounwind readnone +declare @__undef_i8() nounwind readnone +declare @__undef_i16() nounwind readnone +declare @__undef_i32() nounwind readnone +declare @__undef_i64() nounwind readnone -define float @__ceil_uniform_float(float) nounwind readonly alwaysinline { - ; see above for round_ss instrinsic discussion... 
- %xi = insertelement <4 x float> undef, float %0, i32 0 - ; roundps, round up 0b10 | don't signal precision exceptions 0b1010 = 10 - %xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 10) - %rs = extractelement <4 x float> %xr, i32 0 - ret float %rs -} +declare @__broadcast_float(, i32) nounwind readnone +declare @__broadcast_double(, i32) nounwind readnone +declare @__broadcast_i8(, i32) nounwind readnone +declare @__broadcast_i16(, i32) nounwind readnone +declare @__broadcast_i32(, i32) nounwind readnone +declare @__broadcast_i64(, i32) nounwind readnone + +declare @__rotate_i8(, i32) nounwind readnone +declare @__rotate_i16(, i32) nounwind readnone +declare @__rotate_float(, i32) nounwind readnone +declare @__rotate_i32(, i32) nounwind readnone +declare @__rotate_double(, i32) nounwind readnone +declare @__rotate_i64(, i32) nounwind readnone + +declare @__shift_i8(, i32) nounwind readnone +declare @__shift_i16(, i32) nounwind readnone +declare @__shift_float(, i32) nounwind readnone +declare @__shift_i32(, i32) nounwind readnone +declare @__shift_double(, i32) nounwind readnone +declare @__shift_i64(, i32) nounwind readnone + +declare @__shuffle_i8(, ) nounwind readnone +declare @__shuffle2_i8(, , + ) nounwind readnone +declare @__shuffle_i16(, ) nounwind readnone +declare @__shuffle2_i16(, , + ) nounwind readnone +declare @__shuffle_float(, + ) nounwind readnone +declare @__shuffle2_float(, , + ) nounwind readnone +declare @__shuffle_i32(, + ) nounwind readnone +declare @__shuffle2_i32(, , + ) nounwind readnone +declare @__shuffle_double(, + ) nounwind readnone +declare @__shuffle2_double(, + , ) nounwind readnone +declare @__shuffle_i64(, + ) nounwind readnone +declare @__shuffle2_i64(, , + ) nounwind readnone ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; rounding doubles +;; aos/soa -declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) nounwind readnone - -define double @__round_uniform_double(double) nounwind readonly alwaysinline { - %xi = insertelement <2 x double> undef, double %0, i32 0 - %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 8) - %rs = extractelement <2 x double> %xr, i32 0 - ret double %rs -} - -define double @__floor_uniform_double(double) nounwind readonly alwaysinline { - ; see above for round_ss instrinsic discussion... - %xi = insertelement <2 x double> undef, double %0, i32 0 - ; roundsd, round down 0b01 | don't signal precision exceptions 0b1001 = 9 - %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 9) - %rs = extractelement <2 x double> %xr, i32 0 - ret double %rs -} - -define double @__ceil_uniform_double(double) nounwind readonly alwaysinline { - ; see above for round_ss instrinsic discussion... 
- %xi = insertelement <2 x double> undef, double %0, i32 0 - ; roundsd, round up 0b10 | don't signal precision exceptions 0b1010 = 10 - %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 10) - %rs = extractelement <2 x double> %xr, i32 0 - ret double %rs -} +declare void @__soa_to_aos3_float( %v0, %v1, + %v2, float * noalias %p) nounwind +declare void @__aos_to_soa3_float(float * noalias %p, * %out0, + * %out1, * %out2) nounwind +declare void @__soa_to_aos4_float( %v0, %v1, + %v2, %v3, + float * noalias %p) nounwind +declare void @__aos_to_soa4_float(float * noalias %p, * noalias %out0, + * noalias %out1, + * noalias %out2, + * noalias %out3) nounwind ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; rcp +;; half conversion routines -declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone - -define float @__rcp_uniform_float(float) nounwind readonly alwaysinline { - ; do the rcpss call - ; uniform float iv = extract(__rcp_u(v), 0); - ; return iv * (2. - v * iv); - %vecval = insertelement <4 x float> undef, float %0, i32 0 - %call = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %vecval) - %scall = extractelement <4 x float> %call, i32 0 - - ; do one N-R iteration to improve precision, as above - %v_iv = fmul float %0, %scall - %two_minus = fsub float 2., %v_iv - %iv_mul = fmul float %scall, %two_minus - ret float %iv_mul -} +declare float @__half_to_float_uniform(i16 %v) nounwind readnone +declare @__half_to_float_varying( %v) nounwind readnone +declare i16 @__float_to_half_uniform(float %v) nounwind readnone +declare @__float_to_half_varying( %v) nounwind readnone ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; rsqrt +;; math -declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone +declare void @__fastmath() nounwind -define float @__rsqrt_uniform_float(float) nounwind readonly alwaysinline { - ; uniform float is = extract(__rsqrt_u(v), 0); - %v = insertelement <4 x float> undef, float %0, i32 0 - %vis = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %v) - %is = extractelement <4 x float> %vis, i32 0 +;; round/floor/ceil - ; Newton-Raphson iteration to improve precision - ; return 0.5 * is * (3. 
- (v * is) * is); - %v_is = fmul float %0, %is - %v_is_is = fmul float %v_is, %is - %three_sub = fsub float 3., %v_is_is - %is_mul = fmul float %is, %three_sub - %half_scale = fmul float 0.5, %is_mul - ret float %half_scale -} +declare float @__round_uniform_float(float) nounwind readnone +declare float @__floor_uniform_float(float) nounwind readnone +declare float @__ceil_uniform_float(float) nounwind readnone +declare double @__round_uniform_double(double) nounwind readnone +declare double @__floor_uniform_double(double) nounwind readnone +declare double @__ceil_uniform_double(double) nounwind readnone + +declare @__round_varying_float() nounwind readnone +declare @__floor_varying_float() nounwind readnone +declare @__ceil_varying_float() nounwind readnone +declare @__round_varying_double() nounwind readnone +declare @__floor_varying_double() nounwind readnone +declare @__ceil_varying_double() nounwind readnone + +;; min/max + +declare float @__max_uniform_float(float, float) nounwind readnone +declare float @__min_uniform_float(float, float) nounwind readnone +declare i32 @__min_uniform_int32(i32, i32) nounwind readnone +declare i32 @__max_uniform_int32(i32, i32) nounwind readnone +declare i32 @__min_uniform_uint32(i32, i32) nounwind readnone +declare i32 @__max_uniform_uint32(i32, i32) nounwind readnone +declare i64 @__min_uniform_int64(i64, i64) nounwind readnone +declare i64 @__max_uniform_int64(i64, i64) nounwind readnone +declare i64 @__min_uniform_uint64(i64, i64) nounwind readnone +declare i64 @__max_uniform_uint64(i64, i64) nounwind readnone +declare double @__min_uniform_double(double, double) nounwind readnone +declare double @__max_uniform_double(double, double) nounwind readnone + +declare @__max_varying_float(, + ) nounwind readnone +declare @__min_varying_float(, + ) nounwind readnone +declare @__min_varying_int32(, ) nounwind readnone +declare @__max_varying_int32(, ) nounwind readnone +declare @__min_varying_uint32(, ) nounwind readnone +declare @__max_varying_uint32(, ) nounwind readnone +declare @__min_varying_int64(, ) nounwind readnone +declare @__max_varying_int64(, ) nounwind readnone +declare @__min_varying_uint64(, ) nounwind readnone +declare @__max_varying_uint64(, ) nounwind readnone +declare @__min_varying_double(, + ) nounwind readnone +declare @__max_varying_double(, + ) nounwind readnone + +;; sqrt/rsqrt/rcp + +declare float @__rsqrt_uniform_float(float) nounwind readnone +declare float @__rcp_uniform_float(float) nounwind readnone +declare float @__sqrt_uniform_float(float) nounwind readnone +declare @__rcp_varying_float() nounwind readnone +declare @__rsqrt_varying_float() nounwind readnone + +declare @__sqrt_varying_float() nounwind readnone + +declare double @__sqrt_uniform_double(double) nounwind readnone +declare @__sqrt_varying_double() nounwind readnone + +;; bit ops + +declare i32 @__popcnt_int32(i32) nounwind readnone +declare i64 @__popcnt_int64(i64) nounwind readnone + +declare i32 @__count_trailing_zeros_i32(i32) nounwind readnone +declare i64 @__count_trailing_zeros_i64(i64) nounwind readnone +declare i32 @__count_leading_zeros_i32(i32) nounwind readnone +declare i64 @__count_leading_zeros_i64(i64) nounwind readnone + +; FIXME: need either to wire these up to the 8-wide SVML entrypoints, +; or, use the macro to call the 4-wide ones twice with our 8-wide +; vectors... 
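+; (assumption: svml_stubs() from svml.m4 only emits stub declarations for the
+; SVML math entry points at the target width -- the real SVML wiring is still
+; to be done, per the FIXME above.)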
+ +;; svml + +include(`svml.m4') +svml_stubs(float,f,WIDTH) +svml_stubs(double,d,WIDTH) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; sqrt +;; reductions -declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone +declare i64 @__movmsk() nounwind readnone +declare i1 @__any() nounwind readnone +declare i1 @__all() nounwind readnone +declare i1 @__none() nounwind readnone -define float @__sqrt_uniform_float(float) nounwind readonly alwaysinline { - sse_unary_scalar(ret, 4, float, @llvm.x86.sse.sqrt.ss, %0) - ret float %ret -} +declare i16 @__reduce_add_int8() nounwind readnone +declare i32 @__reduce_add_int16() nounwind readnone + +declare float @__reduce_add_float() nounwind readnone +declare float @__reduce_min_float() nounwind readnone +declare float @__reduce_max_float() nounwind readnone + +declare i64 @__reduce_add_int32() nounwind readnone +declare i32 @__reduce_min_int32() nounwind readnone +declare i32 @__reduce_max_int32() nounwind readnone +declare i32 @__reduce_min_uint32() nounwind readnone +declare i32 @__reduce_max_uint32() nounwind readnone + +declare double @__reduce_add_double() nounwind readnone +declare double @__reduce_min_double() nounwind readnone +declare double @__reduce_max_double() nounwind readnone + +declare i64 @__reduce_add_int64() nounwind readnone +declare i64 @__reduce_min_int64() nounwind readnone +declare i64 @__reduce_max_int64() nounwind readnone +declare i64 @__reduce_min_uint64() nounwind readnone +declare i64 @__reduce_max_uint64() nounwind readnone ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; double precision sqrt +;; unaligned loads/loads+broadcasts -declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone -define double @__sqrt_uniform_double(double) nounwind alwaysinline { - sse_unary_scalar(ret, 2, double, @llvm.x86.sse2.sqrt.sd, %0) - ret double %ret +declare @__masked_load_i8(i8 * nocapture, %mask) nounwind readonly +declare @__masked_load_i16(i8 * nocapture, %mask) nounwind readonly +declare @__masked_load_i32(i8 * nocapture, %mask) nounwind readonly +declare @__masked_load_float(i8 * nocapture, %mask) nounwind readonly +declare @__masked_load_i64(i8 * nocapture, %mask) nounwind readonly +declare @__masked_load_double(i8 * nocapture, %mask) nounwind readonly + +declare void @__masked_store_i8(* nocapture, , + ) nounwind +declare void @__masked_store_i16(* nocapture, , + ) nounwind +declare void @__masked_store_i32(* nocapture, , + ) nounwind +declare void @__masked_store_float(* nocapture, , + ) nounwind +declare void @__masked_store_i64(* nocapture, , + %mask) nounwind +declare void @__masked_store_double(* nocapture, , + %mask) nounwind + + +define void @__masked_store_blend_i8(* nocapture, , + ) nounwind alwaysinline { + %v = load PTR_OP_ARGS(` ') %0 + %v1 = select %2, %1, %v + store %v1, * %0 + ret void } -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; fast math mode +define void @__masked_store_blend_i16(* nocapture, , + ) nounwind alwaysinline { + %v = load PTR_OP_ARGS(` ') %0 + %v1 = select %2, %1, %v + store %v1, * %0 + ret void +} -declare void @llvm.x86.sse.stmxcsr(i8 *) nounwind -declare void @llvm.x86.sse.ldmxcsr(i8 *) nounwind +define void @__masked_store_blend_i32(* nocapture, , + ) nounwind alwaysinline { + %v = load PTR_OP_ARGS(` ') %0 + %v1 = select %2, %1, %v + store %v1, * %0 + ret void +} -define void @__fastmath() nounwind alwaysinline { - %ptr = alloca i32 - %ptr8 = bitcast i32 * %ptr to 
i8 * - call void @llvm.x86.sse.stmxcsr(i8 * %ptr8) - %oldval = load PTR_OP_ARGS(`i32 ') %ptr +define void @__masked_store_blend_float(* nocapture, , + ) nounwind alwaysinline { + %v = load PTR_OP_ARGS(` ') %0 + %v1 = select %2, %1, %v + store %v1, * %0 + ret void +} - ; turn on DAZ (64)/FTZ (32768) -> 32832 - %update = or i32 %oldval, 32832 - store i32 %update, i32 *%ptr - call void @llvm.x86.sse.ldmxcsr(i8 * %ptr8) +define void @__masked_store_blend_i64(* nocapture, + , ) nounwind alwaysinline { + %v = load PTR_OP_ARGS(` ') %0 + %v1 = select %2, %1, %v + store %v1, * %0 + ret void +} + +define void @__masked_store_blend_double(* nocapture, + , ) nounwind alwaysinline { + %v = load PTR_OP_ARGS(` ') %0 + %v1 = select %2, %1, %v + store %v1, * %0 ret void } ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; float min/max +;; gather/scatter -define float @__max_uniform_float(float, float) nounwind readonly alwaysinline { - %cmp = fcmp ogt float %1, %0 - %ret = select i1 %cmp, float %1, float %0 - ret float %ret -} +define(`gather_scatter', ` +declare @__gather_base_offsets32_$1(i8 * nocapture, i32, , + ) nounwind readonly +declare @__gather_base_offsets64_$1(i8 * nocapture, i32, , + ) nounwind readonly +declare @__gather32_$1(, + ) nounwind readonly +declare @__gather64_$1(, + ) nounwind readonly -define float @__min_uniform_float(float, float) nounwind readonly alwaysinline { - %cmp = fcmp ogt float %1, %0 - %ret = select i1 %cmp, float %0, float %1 - ret float %ret -} +declare void @__scatter_base_offsets32_$1(i8* nocapture, i32, , + , ) nounwind +declare void @__scatter_base_offsets64_$1(i8* nocapture, i32, , + , ) nounwind +declare void @__scatter32_$1(, , + ) nounwind +declare void @__scatter64_$1(, , + ) nounwind +') -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; double precision min/max +gather_scatter(i8) +gather_scatter(i16) +gather_scatter(i32) +gather_scatter(float) +gather_scatter(i64) +gather_scatter(double) -define double @__min_uniform_double(double, double) nounwind readnone alwaysinline { - %cmp = fcmp ogt double %1, %0 - %ret = select i1 %cmp, double %0, double %1 - ret double %ret -} - -define double @__max_uniform_double(double, double) nounwind readnone alwaysinline { - %cmp = fcmp ogt double %1, %0 - %ret = select i1 %cmp, double %1, double %0 - ret double %ret -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone -declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone -declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone -declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; int min/max - -define i32 @__min_uniform_int32(i32, i32) nounwind readonly alwaysinline { - %cmp = icmp sgt i32 %1, %0 - %ret = select i1 %cmp, i32 %0, i32 %1 - ret i32 %ret -} - -define i32 @__max_uniform_int32(i32, i32) nounwind readonly alwaysinline { - %cmp = icmp sgt i32 %1, %0 - %ret = select i1 %cmp, i32 %1, i32 %0 - ret i32 %ret -} +declare i32 @__packed_load_active(i32 * nocapture, * nocapture, + ) nounwind +declare i32 @__packed_store_active(i32 * nocapture, %vals, + ) nounwind +declare i32 @__packed_store_active2(i32 * nocapture, %vals, + ) nounwind ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; unsigned int min/max - -define 
i32 @__min_uniform_uint32(i32, i32) nounwind readonly alwaysinline { - %cmp = icmp ugt i32 %1, %0 - %ret = select i1 %cmp, i32 %0, i32 %1 - ret i32 %ret -} - -define i32 @__max_uniform_uint32(i32, i32) nounwind readonly alwaysinline { - %cmp = icmp ugt i32 %1, %0 - %ret = select i1 %cmp, i32 %1, i32 %0 - ret i32 %ret -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; horizontal ops / reductions - -declare i32 @llvm.ctpop.i32(i32) nounwind readnone - -define i32 @__popcnt_int32(i32) nounwind readonly alwaysinline { - %call = call i32 @llvm.ctpop.i32(i32 %0) - ret i32 %call -} - -declare i64 @llvm.ctpop.i64(i64) nounwind readnone - -define i64 @__popcnt_int64(i64) nounwind readonly alwaysinline { - %call = call i64 @llvm.ctpop.i64(i64 %0) - ret i64 %call -} +;; prefetch +declare void @__prefetch_read_uniform_1(i8 * nocapture) nounwind +declare void @__prefetch_read_uniform_2(i8 * nocapture) nounwind +declare void @__prefetch_read_uniform_3(i8 * nocapture) nounwind +declare void @__prefetch_read_uniform_nt(i8 * nocapture) nounwind +declare void @__prefetch_read_varying_1( %addr, %mask) nounwind +declare void @__prefetch_read_varying_1_native(i8 * %base, i32 %scale, %offsets, %mask) nounwind +declare void @__prefetch_read_varying_2( %addr, %mask) nounwind +declare void @__prefetch_read_varying_2_native(i8 * %base, i32 %scale, %offsets, %mask) nounwind +declare void @__prefetch_read_varying_3( %addr, %mask) nounwind +declare void @__prefetch_read_varying_3_native(i8 * %base, i32 %scale, %offsets, %mask) nounwind +declare void @__prefetch_read_varying_nt( %addr, %mask) nounwind +declare void @__prefetch_read_varying_nt_native(i8 * %base, i32 %scale, %offsets, %mask) nounwind ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; int8/int16 builtins define_avgs() declare_nvptx() +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; reciprocals in double precision, if supported + +rsqrtd_decl() +rcpd_decl() + +transcendetals_decl() +trigonometry_decl() diff --git a/builtins/target-knl.ll b/builtins/target-knl.ll index abc39885..8305dda7 100644 --- a/builtins/target-knl.ll +++ b/builtins/target-knl.ll @@ -1,4 +1,4 @@ -;; Copyright (c) 2010-2013, Intel Corporation +;; Copyright (c) 2010-2014, Intel Corporation ;; All rights reserved. ;; ;; Redistribution and use in source and binary forms, with or without @@ -29,510 +29,6 @@ ;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
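
;; Among the masked load/store entry points declared as stubs in
;; target-avx512-common.ll above, only the __masked_store_blend_* templates carry
;; an implementation: a plain load / select / store blend. As an illustrative
;; sketch only, assuming WIDTH=16 and MASK=i1 as this target configures them, and
;; assuming PTR_OP_ARGS emits the explicitly typed load operand needed on newer
;; LLVM versions, the i32 variant is expected to expand to roughly:

define void @__masked_store_blend_i32(<16 x i32>* nocapture, <16 x i32>,
                                      <16 x i1>) nounwind alwaysinline {
  %v = load <16 x i32>, <16 x i32>* %0                       ; current memory contents
  %v1 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %v    ; new lanes where the mask is on, old elsewhere
  store <16 x i32> %v1, <16 x i32>* %0
  ret void
}
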
-define(`HAVE_GATHER', `1') - -include(`target-avx-x2.ll') - -rdrand_definition() - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; int min/max - -declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readonly -declare <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32>, <8 x i32>) nounwind readonly - -define <16 x i32> @__min_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { - binary8to16(m, i32, @llvm.x86.avx2.pmins.d, %0, %1) - ret <16 x i32> %m -} - -define <16 x i32> @__max_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { - binary8to16(m, i32, @llvm.x86.avx2.pmaxs.d, %0, %1) - ret <16 x i32> %m -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; unsigned int min/max - -declare <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32>, <8 x i32>) nounwind readonly -declare <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32>, <8 x i32>) nounwind readonly - -define <16 x i32> @__min_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { - binary8to16(m, i32, @llvm.x86.avx2.pminu.d, %0, %1) - ret <16 x i32> %m -} - -define <16 x i32> @__max_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { - binary8to16(m, i32, @llvm.x86.avx2.pmaxu.d, %0, %1) - ret <16 x i32> %m -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; float/half conversions - -declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readnone -; 0 is round nearest even -declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readnone - -define <16 x float> @__half_to_float_varying(<16 x i16> %v) nounwind readnone { - %r_0 = shufflevector <16 x i16> %v, <16 x i16> undef, - <8 x i32> - %vr_0 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %r_0) - %r_1 = shufflevector <16 x i16> %v, <16 x i16> undef, - <8 x i32> - %vr_1 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %r_1) - %r = shufflevector <8 x float> %vr_0, <8 x float> %vr_1, - <16 x i32> - ret <16 x float> %r -} - -define <16 x i16> @__float_to_half_varying(<16 x float> %v) nounwind readnone { - %r_0 = shufflevector <16 x float> %v, <16 x float> undef, - <8 x i32> - %vr_0 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %r_0, i32 0) - %r_1 = shufflevector <16 x float> %v, <16 x float> undef, - <8 x i32> - %vr_1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %r_1, i32 0) - %r = shufflevector <8 x i16> %vr_0, <8 x i16> %vr_1, - <16 x i32> - ret <16 x i16> %r -} - -define float @__half_to_float_uniform(i16 %v) nounwind readnone { - %v1 = bitcast i16 %v to <1 x i16> - %vv = shufflevector <1 x i16> %v1, <1 x i16> undef, - <8 x i32> - %rv = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %vv) - %r = extractelement <8 x float> %rv, i32 0 - ret float %r -} - -define i16 @__float_to_half_uniform(float %v) nounwind readnone { - %v1 = bitcast float %v to <1 x float> - %vv = shufflevector <1 x float> %v1, <1 x float> undef, - <8 x i32> - ; round to nearest even - %rv = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %vv, i32 0) - %r = extractelement <8 x i16> %rv, i32 0 - ret i16 %r -} - - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; gather - -declare void @llvm.trap() noreturn nounwind - -; $1: type -; $2: var base name -define(`extract_4s', ` - %$2_1 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> - %$2_2 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> - %$2_3 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> - 
%$2_4 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> -') - -; $1: type -; $2: var base name -define(`extract_8s', ` - %$2_1 = shufflevector <16 x $1> %$2, <16 x $1> undef, - <8 x i32> - %$2_2 = shufflevector <16 x $1> %$2, <16 x $1> undef, - <8 x i32> -') - -; $1: element type -; $2: ret name -; $3: v1 -; $4: v2 -define(`assemble_8s', ` - %$2 = shufflevector <8 x $1> %$3, <8 x $1> %$4, - <16 x i32> -') - -; $1: element type -; $2: ret name -; $3: v1 -; $4: v2 -; $5: v3 -; $6: v4 -define(`assemble_4s', ` - %$2_1 = shufflevector <4 x $1> %$3, <4 x $1> %$4, - <8 x i32> - %$2_2 = shufflevector <4 x $1> %$5, <4 x $1> %$6, - <8 x i32> - assemble_8s($1, $2, $2_1, $2_2) -') - - -gen_gather(i8) -gen_gather(i16) - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; int32 gathers - -declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %target, i8 * %ptr, - <8 x i32> %indices, <8 x i32> %mask, i8 %scale) readonly nounwind -declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %target, i8 * %ptr, - <4 x i64> %indices, <4 x i32> %mask, i8 %scale) readonly nounwind - -define <16 x i32> @__gather_base_offsets32_i32(i8 * %ptr, i32 %scale, <16 x i32> %offsets, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - extract_8s(i32, offsets) - extract_8s(i32, vecmask) - - %v1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * %ptr, - <8 x i32> %offsets_1, <8 x i32> %vecmask_1, i8 %scale8) - %v2 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * %ptr, - <8 x i32> %offsets_2, <8 x i32> %vecmask_2, i8 %scale8) - - assemble_8s(i32, v, v1, v2) - - ret <16 x i32> %v -} - - -define <16 x i32> @__gather_base_offsets64_i32(i8 * %ptr, - i32 %scale, <16 x i64> %offsets, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - - extract_4s(i32, vecmask) - extract_4s(i64, offsets) - - %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, - <4 x i64> %offsets_1, <4 x i32> %vecmask_1, i8 %scale8) - %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, - <4 x i64> %offsets_2, <4 x i32> %vecmask_2, i8 %scale8) - %v3 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, - <4 x i64> %offsets_3, <4 x i32> %vecmask_3, i8 %scale8) - %v4 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, - <4 x i64> %offsets_4, <4 x i32> %vecmask_4, i8 %scale8) - - assemble_4s(i32, v, v1, v2, v3, v4) - - ret <16 x i32> %v -} - - -define <16 x i32> @__gather32_i32(<16 x i32> %ptrs, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - extract_8s(i32, ptrs) - extract_8s(i32, vecmask) - - %v1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * null, - <8 x i32> %ptrs_1, <8 x i32> %vecmask_1, i8 1) - %v2 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * null, - <8 x i32> %ptrs_2, <8 x i32> %vecmask_2, i8 1) - - assemble_8s(i32, v, v1, v2) - - ret <16 x i32> %v -} - - -define <16 x i32> @__gather64_i32(<16 x i64> %ptrs, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - extract_4s(i64, ptrs) - extract_4s(i32, vecmask) - - %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, - <4 x i64> %ptrs_1, <4 x i32> %vecmask_1, i8 1) - %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, - <4 x i64> %ptrs_2, <4 x i32> %vecmask_2, i8 1) - %v3 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, - <4 
x i64> %ptrs_3, <4 x i32> %vecmask_3, i8 1) - %v4 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, - <4 x i64> %ptrs_4, <4 x i32> %vecmask_4, i8 1) - - assemble_4s(i32, v, v1, v2, v3, v4) - - ret <16 x i32> %v -} - - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; float gathers - -declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %target, i8 * %ptr, - <8 x i32> %indices, <8 x float> %mask, i8 %scale8) readonly nounwind -declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %target, i8 * %ptr, - <4 x i64> %indices, <4 x float> %mask, i8 %scale8) readonly nounwind - -define <16 x float> @__gather_base_offsets32_float(i8 * %ptr, - i32 %scale, <16 x i32> %offsets, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %mask = bitcast <16 x i32> %vecmask to <16 x float> - extract_8s(i32, offsets) - extract_8s(float, mask) - - %v1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * %ptr, - <8 x i32> %offsets_1, <8 x float> %mask_1, i8 %scale8) - %v2 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * %ptr, - <8 x i32> %offsets_2, <8 x float> %mask_2, i8 %scale8) - - assemble_8s(float, v, v1, v2) - - ret <16 x float> %v -} - - -define <16 x float> @__gather_base_offsets64_float(i8 * %ptr, - i32 %scale, <16 x i64> %offsets, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %mask = bitcast <16 x i32> %vecmask to <16 x float> - extract_4s(i64, offsets) - extract_4s(float, mask) - - %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, - <4 x i64> %offsets_1, <4 x float> %mask_1, i8 %scale8) - %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, - <4 x i64> %offsets_2, <4 x float> %mask_2, i8 %scale8) - %v3 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, - <4 x i64> %offsets_3, <4 x float> %mask_3, i8 %scale8) - %v4 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, - <4 x i64> %offsets_4, <4 x float> %mask_4, i8 %scale8) - - assemble_4s(float, v, v1, v2, v3, v4) - - ret <16 x float> %v -} - - -define <16 x float> @__gather32_float(<16 x i32> %ptrs, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %mask = bitcast <16 x i32> %vecmask to <16 x float> - extract_8s(float, mask) - extract_8s(i32, ptrs) - - %v1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * null, - <8 x i32> %ptrs_1, <8 x float> %mask_1, i8 1) - %v2 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * null, - <8 x i32> %ptrs_2, <8 x float> %mask_2, i8 1) - - assemble_8s(float, v, v1, v2) - - ret <16 x float> %v -} - - -define <16 x float> @__gather64_float(<16 x i64> %ptrs, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %mask = bitcast <16 x i32> %vecmask to <16 x float> - extract_4s(i64, ptrs) - extract_4s(float, mask) - - %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, - <4 x i64> %ptrs_1, <4 x float> %mask_1, i8 1) - %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, - <4 x i64> %ptrs_2, <4 x float> %mask_2, i8 1) - %v3 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, - <4 x i64> %ptrs_3, <4 x float> %mask_3, i8 1) - %v4 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, - <4 x i64> %ptrs_4, <4 x float> %mask_4, i8 1) - - 
assemble_4s(float, v, v1, v2, v3, v4) - - ret <16 x float> %v -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; int64 gathers - -declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %target, i8 * %ptr, - <4 x i32> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind -declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %target, i8 * %ptr, - <4 x i64> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind - -define <16 x i64> @__gather_base_offsets32_i64(i8 * %ptr, - i32 %scale, <16 x i32> %offsets, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %vecmask = sext <16 x i32> %mask32 to <16 x i64> - extract_4s(i32, offsets) - extract_4s(i64, vecmask) - - %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i32> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8) - %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i32> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8) - %v3 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i32> %offsets_3, <4 x i64> %vecmask_3, i8 %scale8) - %v4 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i32> %offsets_4, <4 x i64> %vecmask_4, i8 %scale8) - - assemble_4s(i64, v, v1, v2, v3, v4) - - ret <16 x i64> %v -} - - -define <16 x i64> @__gather_base_offsets64_i64(i8 * %ptr, - i32 %scale, <16 x i64> %offsets, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %vecmask = sext <16 x i32> %mask32 to <16 x i64> - extract_4s(i64, offsets) - extract_4s(i64, vecmask) - - %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i64> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8) - %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i64> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8) - %v3 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i64> %offsets_3, <4 x i64> %vecmask_3, i8 %scale8) - %v4 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i64> %offsets_4, <4 x i64> %vecmask_4, i8 %scale8) - - assemble_4s(i64, v, v1, v2, v3, v4) - - ret <16 x i64> %v -} - - -define <16 x i64> @__gather32_i64(<16 x i32> %ptrs, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %vecmask = sext <16 x i32> %mask32 to <16 x i64> - extract_4s(i32, ptrs) - extract_4s(i64, vecmask) - - %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, - <4 x i32> %ptrs_1, <4 x i64> %vecmask_1, i8 1) - %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, - <4 x i32> %ptrs_2, <4 x i64> %vecmask_2, i8 1) - %v3 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, - <4 x i32> %ptrs_3, <4 x i64> %vecmask_3, i8 1) - %v4 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, - <4 x i32> %ptrs_4, <4 x i64> %vecmask_4, i8 1) - - assemble_4s(i64, v, v1, v2, v3, v4) - - ret <16 x i64> %v -} - -define <16 x i64> @__gather64_i64(<16 x i64> %ptrs, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %vecmask = sext <16 x i32> %mask32 to <16 x i64> - extract_4s(i64, ptrs) - extract_4s(i64, vecmask) - - %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, - <4 x i64> %ptrs_1, <4 x i64> %vecmask_1, i8 1) - %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, - <4 x i64> %ptrs_2, <4 x i64> 
%vecmask_2, i8 1) - %v3 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, - <4 x i64> %ptrs_3, <4 x i64> %vecmask_3, i8 1) - %v4 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, - <4 x i64> %ptrs_4, <4 x i64> %vecmask_4, i8 1) - - assemble_4s(i64, v, v1, v2, v3, v4) - - ret <16 x i64> %v -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; double gathers - -declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %target, i8 * %ptr, - <4 x i64> %indices, <4 x double> %mask, i8 %scale) readonly nounwind -declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %target, i8 * %ptr, - <4 x i32> %indices, <4 x double> %mask, i8 %scale) readonly nounwind - -define <16 x double> @__gather_base_offsets32_double(i8 * %ptr, - i32 %scale, <16 x i32> %offsets, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> - %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> - extract_4s(i32, offsets) - extract_4s(double, vecmask) - - %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i32> %offsets_1, <4 x double> %vecmask_1, i8 %scale8) - %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i32> %offsets_2, <4 x double> %vecmask_2, i8 %scale8) - %v3 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i32> %offsets_3, <4 x double> %vecmask_3, i8 %scale8) - %v4 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i32> %offsets_4, <4 x double> %vecmask_4, i8 %scale8) - - assemble_4s(double, v, v1, v2, v3, v4) - - ret <16 x double> %v -} - - -define <16 x double> @__gather_base_offsets64_double(i8 * %ptr, - i32 %scale, <16 x i64> %offsets, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> - %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> - extract_4s(i64, offsets) - extract_4s(double, vecmask) - - %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i64> %offsets_1, <4 x double> %vecmask_1, i8 %scale8) - %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i64> %offsets_2, <4 x double> %vecmask_2, i8 %scale8) - %v3 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i64> %offsets_3, <4 x double> %vecmask_3, i8 %scale8) - %v4 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i64> %offsets_4, <4 x double> %vecmask_4, i8 %scale8) - - assemble_4s(double, v, v1, v2, v3, v4) - - ret <16 x double> %v -} - - -define <16 x double> @__gather32_double(<16 x i32> %ptrs, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> - %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> - extract_4s(i32, ptrs) - extract_4s(double, vecmask) - - %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, - <4 x i32> %ptrs_1, <4 x double> %vecmask_1, i8 1) - %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, - <4 x i32> %ptrs_2, <4 x double> %vecmask_2, i8 1) - %v3 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, - <4 x i32> %ptrs_3, <4 x double> %vecmask_3, i8 1) - %v4 = call <4 x double> 
@llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, - <4 x i32> %ptrs_4, <4 x double> %vecmask_4, i8 1) - - assemble_4s(double, v, v1, v2, v3, v4) - - ret <16 x double> %v -} - - -define <16 x double> @__gather64_double(<16 x i64> %ptrs, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> - %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> - extract_4s(i64, ptrs) - extract_4s(double, vecmask) - - %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null, - <4 x i64> %ptrs_1, <4 x double> %vecmask_1, i8 1) - %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null, - <4 x i64> %ptrs_2, <4 x double> %vecmask_2, i8 1) - %v3 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null, - <4 x i64> %ptrs_3, <4 x double> %vecmask_3, i8 1) - %v4 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null, - <4 x i64> %ptrs_4, <4 x double> %vecmask_4, i8 1) - - assemble_4s(double, v, v1, v2, v3, v4) - - ret <16 x double> %v -} +define(`WIDTH',`16') +include(`target-avx512-common.ll') +saturation_arithmetic_novec() diff --git a/ispc.cpp b/ispc.cpp index 3596e171..05affd32 100644 --- a/ispc.cpp +++ b/ispc.cpp @@ -1126,7 +1126,9 @@ Target::SupportedTargets() { "avx2-i32x8, avx2-i32x16, avx2-i64x4, " "generic-x1, generic-x4, generic-x8, generic-x16, " "generic-x32, generic-x64, *-generic-x16, " +#if !defined(LLVM_3_2) && !defined(LLVM_3_3) && !defined(LLVM_3_4) && !defined(LLVM_3_5) && !defined(LLVM_3_6)// LLVM 3.7+ "knl-avx512" +#endif #ifdef ISPC_ARM_ENABLED ", neon-i8x16, neon-i16x8, neon-i32x4" #endif @@ -1195,8 +1197,10 @@ Target::ISAToString(ISA isa) { return "avx11"; case Target::AVX2: return "avx2"; +#if !defined(LLVM_3_2) && !defined(LLVM_3_3) && !defined(LLVM_3_4) && !defined(LLVM_3_5) && !defined(LLVM_3_6)// LLVM 3.7+ case Target::KNL_AVX512: return "knl-avx512"; +#endif case Target::SKX: return "skx"; case Target::GENERIC: @@ -1241,8 +1245,10 @@ Target::ISAToTargetString(ISA isa) { return "avx1.1-i32x8"; case Target::AVX2: return "avx2-i32x8"; +#if !defined(LLVM_3_2) && !defined(LLVM_3_3) && !defined(LLVM_3_4) && !defined(LLVM_3_5) && !defined(LLVM_3_6)// LLVM 3.7+ case Target::KNL_AVX512: return "knl-avx512"; +#endif case Target::SKX: return "avx2"; case Target::GENERIC: From 46528caa5aba7af7ead08c06db20aadb3a2f581b Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Wed, 6 May 2015 16:00:40 +0300 Subject: [PATCH 07/23] [AVX512]: add avx-based ll file --- builtins/target-knl.ll | 512 +++++++++++++++++++++++++++++++++++++++- builtins/target-knl.ll_ | 34 +++ 2 files changed, 542 insertions(+), 4 deletions(-) create mode 100644 builtins/target-knl.ll_ diff --git a/builtins/target-knl.ll b/builtins/target-knl.ll index 8305dda7..abc39885 100644 --- a/builtins/target-knl.ll +++ b/builtins/target-knl.ll @@ -1,4 +1,4 @@ -;; Copyright (c) 2010-2014, Intel Corporation +;; Copyright (c) 2010-2013, Intel Corporation ;; All rights reserved. ;; ;; Redistribution and use in source and binary forms, with or without @@ -29,6 +29,510 @@ ;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
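
;; This hunk restores the AVX2-based 16-wide implementation in target-knl.ll; the
;; three-line AVX512-common include is parked in the new builtins/target-knl.ll_
;; below. The restored code widens 8-wide AVX2 intrinsics to the 16-lane program
;; width through the binary8to16 helper from util.m4. A sketch of the expansion it
;; is expected to produce for __min_varying_int32 (temporary names are
;; illustrative):

declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readonly

define <16 x i32> @__min_varying_int32(<16 x i32> %a, <16 x i32> %b) nounwind readonly alwaysinline {
  ; split each 16-wide operand into its low and high 8-wide halves
  %a_lo = shufflevector <16 x i32> %a, <16 x i32> undef,
            <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %a_hi = shufflevector <16 x i32> %a, <16 x i32> undef,
            <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %b_lo = shufflevector <16 x i32> %b, <16 x i32> undef,
            <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %b_hi = shufflevector <16 x i32> %b, <16 x i32> undef,
            <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ; run the 8-wide AVX2 intrinsic on each half
  %r_lo = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %a_lo, <8 x i32> %b_lo)
  %r_hi = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %a_hi, <8 x i32> %b_hi)
  ; concatenate the halves back into a 16-wide result
  %r = shufflevector <8 x i32> %r_lo, <8 x i32> %r_hi,
         <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                     i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x i32> %r
}
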
-define(`WIDTH',`16') -include(`target-avx512-common.ll') -saturation_arithmetic_novec() +define(`HAVE_GATHER', `1') + +include(`target-avx-x2.ll') + +rdrand_definition() + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; int min/max + +declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readonly +declare <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32>, <8 x i32>) nounwind readonly + +define <16 x i32> @__min_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { + binary8to16(m, i32, @llvm.x86.avx2.pmins.d, %0, %1) + ret <16 x i32> %m +} + +define <16 x i32> @__max_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { + binary8to16(m, i32, @llvm.x86.avx2.pmaxs.d, %0, %1) + ret <16 x i32> %m +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; unsigned int min/max + +declare <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32>, <8 x i32>) nounwind readonly +declare <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32>, <8 x i32>) nounwind readonly + +define <16 x i32> @__min_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { + binary8to16(m, i32, @llvm.x86.avx2.pminu.d, %0, %1) + ret <16 x i32> %m +} + +define <16 x i32> @__max_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { + binary8to16(m, i32, @llvm.x86.avx2.pmaxu.d, %0, %1) + ret <16 x i32> %m +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; float/half conversions + +declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readnone +; 0 is round nearest even +declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readnone + +define <16 x float> @__half_to_float_varying(<16 x i16> %v) nounwind readnone { + %r_0 = shufflevector <16 x i16> %v, <16 x i16> undef, + <8 x i32> + %vr_0 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %r_0) + %r_1 = shufflevector <16 x i16> %v, <16 x i16> undef, + <8 x i32> + %vr_1 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %r_1) + %r = shufflevector <8 x float> %vr_0, <8 x float> %vr_1, + <16 x i32> + ret <16 x float> %r +} + +define <16 x i16> @__float_to_half_varying(<16 x float> %v) nounwind readnone { + %r_0 = shufflevector <16 x float> %v, <16 x float> undef, + <8 x i32> + %vr_0 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %r_0, i32 0) + %r_1 = shufflevector <16 x float> %v, <16 x float> undef, + <8 x i32> + %vr_1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %r_1, i32 0) + %r = shufflevector <8 x i16> %vr_0, <8 x i16> %vr_1, + <16 x i32> + ret <16 x i16> %r +} + +define float @__half_to_float_uniform(i16 %v) nounwind readnone { + %v1 = bitcast i16 %v to <1 x i16> + %vv = shufflevector <1 x i16> %v1, <1 x i16> undef, + <8 x i32> + %rv = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %vv) + %r = extractelement <8 x float> %rv, i32 0 + ret float %r +} + +define i16 @__float_to_half_uniform(float %v) nounwind readnone { + %v1 = bitcast float %v to <1 x float> + %vv = shufflevector <1 x float> %v1, <1 x float> undef, + <8 x i32> + ; round to nearest even + %rv = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %vv, i32 0) + %r = extractelement <8 x i16> %rv, i32 0 + ret i16 %r +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; gather + +declare void @llvm.trap() noreturn nounwind + +; $1: type +; $2: var base name +define(`extract_4s', ` + %$2_1 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> + %$2_2 = shufflevector <16 x $1> %$2, <16 x 
$1> undef, <4 x i32> + %$2_3 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> + %$2_4 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> +') + +; $1: type +; $2: var base name +define(`extract_8s', ` + %$2_1 = shufflevector <16 x $1> %$2, <16 x $1> undef, + <8 x i32> + %$2_2 = shufflevector <16 x $1> %$2, <16 x $1> undef, + <8 x i32> +') + +; $1: element type +; $2: ret name +; $3: v1 +; $4: v2 +define(`assemble_8s', ` + %$2 = shufflevector <8 x $1> %$3, <8 x $1> %$4, + <16 x i32> +') + +; $1: element type +; $2: ret name +; $3: v1 +; $4: v2 +; $5: v3 +; $6: v4 +define(`assemble_4s', ` + %$2_1 = shufflevector <4 x $1> %$3, <4 x $1> %$4, + <8 x i32> + %$2_2 = shufflevector <4 x $1> %$5, <4 x $1> %$6, + <8 x i32> + assemble_8s($1, $2, $2_1, $2_2) +') + + +gen_gather(i8) +gen_gather(i16) + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; int32 gathers + +declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %target, i8 * %ptr, + <8 x i32> %indices, <8 x i32> %mask, i8 %scale) readonly nounwind +declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %target, i8 * %ptr, + <4 x i64> %indices, <4 x i32> %mask, i8 %scale) readonly nounwind + +define <16 x i32> @__gather_base_offsets32_i32(i8 * %ptr, i32 %scale, <16 x i32> %offsets, + <16 x i32> %vecmask) nounwind readonly alwaysinline { + %scale8 = trunc i32 %scale to i8 + extract_8s(i32, offsets) + extract_8s(i32, vecmask) + + %v1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * %ptr, + <8 x i32> %offsets_1, <8 x i32> %vecmask_1, i8 %scale8) + %v2 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * %ptr, + <8 x i32> %offsets_2, <8 x i32> %vecmask_2, i8 %scale8) + + assemble_8s(i32, v, v1, v2) + + ret <16 x i32> %v +} + + +define <16 x i32> @__gather_base_offsets64_i32(i8 * %ptr, + i32 %scale, <16 x i64> %offsets, + <16 x i32> %vecmask) nounwind readonly alwaysinline { + %scale8 = trunc i32 %scale to i8 + + extract_4s(i32, vecmask) + extract_4s(i64, offsets) + + %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, + <4 x i64> %offsets_1, <4 x i32> %vecmask_1, i8 %scale8) + %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, + <4 x i64> %offsets_2, <4 x i32> %vecmask_2, i8 %scale8) + %v3 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, + <4 x i64> %offsets_3, <4 x i32> %vecmask_3, i8 %scale8) + %v4 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, + <4 x i64> %offsets_4, <4 x i32> %vecmask_4, i8 %scale8) + + assemble_4s(i32, v, v1, v2, v3, v4) + + ret <16 x i32> %v +} + + +define <16 x i32> @__gather32_i32(<16 x i32> %ptrs, + <16 x i32> %vecmask) nounwind readonly alwaysinline { + extract_8s(i32, ptrs) + extract_8s(i32, vecmask) + + %v1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * null, + <8 x i32> %ptrs_1, <8 x i32> %vecmask_1, i8 1) + %v2 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * null, + <8 x i32> %ptrs_2, <8 x i32> %vecmask_2, i8 1) + + assemble_8s(i32, v, v1, v2) + + ret <16 x i32> %v +} + + +define <16 x i32> @__gather64_i32(<16 x i64> %ptrs, + <16 x i32> %vecmask) nounwind readonly alwaysinline { + extract_4s(i64, ptrs) + extract_4s(i32, vecmask) + + %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, + <4 x i64> %ptrs_1, <4 x i32> %vecmask_1, i8 1) + %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, + <4 x i64> %ptrs_2, <4 x i32> %vecmask_2, i8 
1) + %v3 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, + <4 x i64> %ptrs_3, <4 x i32> %vecmask_3, i8 1) + %v4 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, + <4 x i64> %ptrs_4, <4 x i32> %vecmask_4, i8 1) + + assemble_4s(i32, v, v1, v2, v3, v4) + + ret <16 x i32> %v +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; float gathers + +declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %target, i8 * %ptr, + <8 x i32> %indices, <8 x float> %mask, i8 %scale8) readonly nounwind +declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %target, i8 * %ptr, + <4 x i64> %indices, <4 x float> %mask, i8 %scale8) readonly nounwind + +define <16 x float> @__gather_base_offsets32_float(i8 * %ptr, + i32 %scale, <16 x i32> %offsets, + <16 x i32> %vecmask) nounwind readonly alwaysinline { + %scale8 = trunc i32 %scale to i8 + %mask = bitcast <16 x i32> %vecmask to <16 x float> + extract_8s(i32, offsets) + extract_8s(float, mask) + + %v1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * %ptr, + <8 x i32> %offsets_1, <8 x float> %mask_1, i8 %scale8) + %v2 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * %ptr, + <8 x i32> %offsets_2, <8 x float> %mask_2, i8 %scale8) + + assemble_8s(float, v, v1, v2) + + ret <16 x float> %v +} + + +define <16 x float> @__gather_base_offsets64_float(i8 * %ptr, + i32 %scale, <16 x i64> %offsets, + <16 x i32> %vecmask) nounwind readonly alwaysinline { + %scale8 = trunc i32 %scale to i8 + %mask = bitcast <16 x i32> %vecmask to <16 x float> + extract_4s(i64, offsets) + extract_4s(float, mask) + + %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, + <4 x i64> %offsets_1, <4 x float> %mask_1, i8 %scale8) + %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, + <4 x i64> %offsets_2, <4 x float> %mask_2, i8 %scale8) + %v3 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, + <4 x i64> %offsets_3, <4 x float> %mask_3, i8 %scale8) + %v4 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, + <4 x i64> %offsets_4, <4 x float> %mask_4, i8 %scale8) + + assemble_4s(float, v, v1, v2, v3, v4) + + ret <16 x float> %v +} + + +define <16 x float> @__gather32_float(<16 x i32> %ptrs, + <16 x i32> %vecmask) nounwind readonly alwaysinline { + %mask = bitcast <16 x i32> %vecmask to <16 x float> + extract_8s(float, mask) + extract_8s(i32, ptrs) + + %v1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * null, + <8 x i32> %ptrs_1, <8 x float> %mask_1, i8 1) + %v2 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * null, + <8 x i32> %ptrs_2, <8 x float> %mask_2, i8 1) + + assemble_8s(float, v, v1, v2) + + ret <16 x float> %v +} + + +define <16 x float> @__gather64_float(<16 x i64> %ptrs, + <16 x i32> %vecmask) nounwind readonly alwaysinline { + %mask = bitcast <16 x i32> %vecmask to <16 x float> + extract_4s(i64, ptrs) + extract_4s(float, mask) + + %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, + <4 x i64> %ptrs_1, <4 x float> %mask_1, i8 1) + %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, + <4 x i64> %ptrs_2, <4 x float> %mask_2, i8 1) + %v3 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, + <4 x i64> %ptrs_3, <4 x float> %mask_3, i8 1) + %v4 = call <4 x float> 
@llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, + <4 x i64> %ptrs_4, <4 x float> %mask_4, i8 1) + + assemble_4s(float, v, v1, v2, v3, v4) + + ret <16 x float> %v +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; int64 gathers + +declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %target, i8 * %ptr, + <4 x i32> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind +declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %target, i8 * %ptr, + <4 x i64> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind + +define <16 x i64> @__gather_base_offsets32_i64(i8 * %ptr, + i32 %scale, <16 x i32> %offsets, + <16 x i32> %mask32) nounwind readonly alwaysinline { + %scale8 = trunc i32 %scale to i8 + %vecmask = sext <16 x i32> %mask32 to <16 x i64> + extract_4s(i32, offsets) + extract_4s(i64, vecmask) + + %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, + <4 x i32> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8) + %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, + <4 x i32> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8) + %v3 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, + <4 x i32> %offsets_3, <4 x i64> %vecmask_3, i8 %scale8) + %v4 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, + <4 x i32> %offsets_4, <4 x i64> %vecmask_4, i8 %scale8) + + assemble_4s(i64, v, v1, v2, v3, v4) + + ret <16 x i64> %v +} + + +define <16 x i64> @__gather_base_offsets64_i64(i8 * %ptr, + i32 %scale, <16 x i64> %offsets, + <16 x i32> %mask32) nounwind readonly alwaysinline { + %scale8 = trunc i32 %scale to i8 + %vecmask = sext <16 x i32> %mask32 to <16 x i64> + extract_4s(i64, offsets) + extract_4s(i64, vecmask) + + %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, + <4 x i64> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8) + %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, + <4 x i64> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8) + %v3 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, + <4 x i64> %offsets_3, <4 x i64> %vecmask_3, i8 %scale8) + %v4 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, + <4 x i64> %offsets_4, <4 x i64> %vecmask_4, i8 %scale8) + + assemble_4s(i64, v, v1, v2, v3, v4) + + ret <16 x i64> %v +} + + +define <16 x i64> @__gather32_i64(<16 x i32> %ptrs, + <16 x i32> %mask32) nounwind readonly alwaysinline { + %vecmask = sext <16 x i32> %mask32 to <16 x i64> + extract_4s(i32, ptrs) + extract_4s(i64, vecmask) + + %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, + <4 x i32> %ptrs_1, <4 x i64> %vecmask_1, i8 1) + %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, + <4 x i32> %ptrs_2, <4 x i64> %vecmask_2, i8 1) + %v3 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, + <4 x i32> %ptrs_3, <4 x i64> %vecmask_3, i8 1) + %v4 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, + <4 x i32> %ptrs_4, <4 x i64> %vecmask_4, i8 1) + + assemble_4s(i64, v, v1, v2, v3, v4) + + ret <16 x i64> %v +} + +define <16 x i64> @__gather64_i64(<16 x i64> %ptrs, + <16 x i32> %mask32) nounwind readonly alwaysinline { + %vecmask = sext <16 x i32> %mask32 to <16 x i64> + extract_4s(i64, ptrs) + extract_4s(i64, vecmask) + + %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, + <4 x i64> %ptrs_1, <4 x i64> %vecmask_1, i8 1) + %v2 = 
call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, + <4 x i64> %ptrs_2, <4 x i64> %vecmask_2, i8 1) + %v3 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, + <4 x i64> %ptrs_3, <4 x i64> %vecmask_3, i8 1) + %v4 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, + <4 x i64> %ptrs_4, <4 x i64> %vecmask_4, i8 1) + + assemble_4s(i64, v, v1, v2, v3, v4) + + ret <16 x i64> %v +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; double gathers + +declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %target, i8 * %ptr, + <4 x i64> %indices, <4 x double> %mask, i8 %scale) readonly nounwind +declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %target, i8 * %ptr, + <4 x i32> %indices, <4 x double> %mask, i8 %scale) readonly nounwind + +define <16 x double> @__gather_base_offsets32_double(i8 * %ptr, + i32 %scale, <16 x i32> %offsets, + <16 x i32> %mask32) nounwind readonly alwaysinline { + %scale8 = trunc i32 %scale to i8 + %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> + %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> + extract_4s(i32, offsets) + extract_4s(double, vecmask) + + %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, + <4 x i32> %offsets_1, <4 x double> %vecmask_1, i8 %scale8) + %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, + <4 x i32> %offsets_2, <4 x double> %vecmask_2, i8 %scale8) + %v3 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, + <4 x i32> %offsets_3, <4 x double> %vecmask_3, i8 %scale8) + %v4 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, + <4 x i32> %offsets_4, <4 x double> %vecmask_4, i8 %scale8) + + assemble_4s(double, v, v1, v2, v3, v4) + + ret <16 x double> %v +} + + +define <16 x double> @__gather_base_offsets64_double(i8 * %ptr, + i32 %scale, <16 x i64> %offsets, + <16 x i32> %mask32) nounwind readonly alwaysinline { + %scale8 = trunc i32 %scale to i8 + %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> + %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> + extract_4s(i64, offsets) + extract_4s(double, vecmask) + + %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, + <4 x i64> %offsets_1, <4 x double> %vecmask_1, i8 %scale8) + %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, + <4 x i64> %offsets_2, <4 x double> %vecmask_2, i8 %scale8) + %v3 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, + <4 x i64> %offsets_3, <4 x double> %vecmask_3, i8 %scale8) + %v4 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, + <4 x i64> %offsets_4, <4 x double> %vecmask_4, i8 %scale8) + + assemble_4s(double, v, v1, v2, v3, v4) + + ret <16 x double> %v +} + + +define <16 x double> @__gather32_double(<16 x i32> %ptrs, + <16 x i32> %mask32) nounwind readonly alwaysinline { + %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> + %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> + extract_4s(i32, ptrs) + extract_4s(double, vecmask) + + %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, + <4 x i32> %ptrs_1, <4 x double> %vecmask_1, i8 1) + %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, + <4 x i32> %ptrs_2, <4 x double> %vecmask_2, i8 1) + %v3 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x 
double> undef, i8 * null, + <4 x i32> %ptrs_3, <4 x double> %vecmask_3, i8 1) + %v4 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, + <4 x i32> %ptrs_4, <4 x double> %vecmask_4, i8 1) + + assemble_4s(double, v, v1, v2, v3, v4) + + ret <16 x double> %v +} + + +define <16 x double> @__gather64_double(<16 x i64> %ptrs, + <16 x i32> %mask32) nounwind readonly alwaysinline { + %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> + %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> + extract_4s(i64, ptrs) + extract_4s(double, vecmask) + + %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null, + <4 x i64> %ptrs_1, <4 x double> %vecmask_1, i8 1) + %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null, + <4 x i64> %ptrs_2, <4 x double> %vecmask_2, i8 1) + %v3 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null, + <4 x i64> %ptrs_3, <4 x double> %vecmask_3, i8 1) + %v4 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null, + <4 x i64> %ptrs_4, <4 x double> %vecmask_4, i8 1) + + assemble_4s(double, v, v1, v2, v3, v4) + + ret <16 x double> %v +} diff --git a/builtins/target-knl.ll_ b/builtins/target-knl.ll_ new file mode 100644 index 00000000..8305dda7 --- /dev/null +++ b/builtins/target-knl.ll_ @@ -0,0 +1,34 @@ +;; Copyright (c) 2010-2014, Intel Corporation +;; All rights reserved. +;; +;; Redistribution and use in source and binary forms, with or without +;; modification, are permitted provided that the following conditions are +;; met: +;; +;; * Redistributions of source code must retain the above copyright +;; notice, this list of conditions and the following disclaimer. +;; +;; * Redistributions in binary form must reproduce the above copyright +;; notice, this list of conditions and the following disclaimer in the +;; documentation and/or other materials provided with the distribution. +;; +;; * Neither the name of Intel Corporation nor the names of its +;; contributors may be used to endorse or promote products derived from +;; this software without specific prior written permission. +;; +;; +;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +;; PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER +;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
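
;; builtins/target-knl.ll_ parks the AVX512-common based definition of the target
;; (WIDTH=16 plus the shared include) while target-knl.ll above carries the
;; AVX2-based code for now. In that code the 16-wide gathers are stitched together
;; from 8- and 4-wide AVX2 gather intrinsics via the extract_*/assemble_* helpers
;; defined locally in the file. Assuming the natural low/high lane split, extract_8s
;; and assemble_8s amount to roughly the following (a sketch with the shuffle index
;; lists written out):

define(`extract_8s', `
  %$2_1 = shufflevector <16 x $1> %$2, <16 x $1> undef,
            <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %$2_2 = shufflevector <16 x $1> %$2, <16 x $1> undef,
            <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
')

define(`assemble_8s', `
  %$2 = shufflevector <8 x $1> %$3, <8 x $1> %$4,
          <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                      i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
')
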
+ +define(`WIDTH',`16') +include(`target-avx512-common.ll') +saturation_arithmetic_novec() From 9a03cd359070e81f22c68c60d718e7e180721843 Mon Sep 17 00:00:00 2001 From: Vsevolod Livinskiy Date: Thu, 7 May 2015 11:36:48 +0300 Subject: [PATCH 08/23] [AVX512]: definitions through util.m4 was added --- builtins/target-avx512-common.ll | 97 +----- builtins/target-knl.ll | 512 +------------------------------ builtins/target-knl.ll_ | 34 -- 3 files changed, 14 insertions(+), 629 deletions(-) delete mode 100644 builtins/target-knl.ll_ diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index d402e927..daebe77b 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -29,8 +29,6 @@ ;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128-v16:16:16-v32:32:32-v4:128:128"; - define(`MASK',`i1') define(`HAVE_GATHER',`1') define(`HAVE_SCATTER',`1') @@ -40,7 +38,7 @@ include(`util.m4') stdlib_core() scans() reduce_equal(WIDTH) -rdrand_decls() +rdrand_definition() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; broadcast/rotate/shuffle @@ -66,64 +64,15 @@ declare @__undef_i16() nounwind readnone declare @__undef_i32() nounwind readnone declare @__undef_i64() nounwind readnone -declare @__broadcast_float(, i32) nounwind readnone -declare @__broadcast_double(, i32) nounwind readnone -declare @__broadcast_i8(, i32) nounwind readnone -declare @__broadcast_i16(, i32) nounwind readnone -declare @__broadcast_i32(, i32) nounwind readnone -declare @__broadcast_i64(, i32) nounwind readnone +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; shuffle -declare @__rotate_i8(, i32) nounwind readnone -declare @__rotate_i16(, i32) nounwind readnone -declare @__rotate_float(, i32) nounwind readnone -declare @__rotate_i32(, i32) nounwind readnone -declare @__rotate_double(, i32) nounwind readnone -declare @__rotate_i64(, i32) nounwind readnone - -declare @__shift_i8(, i32) nounwind readnone -declare @__shift_i16(, i32) nounwind readnone -declare @__shift_float(, i32) nounwind readnone -declare @__shift_i32(, i32) nounwind readnone -declare @__shift_double(, i32) nounwind readnone -declare @__shift_i64(, i32) nounwind readnone - -declare @__shuffle_i8(, ) nounwind readnone -declare @__shuffle2_i8(, , - ) nounwind readnone -declare @__shuffle_i16(, ) nounwind readnone -declare @__shuffle2_i16(, , - ) nounwind readnone -declare @__shuffle_float(, - ) nounwind readnone -declare @__shuffle2_float(, , - ) nounwind readnone -declare @__shuffle_i32(, - ) nounwind readnone -declare @__shuffle2_i32(, , - ) nounwind readnone -declare @__shuffle_double(, - ) nounwind readnone -declare @__shuffle2_double(, - , ) nounwind readnone -declare @__shuffle_i64(, - ) nounwind readnone -declare @__shuffle2_i64(, , - ) nounwind readnone +define_shuffles() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; aos/soa -declare void @__soa_to_aos3_float( %v0, %v1, - %v2, float * noalias %p) nounwind -declare void @__aos_to_soa3_float(float * noalias %p, * %out0, - * %out1, * %out2) nounwind -declare void @__soa_to_aos4_float( %v0, %v1, - %v2, %v3, - float * noalias %p) nounwind -declare void @__aos_to_soa4_float(float * noalias %p, * noalias %out0, - * noalias %out1, 
- * noalias %out2, - * noalias %out3) nounwind +aossoa() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; half conversion routines @@ -157,16 +106,14 @@ declare @__ceil_varying_double() nounwind readn ;; min/max +int64minmax() + declare float @__max_uniform_float(float, float) nounwind readnone declare float @__min_uniform_float(float, float) nounwind readnone declare i32 @__min_uniform_int32(i32, i32) nounwind readnone declare i32 @__max_uniform_int32(i32, i32) nounwind readnone declare i32 @__min_uniform_uint32(i32, i32) nounwind readnone declare i32 @__max_uniform_uint32(i32, i32) nounwind readnone -declare i64 @__min_uniform_int64(i64, i64) nounwind readnone -declare i64 @__max_uniform_int64(i64, i64) nounwind readnone -declare i64 @__min_uniform_uint64(i64, i64) nounwind readnone -declare i64 @__max_uniform_uint64(i64, i64) nounwind readnone declare double @__min_uniform_double(double, double) nounwind readnone declare double @__max_uniform_double(double, double) nounwind readnone @@ -178,10 +125,6 @@ declare @__min_varying_int32(, ) nounwin declare @__max_varying_int32(, ) nounwind readnone declare @__min_varying_uint32(, ) nounwind readnone declare @__max_varying_uint32(, ) nounwind readnone -declare @__min_varying_int64(, ) nounwind readnone -declare @__max_varying_int64(, ) nounwind readnone -declare @__min_varying_uint64(, ) nounwind readnone -declare @__max_varying_uint64(, ) nounwind readnone declare @__min_varying_double(, ) nounwind readnone declare @__max_varying_double(, @@ -205,10 +148,7 @@ declare @__sqrt_varying_double() nounwind readn declare i32 @__popcnt_int32(i32) nounwind readnone declare i64 @__popcnt_int64(i64) nounwind readnone -declare i32 @__count_trailing_zeros_i32(i32) nounwind readnone -declare i64 @__count_trailing_zeros_i64(i64) nounwind readnone -declare i32 @__count_leading_zeros_i32(i32) nounwind readnone -declare i64 @__count_leading_zeros_i64(i64) nounwind readnone +ctlztz() ; FIXME: need either to wire these up to the 8-wide SVML entrypoints, ; or, use the macro to call the 4-wide ones twice with our 8-wide @@ -354,30 +294,13 @@ gather_scatter(float) gather_scatter(i64) gather_scatter(double) -declare i32 @__packed_load_active(i32 * nocapture, * nocapture, - ) nounwind -declare i32 @__packed_store_active(i32 * nocapture, %vals, - ) nounwind -declare i32 @__packed_store_active2(i32 * nocapture, %vals, - ) nounwind - +packed_load_and_store() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; prefetch -declare void @__prefetch_read_uniform_1(i8 * nocapture) nounwind -declare void @__prefetch_read_uniform_2(i8 * nocapture) nounwind -declare void @__prefetch_read_uniform_3(i8 * nocapture) nounwind -declare void @__prefetch_read_uniform_nt(i8 * nocapture) nounwind +define_prefetches() -declare void @__prefetch_read_varying_1( %addr, %mask) nounwind -declare void @__prefetch_read_varying_1_native(i8 * %base, i32 %scale, %offsets, %mask) nounwind -declare void @__prefetch_read_varying_2( %addr, %mask) nounwind -declare void @__prefetch_read_varying_2_native(i8 * %base, i32 %scale, %offsets, %mask) nounwind -declare void @__prefetch_read_varying_3( %addr, %mask) nounwind -declare void @__prefetch_read_varying_3_native(i8 * %base, i32 %scale, %offsets, %mask) nounwind -declare void @__prefetch_read_varying_nt( %addr, %mask) nounwind -declare void @__prefetch_read_varying_nt_native(i8 * %base, i32 %scale, %offsets, %mask) nounwind 
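
;; The net effect of this hunk is that the long hand-written declare lists are
;; replaced by the shared util.m4 generators (define_shuffles(), aossoa(),
;; int64minmax(), ctlztz(), packed_load_and_store(), define_prefetches()), and
;; rdrand_decls() becomes rdrand_definition(), so these entry points now come from
;; one place. As one illustrative example, ctlztz() is expected to supply the four
;; count-leading/trailing-zeros routines removed above along these lines (a sketch;
;; the exact expansion lives in util.m4):

declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone

define i32 @__count_trailing_zeros_i32(i32) nounwind readnone alwaysinline {
  ; i1 false: the result is still defined (32) when the input is zero
  %c = call i32 @llvm.cttz.i32(i32 %0, i1 false)
  ret i32 %c
}
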
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; int8/int16 builtins diff --git a/builtins/target-knl.ll b/builtins/target-knl.ll index abc39885..65146f15 100644 --- a/builtins/target-knl.ll +++ b/builtins/target-knl.ll @@ -1,4 +1,4 @@ -;; Copyright (c) 2010-2013, Intel Corporation +;; Copyright (c) 2010-2014, Intel Corporation ;; All rights reserved. ;; ;; Redistribution and use in source and binary forms, with or without @@ -29,510 +29,6 @@ ;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -define(`HAVE_GATHER', `1') - -include(`target-avx-x2.ll') - -rdrand_definition() - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; int min/max - -declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readonly -declare <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32>, <8 x i32>) nounwind readonly - -define <16 x i32> @__min_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { - binary8to16(m, i32, @llvm.x86.avx2.pmins.d, %0, %1) - ret <16 x i32> %m -} - -define <16 x i32> @__max_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { - binary8to16(m, i32, @llvm.x86.avx2.pmaxs.d, %0, %1) - ret <16 x i32> %m -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; unsigned int min/max - -declare <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32>, <8 x i32>) nounwind readonly -declare <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32>, <8 x i32>) nounwind readonly - -define <16 x i32> @__min_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { - binary8to16(m, i32, @llvm.x86.avx2.pminu.d, %0, %1) - ret <16 x i32> %m -} - -define <16 x i32> @__max_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { - binary8to16(m, i32, @llvm.x86.avx2.pmaxu.d, %0, %1) - ret <16 x i32> %m -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; float/half conversions - -declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readnone -; 0 is round nearest even -declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readnone - -define <16 x float> @__half_to_float_varying(<16 x i16> %v) nounwind readnone { - %r_0 = shufflevector <16 x i16> %v, <16 x i16> undef, - <8 x i32> - %vr_0 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %r_0) - %r_1 = shufflevector <16 x i16> %v, <16 x i16> undef, - <8 x i32> - %vr_1 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %r_1) - %r = shufflevector <8 x float> %vr_0, <8 x float> %vr_1, - <16 x i32> - ret <16 x float> %r -} - -define <16 x i16> @__float_to_half_varying(<16 x float> %v) nounwind readnone { - %r_0 = shufflevector <16 x float> %v, <16 x float> undef, - <8 x i32> - %vr_0 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %r_0, i32 0) - %r_1 = shufflevector <16 x float> %v, <16 x float> undef, - <8 x i32> - %vr_1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %r_1, i32 0) - %r = shufflevector <8 x i16> %vr_0, <8 x i16> %vr_1, - <16 x i32> - ret <16 x i16> %r -} - -define float @__half_to_float_uniform(i16 %v) nounwind readnone { - %v1 = bitcast i16 %v to <1 x i16> - %vv = shufflevector <1 x i16> %v1, <1 x i16> undef, - <8 x i32> - %rv = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %vv) - %r = extractelement <8 x float> %rv, i32 0 - ret float %r -} - -define i16 @__float_to_half_uniform(float %v) nounwind readnone { - %v1 = bitcast float %v to <1 x float> - %vv = 
shufflevector <1 x float> %v1, <1 x float> undef, - <8 x i32> - ; round to nearest even - %rv = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %vv, i32 0) - %r = extractelement <8 x i16> %rv, i32 0 - ret i16 %r -} - - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; gather - -declare void @llvm.trap() noreturn nounwind - -; $1: type -; $2: var base name -define(`extract_4s', ` - %$2_1 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> - %$2_2 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> - %$2_3 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> - %$2_4 = shufflevector <16 x $1> %$2, <16 x $1> undef, <4 x i32> -') - -; $1: type -; $2: var base name -define(`extract_8s', ` - %$2_1 = shufflevector <16 x $1> %$2, <16 x $1> undef, - <8 x i32> - %$2_2 = shufflevector <16 x $1> %$2, <16 x $1> undef, - <8 x i32> -') - -; $1: element type -; $2: ret name -; $3: v1 -; $4: v2 -define(`assemble_8s', ` - %$2 = shufflevector <8 x $1> %$3, <8 x $1> %$4, - <16 x i32> -') - -; $1: element type -; $2: ret name -; $3: v1 -; $4: v2 -; $5: v3 -; $6: v4 -define(`assemble_4s', ` - %$2_1 = shufflevector <4 x $1> %$3, <4 x $1> %$4, - <8 x i32> - %$2_2 = shufflevector <4 x $1> %$5, <4 x $1> %$6, - <8 x i32> - assemble_8s($1, $2, $2_1, $2_2) -') - - -gen_gather(i8) -gen_gather(i16) - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; int32 gathers - -declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %target, i8 * %ptr, - <8 x i32> %indices, <8 x i32> %mask, i8 %scale) readonly nounwind -declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %target, i8 * %ptr, - <4 x i64> %indices, <4 x i32> %mask, i8 %scale) readonly nounwind - -define <16 x i32> @__gather_base_offsets32_i32(i8 * %ptr, i32 %scale, <16 x i32> %offsets, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - extract_8s(i32, offsets) - extract_8s(i32, vecmask) - - %v1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * %ptr, - <8 x i32> %offsets_1, <8 x i32> %vecmask_1, i8 %scale8) - %v2 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * %ptr, - <8 x i32> %offsets_2, <8 x i32> %vecmask_2, i8 %scale8) - - assemble_8s(i32, v, v1, v2) - - ret <16 x i32> %v -} - - -define <16 x i32> @__gather_base_offsets64_i32(i8 * %ptr, - i32 %scale, <16 x i64> %offsets, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - - extract_4s(i32, vecmask) - extract_4s(i64, offsets) - - %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, - <4 x i64> %offsets_1, <4 x i32> %vecmask_1, i8 %scale8) - %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, - <4 x i64> %offsets_2, <4 x i32> %vecmask_2, i8 %scale8) - %v3 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, - <4 x i64> %offsets_3, <4 x i32> %vecmask_3, i8 %scale8) - %v4 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * %ptr, - <4 x i64> %offsets_4, <4 x i32> %vecmask_4, i8 %scale8) - - assemble_4s(i32, v, v1, v2, v3, v4) - - ret <16 x i32> %v -} - - -define <16 x i32> @__gather32_i32(<16 x i32> %ptrs, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - extract_8s(i32, ptrs) - extract_8s(i32, vecmask) - - %v1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * null, - <8 x i32> %ptrs_1, <8 x i32> %vecmask_1, i8 1) - %v2 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8 * null, - 
<8 x i32> %ptrs_2, <8 x i32> %vecmask_2, i8 1) - - assemble_8s(i32, v, v1, v2) - - ret <16 x i32> %v -} - - -define <16 x i32> @__gather64_i32(<16 x i64> %ptrs, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - extract_4s(i64, ptrs) - extract_4s(i32, vecmask) - - %v1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, - <4 x i64> %ptrs_1, <4 x i32> %vecmask_1, i8 1) - %v2 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, - <4 x i64> %ptrs_2, <4 x i32> %vecmask_2, i8 1) - %v3 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, - <4 x i64> %ptrs_3, <4 x i32> %vecmask_3, i8 1) - %v4 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8 * null, - <4 x i64> %ptrs_4, <4 x i32> %vecmask_4, i8 1) - - assemble_4s(i32, v, v1, v2, v3, v4) - - ret <16 x i32> %v -} - - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; float gathers - -declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %target, i8 * %ptr, - <8 x i32> %indices, <8 x float> %mask, i8 %scale8) readonly nounwind -declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %target, i8 * %ptr, - <4 x i64> %indices, <4 x float> %mask, i8 %scale8) readonly nounwind - -define <16 x float> @__gather_base_offsets32_float(i8 * %ptr, - i32 %scale, <16 x i32> %offsets, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %mask = bitcast <16 x i32> %vecmask to <16 x float> - extract_8s(i32, offsets) - extract_8s(float, mask) - - %v1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * %ptr, - <8 x i32> %offsets_1, <8 x float> %mask_1, i8 %scale8) - %v2 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * %ptr, - <8 x i32> %offsets_2, <8 x float> %mask_2, i8 %scale8) - - assemble_8s(float, v, v1, v2) - - ret <16 x float> %v -} - - -define <16 x float> @__gather_base_offsets64_float(i8 * %ptr, - i32 %scale, <16 x i64> %offsets, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %mask = bitcast <16 x i32> %vecmask to <16 x float> - extract_4s(i64, offsets) - extract_4s(float, mask) - - %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, - <4 x i64> %offsets_1, <4 x float> %mask_1, i8 %scale8) - %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, - <4 x i64> %offsets_2, <4 x float> %mask_2, i8 %scale8) - %v3 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, - <4 x i64> %offsets_3, <4 x float> %mask_3, i8 %scale8) - %v4 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * %ptr, - <4 x i64> %offsets_4, <4 x float> %mask_4, i8 %scale8) - - assemble_4s(float, v, v1, v2, v3, v4) - - ret <16 x float> %v -} - - -define <16 x float> @__gather32_float(<16 x i32> %ptrs, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %mask = bitcast <16 x i32> %vecmask to <16 x float> - extract_8s(float, mask) - extract_8s(i32, ptrs) - - %v1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * null, - <8 x i32> %ptrs_1, <8 x float> %mask_1, i8 1) - %v2 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8 * null, - <8 x i32> %ptrs_2, <8 x float> %mask_2, i8 1) - - assemble_8s(float, v, v1, v2) - - ret <16 x float> %v -} - - -define <16 x float> @__gather64_float(<16 x i64> %ptrs, - <16 x i32> %vecmask) nounwind readonly alwaysinline { - %mask = bitcast <16 x i32> 
%vecmask to <16 x float> - extract_4s(i64, ptrs) - extract_4s(float, mask) - - %v1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, - <4 x i64> %ptrs_1, <4 x float> %mask_1, i8 1) - %v2 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, - <4 x i64> %ptrs_2, <4 x float> %mask_2, i8 1) - %v3 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, - <4 x i64> %ptrs_3, <4 x float> %mask_3, i8 1) - %v4 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8 * null, - <4 x i64> %ptrs_4, <4 x float> %mask_4, i8 1) - - assemble_4s(float, v, v1, v2, v3, v4) - - ret <16 x float> %v -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; int64 gathers - -declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %target, i8 * %ptr, - <4 x i32> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind -declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %target, i8 * %ptr, - <4 x i64> %indices, <4 x i64> %mask, i8 %scale) readonly nounwind - -define <16 x i64> @__gather_base_offsets32_i64(i8 * %ptr, - i32 %scale, <16 x i32> %offsets, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %vecmask = sext <16 x i32> %mask32 to <16 x i64> - extract_4s(i32, offsets) - extract_4s(i64, vecmask) - - %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i32> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8) - %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i32> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8) - %v3 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i32> %offsets_3, <4 x i64> %vecmask_3, i8 %scale8) - %v4 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i32> %offsets_4, <4 x i64> %vecmask_4, i8 %scale8) - - assemble_4s(i64, v, v1, v2, v3, v4) - - ret <16 x i64> %v -} - - -define <16 x i64> @__gather_base_offsets64_i64(i8 * %ptr, - i32 %scale, <16 x i64> %offsets, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %vecmask = sext <16 x i32> %mask32 to <16 x i64> - extract_4s(i64, offsets) - extract_4s(i64, vecmask) - - %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i64> %offsets_1, <4 x i64> %vecmask_1, i8 %scale8) - %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i64> %offsets_2, <4 x i64> %vecmask_2, i8 %scale8) - %v3 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i64> %offsets_3, <4 x i64> %vecmask_3, i8 %scale8) - %v4 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * %ptr, - <4 x i64> %offsets_4, <4 x i64> %vecmask_4, i8 %scale8) - - assemble_4s(i64, v, v1, v2, v3, v4) - - ret <16 x i64> %v -} - - -define <16 x i64> @__gather32_i64(<16 x i32> %ptrs, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %vecmask = sext <16 x i32> %mask32 to <16 x i64> - extract_4s(i32, ptrs) - extract_4s(i64, vecmask) - - %v1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, - <4 x i32> %ptrs_1, <4 x i64> %vecmask_1, i8 1) - %v2 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, - <4 x i32> %ptrs_2, <4 x i64> %vecmask_2, i8 1) - %v3 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, - <4 x i32> %ptrs_3, <4 x i64> %vecmask_3, i8 1) - %v4 = call <4 x i64> 
@llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8 * null, - <4 x i32> %ptrs_4, <4 x i64> %vecmask_4, i8 1) - - assemble_4s(i64, v, v1, v2, v3, v4) - - ret <16 x i64> %v -} - -define <16 x i64> @__gather64_i64(<16 x i64> %ptrs, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %vecmask = sext <16 x i32> %mask32 to <16 x i64> - extract_4s(i64, ptrs) - extract_4s(i64, vecmask) - - %v1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, - <4 x i64> %ptrs_1, <4 x i64> %vecmask_1, i8 1) - %v2 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, - <4 x i64> %ptrs_2, <4 x i64> %vecmask_2, i8 1) - %v3 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, - <4 x i64> %ptrs_3, <4 x i64> %vecmask_3, i8 1) - %v4 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8 * null, - <4 x i64> %ptrs_4, <4 x i64> %vecmask_4, i8 1) - - assemble_4s(i64, v, v1, v2, v3, v4) - - ret <16 x i64> %v -} - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; double gathers - -declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %target, i8 * %ptr, - <4 x i64> %indices, <4 x double> %mask, i8 %scale) readonly nounwind -declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %target, i8 * %ptr, - <4 x i32> %indices, <4 x double> %mask, i8 %scale) readonly nounwind - -define <16 x double> @__gather_base_offsets32_double(i8 * %ptr, - i32 %scale, <16 x i32> %offsets, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> - %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> - extract_4s(i32, offsets) - extract_4s(double, vecmask) - - %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i32> %offsets_1, <4 x double> %vecmask_1, i8 %scale8) - %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i32> %offsets_2, <4 x double> %vecmask_2, i8 %scale8) - %v3 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i32> %offsets_3, <4 x double> %vecmask_3, i8 %scale8) - %v4 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i32> %offsets_4, <4 x double> %vecmask_4, i8 %scale8) - - assemble_4s(double, v, v1, v2, v3, v4) - - ret <16 x double> %v -} - - -define <16 x double> @__gather_base_offsets64_double(i8 * %ptr, - i32 %scale, <16 x i64> %offsets, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %scale8 = trunc i32 %scale to i8 - %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> - %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> - extract_4s(i64, offsets) - extract_4s(double, vecmask) - - %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i64> %offsets_1, <4 x double> %vecmask_1, i8 %scale8) - %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i64> %offsets_2, <4 x double> %vecmask_2, i8 %scale8) - %v3 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i64> %offsets_3, <4 x double> %vecmask_3, i8 %scale8) - %v4 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * %ptr, - <4 x i64> %offsets_4, <4 x double> %vecmask_4, i8 %scale8) - - assemble_4s(double, v, v1, v2, v3, v4) - - ret <16 x double> %v -} - - -define <16 x double> @__gather32_double(<16 x i32> %ptrs, - <16 x i32> %mask32) nounwind readonly 
alwaysinline { - %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> - %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> - extract_4s(i32, ptrs) - extract_4s(double, vecmask) - - %v1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, - <4 x i32> %ptrs_1, <4 x double> %vecmask_1, i8 1) - %v2 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, - <4 x i32> %ptrs_2, <4 x double> %vecmask_2, i8 1) - %v3 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, - <4 x i32> %ptrs_3, <4 x double> %vecmask_3, i8 1) - %v4 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8 * null, - <4 x i32> %ptrs_4, <4 x double> %vecmask_4, i8 1) - - assemble_4s(double, v, v1, v2, v3, v4) - - ret <16 x double> %v -} - - -define <16 x double> @__gather64_double(<16 x i64> %ptrs, - <16 x i32> %mask32) nounwind readonly alwaysinline { - %vecmask64 = sext <16 x i32> %mask32 to <16 x i64> - %vecmask = bitcast <16 x i64> %vecmask64 to <16 x double> - extract_4s(i64, ptrs) - extract_4s(double, vecmask) - - %v1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null, - <4 x i64> %ptrs_1, <4 x double> %vecmask_1, i8 1) - %v2 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null, - <4 x i64> %ptrs_2, <4 x double> %vecmask_2, i8 1) - %v3 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null, - <4 x i64> %ptrs_3, <4 x double> %vecmask_3, i8 1) - %v4 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8 * null, - <4 x i64> %ptrs_4, <4 x double> %vecmask_4, i8 1) - - assemble_4s(double, v, v1, v2, v3, v4) - - ret <16 x double> %v -} +define(`WIDTH',`16') +include(`target-avx512-common.ll') +;;saturation_arithmetic_novec() diff --git a/builtins/target-knl.ll_ b/builtins/target-knl.ll_ deleted file mode 100644 index 8305dda7..00000000 --- a/builtins/target-knl.ll_ +++ /dev/null @@ -1,34 +0,0 @@ -;; Copyright (c) 2010-2014, Intel Corporation -;; All rights reserved. -;; -;; Redistribution and use in source and binary forms, with or without -;; modification, are permitted provided that the following conditions are -;; met: -;; -;; * Redistributions of source code must retain the above copyright -;; notice, this list of conditions and the following disclaimer. -;; -;; * Redistributions in binary form must reproduce the above copyright -;; notice, this list of conditions and the following disclaimer in the -;; documentation and/or other materials provided with the distribution. -;; -;; * Neither the name of Intel Corporation nor the names of its -;; contributors may be used to endorse or promote products derived from -;; this software without specific prior written permission. -;; -;; -;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -;; IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -;; TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -;; PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER -;; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -;; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -;; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -;; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -define(`WIDTH',`16') -include(`target-avx512-common.ll') -saturation_arithmetic_novec() From bea7cc9a8116ff2a930c63e192c684e81badfc60 Mon Sep 17 00:00:00 2001 From: Vsevolod Livinskiy Date: Thu, 7 May 2015 13:56:01 +0300 Subject: [PATCH 09/23] [AVX512]: half/float conversions --- builtins/target-avx512-common.ll | 54 +++++++++++++++++++++++++++++--- 1 file changed, 50 insertions(+), 4 deletions(-) diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index daebe77b..50e2e4b8 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -77,10 +77,56 @@ aossoa() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; half conversion routines -declare float @__half_to_float_uniform(i16 %v) nounwind readnone -declare @__half_to_float_varying( %v) nounwind readnone -declare i16 @__float_to_half_uniform(float %v) nounwind readnone -declare @__float_to_half_varying( %v) nounwind readnone +declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readnone +; 0 is round nearest even +declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readnone + +define <16 x float> @__half_to_float_varying(<16 x i16> %v) nounwind readnone { + %r_0 = shufflevector <16 x i16> %v, <16 x i16> undef, + <8 x i32> + %vr_0 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %r_0) + %r_1 = shufflevector <16 x i16> %v, <16 x i16> undef, + <8 x i32> + %vr_1 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %r_1) + %r = shufflevector <8 x float> %vr_0, <8 x float> %vr_1, + <16 x i32> + ret <16 x float> %r +} + +define <16 x i16> @__float_to_half_varying(<16 x float> %v) nounwind readnone { + %r_0 = shufflevector <16 x float> %v, <16 x float> undef, + <8 x i32> + %vr_0 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %r_0, i32 0) + %r_1 = shufflevector <16 x float> %v, <16 x float> undef, + <8 x i32> + %vr_1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %r_1, i32 0) + %r = shufflevector <8 x i16> %vr_0, <8 x i16> %vr_1, + <16 x i32> + ret <16 x i16> %r +} + +define float @__half_to_float_uniform(i16 %v) nounwind readnone { + %v1 = bitcast i16 %v to <1 x i16> + %vv = shufflevector <1 x i16> %v1, <1 x i16> undef, + <8 x i32> + %rv = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %vv) + %r = extractelement <8 x float> %rv, i32 0 + ret float %r +} + +define i16 @__float_to_half_uniform(float %v) nounwind readnone { + %v1 = bitcast float %v to <1 x float> + %vv = shufflevector <1 x float> %v1, <1 x float> undef, + <8 x i32> + ; round to nearest even + %rv = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %vv, i32 0) + %r = extractelement <8 x i16> %rv, i32 0 + ret i16 %r +} ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; math From 2549fa12c915c42640e91396c59c777230e44fde Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 7 May 2015 13:51:52 +0300 Subject: [PATCH 10/23] [AVX512]: masked load-store (not all loads) --- 
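
Note: in the hunk below, only the integer masked stores get real definitions (through the gen_masked_store() macro); __masked_store_float and __masked_store_double are thin wrappers that bitcast the pointer and the value vector to the same-width integer types and forward to __masked_store_i32 / __masked_store_i64, so a single AVX-512 lowering covers both the integer and floating-point element types. A minimal scalar C++ analogue of that wrapper pattern, shown for one lane only; the function names here are illustrative stand-ins, not part of the compiler's runtime interface:

    #include <cstdint>
    #include <cstring>

    // Stand-in for the generated integer masked store (one lane shown).
    static void masked_store_i32(int32_t *ptr, int32_t val, bool lane_on) {
        if (lane_on)
            *ptr = val;
    }

    // Float wrapper: reinterpret the bits, then forward to the i32 path.
    // The vector builtin does the same thing 16 lanes at a time via bitcast.
    static void masked_store_float(float *ptr, float val, bool lane_on) {
        int32_t bits;
        std::memcpy(&bits, &val, sizeof(bits));   // bit-preserving reinterpretation
        masked_store_i32(reinterpret_cast<int32_t *>(ptr), bits, lane_on);
    }

Keeping the float/double entry points as bitcast wrappers means the masked-store lowering only ever has to reason about integer element widths.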
builtins/target-avx512-common.ll | 35 +++++++++++++++++++------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index 50e2e4b8..51021317 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -244,23 +244,30 @@ declare i64 @__reduce_max_uint64() nounwind readnone declare @__masked_load_i8(i8 * nocapture, %mask) nounwind readonly declare @__masked_load_i16(i8 * nocapture, %mask) nounwind readonly declare @__masked_load_i32(i8 * nocapture, %mask) nounwind readonly -declare @__masked_load_float(i8 * nocapture, %mask) nounwind readonly declare @__masked_load_i64(i8 * nocapture, %mask) nounwind readonly -declare @__masked_load_double(i8 * nocapture, %mask) nounwind readonly -declare void @__masked_store_i8(* nocapture, , - ) nounwind -declare void @__masked_store_i16(* nocapture, , - ) nounwind -declare void @__masked_store_i32(* nocapture, , - ) nounwind -declare void @__masked_store_float(* nocapture, , - ) nounwind -declare void @__masked_store_i64(* nocapture, , - %mask) nounwind -declare void @__masked_store_double(* nocapture, , - %mask) nounwind +masked_load_float_double() +gen_masked_store(i8) +gen_masked_store(i16) +gen_masked_store(i32) +gen_masked_store(i64) + +define void @__masked_store_float( * nocapture, , + ) nounwind alwaysinline { + %ptr = bitcast * %0 to * + %val = bitcast %1 to + call void @__masked_store_i32( * %ptr, %val, %2) + ret void +} + +define void @__masked_store_double( * nocapture, , + ) nounwind alwaysinline { + %ptr = bitcast * %0 to * + %val = bitcast %1 to + call void @__masked_store_i64( * %ptr, %val, %2) + ret void +} define void @__masked_store_blend_i8(* nocapture, , ) nounwind alwaysinline { From 7c9d9f6ee69740741ca69cb26dab0d2791f4b7e6 Mon Sep 17 00:00:00 2001 From: Vsevolod Livinskiy Date: Thu, 7 May 2015 14:37:16 +0300 Subject: [PATCH 11/23] [AVX512]: reduce operations was added --- builtins/target-avx512-common.ll | 210 ++++++++++++++++++++++++------- 1 file changed, 168 insertions(+), 42 deletions(-) diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index 51021317..4f457080 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -43,30 +43,6 @@ rdrand_definition() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; broadcast/rotate/shuffle -declare @__smear_float(float) nounwind readnone -declare @__smear_double(double) nounwind readnone -declare @__smear_i8(i8) nounwind readnone -declare @__smear_i16(i16) nounwind readnone -declare @__smear_i32(i32) nounwind readnone -declare @__smear_i64(i64) nounwind readnone - -declare @__setzero_float() nounwind readnone -declare @__setzero_double() nounwind readnone -declare @__setzero_i8() nounwind readnone -declare @__setzero_i16() nounwind readnone -declare @__setzero_i32() nounwind readnone -declare @__setzero_i64() nounwind readnone - -declare @__undef_float() nounwind readnone -declare @__undef_double() nounwind readnone -declare @__undef_i8() nounwind readnone -declare @__undef_i16() nounwind readnone -declare @__undef_i32() nounwind readnone -declare @__undef_i64() nounwind readnone - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; shuffle - define_shuffles() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -214,28 +190,178 @@ declare i1 @__any() nounwind readnone declare i1 @__all() nounwind readnone declare i1 @__none() nounwind 
readnone -declare i16 @__reduce_add_int8() nounwind readnone -declare i32 @__reduce_add_int16() nounwind readnone +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; horizontal int8/16 ops -declare float @__reduce_add_float() nounwind readnone -declare float @__reduce_min_float() nounwind readnone -declare float @__reduce_max_float() nounwind readnone +declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone -declare i64 @__reduce_add_int32() nounwind readnone -declare i32 @__reduce_min_int32() nounwind readnone -declare i32 @__reduce_max_int32() nounwind readnone -declare i32 @__reduce_min_uint32() nounwind readnone -declare i32 @__reduce_max_uint32() nounwind readnone +define i16 @__reduce_add_int8(<16 x i8>) nounwind readnone alwaysinline { + %rv = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %0, + <16 x i8> zeroinitializer) + %r0 = extractelement <2 x i64> %rv, i32 0 + %r1 = extractelement <2 x i64> %rv, i32 1 + %r = add i64 %r0, %r1 + %r16 = trunc i64 %r to i16 + ret i16 %r16 +} -declare double @__reduce_add_double() nounwind readnone -declare double @__reduce_min_double() nounwind readnone -declare double @__reduce_max_double() nounwind readnone +define internal <16 x i16> @__add_varying_i16(<16 x i16>, + <16 x i16>) nounwind readnone alwaysinline { + %r = add <16 x i16> %0, %1 + ret <16 x i16> %r +} -declare i64 @__reduce_add_int64() nounwind readnone -declare i64 @__reduce_min_int64() nounwind readnone -declare i64 @__reduce_max_int64() nounwind readnone -declare i64 @__reduce_min_uint64() nounwind readnone -declare i64 @__reduce_max_uint64() nounwind readnone +define internal i16 @__add_uniform_i16(i16, i16) nounwind readnone alwaysinline { + %r = add i16 %0, %1 + ret i16 %r +} + +define i16 @__reduce_add_int16(<16 x i16>) nounwind readnone alwaysinline { + reduce16(i16, @__add_varying_i16, @__add_uniform_i16) +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; horizontal float ops + +declare <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float>, <8 x float>) nounwind readnone + +define float @__reduce_add_float(<16 x float>) nounwind readonly alwaysinline { + %va = shufflevector <16 x float> %0, <16 x float> undef, + <8 x i32> + %vb = shufflevector <16 x float> %0, <16 x float> undef, + <8 x i32> + %v1 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %va, <8 x float> %vb) + %v2 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %v1, <8 x float> %v1) + %v3 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %v2, <8 x float> %v2) + %scalar1 = extractelement <8 x float> %v3, i32 0 + %scalar2 = extractelement <8 x float> %v3, i32 4 + %sum = fadd float %scalar1, %scalar2 + ret float %sum +} + +define float @__reduce_min_float(<16 x float>) nounwind readnone alwaysinline { + reduce16(float, @__min_varying_float, @__min_uniform_float) +} + +define float @__reduce_max_float(<16 x float>) nounwind readnone alwaysinline { + reduce16(float, @__max_varying_float, @__max_uniform_float) +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; horizontal int32 ops + +define internal <16 x i32> @__add_varying_int32(<16 x i32>, + <16 x i32>) nounwind readnone alwaysinline { + %s = add <16 x i32> %0, %1 + ret <16 x i32> %s +} + +define internal i32 @__add_uniform_int32(i32, i32) nounwind readnone alwaysinline { + %s = add i32 %0, %1 + ret i32 %s +} + +define i32 @__reduce_add_int32(<16 x i32>) nounwind readnone alwaysinline { + reduce16(i32, 
@__add_varying_int32, @__add_uniform_int32) +} + +define i32 @__reduce_min_int32(<16 x i32>) nounwind readnone alwaysinline { + reduce16(i32, @__min_varying_int32, @__min_uniform_int32) +} + +define i32 @__reduce_max_int32(<16 x i32>) nounwind readnone alwaysinline { + reduce16(i32, @__max_varying_int32, @__max_uniform_int32) +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;; horizontal uint32 ops + +define i32 @__reduce_min_uint32(<16 x i32>) nounwind readnone alwaysinline { + reduce16(i32, @__min_varying_uint32, @__min_uniform_uint32) +} + +define i32 @__reduce_max_uint32(<16 x i32>) nounwind readnone alwaysinline { + reduce16(i32, @__max_varying_uint32, @__max_uniform_uint32) +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; horizontal double ops + +declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>) nounwind readnone + +define double @__reduce_add_double(<16 x double>) nounwind readonly alwaysinline { + %va = shufflevector <16 x double> %0, <16 x double> undef, + <4 x i32> + %vb = shufflevector <16 x double> %0, <16 x double> undef, + <4 x i32> + %vc = shufflevector <16 x double> %0, <16 x double> undef, + <4 x i32> + %vd = shufflevector <16 x double> %0, <16 x double> undef, + <4 x i32> + %vab = fadd <4 x double> %va, %vb + %vcd = fadd <4 x double> %vc, %vd + + %sum0 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %vab, <4 x double> %vcd) + %sum1 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %sum0, <4 x double> %sum0) + %final0 = extractelement <4 x double> %sum1, i32 0 + %final1 = extractelement <4 x double> %sum1, i32 2 + %sum = fadd double %final0, %final1 + ret double %sum +} + +define double @__reduce_min_double(<16 x double>) nounwind readnone alwaysinline { + reduce16(double, @__min_varying_double, @__min_uniform_double) +} + +define double @__reduce_max_double(<16 x double>) nounwind readnone alwaysinline { + reduce16(double, @__max_varying_double, @__max_uniform_double) +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; horizontal int64 ops + +define internal <16 x i64> @__add_varying_int64(<16 x i64>, + <16 x i64>) nounwind readnone alwaysinline { + %s = add <16 x i64> %0, %1 + ret <16 x i64> %s +} + +define internal i64 @__add_uniform_int64(i64, i64) nounwind readnone alwaysinline { + %s = add i64 %0, %1 + ret i64 %s +} + +define i64 @__reduce_add_int64(<16 x i64>) nounwind readnone alwaysinline { + reduce16(i64, @__add_varying_int64, @__add_uniform_int64) +} + + +define i64 @__reduce_min_int64(<16 x i64>) nounwind readnone alwaysinline { + reduce16(i64, @__min_varying_int64, @__min_uniform_int64) +} + + +define i64 @__reduce_max_int64(<16 x i64>) nounwind readnone alwaysinline { + reduce16(i64, @__max_varying_int64, @__max_uniform_int64) +} + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;; horizontal uint64 ops + +define i64 @__reduce_min_uint64(<16 x i64>) nounwind readnone alwaysinline { + reduce16(i64, @__min_varying_uint64, @__min_uniform_uint64) +} + + +define i64 @__reduce_max_uint64(<16 x i64>) nounwind readnone alwaysinline { + reduce16(i64, @__max_varying_uint64, @__max_uniform_uint64) +} ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; unaligned loads/loads+broadcasts From 28fda1a01309609bfb1f0f67eed3e6d01c036116 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 7 May 2015 14:19:35 +0300 Subject: [PATCH 12/23] [AVX512]: 
movmsk/any/all/none --- builtins/target-avx512-common.ll | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index 4f457080..287dc00f 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -185,10 +185,29 @@ svml_stubs(double,d,WIDTH) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; reductions -declare i64 @__movmsk() nounwind readnone -declare i1 @__any() nounwind readnone -declare i1 @__all() nounwind readnone -declare i1 @__none() nounwind readnone +define i64 @__movmsk() nounwind readnone alwaysinline { + %intmask = bitcast %0 to i16 + %res = zext i16 %intmask to i64 + ret i64 %res +} + +define i1 @__any() nounwind readnone alwaysinline { + %intmask = bitcast %0 to i16 + %res = icmp ne i16 %intmask, 0 + ret i1 %res +} + +define i1 @__all() nounwind readnone alwaysinline { + %intmask = bitcast %0 to i16 + %res = icmp eq i16 %intmask, 65535 + ret i1 %res +} + +define i1 @__none() nounwind readnone alwaysinline { + %intmask = bitcast %0 to i16 + %res = icmp eq i16 %intmask, 0 + ret i1 %res +} ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; horizontal int8/16 ops From f2743a6dc524aaa570bbf5d9080cc3ea2fd24fd4 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 7 May 2015 14:24:53 +0300 Subject: [PATCH 13/23] [AVX512]: masked_load_i8/16/32/64 --- builtins/target-avx512-common.ll | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index 287dc00f..e6a9d70c 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -385,11 +385,10 @@ define i64 @__reduce_max_uint64(<16 x i64>) nounwind readnone alwaysinline { ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; unaligned loads/loads+broadcasts - -declare @__masked_load_i8(i8 * nocapture, %mask) nounwind readonly -declare @__masked_load_i16(i8 * nocapture, %mask) nounwind readonly -declare @__masked_load_i32(i8 * nocapture, %mask) nounwind readonly -declare @__masked_load_i64(i8 * nocapture, %mask) nounwind readonly +masked_load(i8, 1) +masked_load(i16, 2) +masked_load(i32, 4) +masked_load(i64, 8) masked_load_float_double() From 66b94fc37cb2bba7e9f093b9a11c2858092957d7 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 7 May 2015 15:05:32 +0300 Subject: [PATCH 14/23] [AVX512]: add default -sde- wrapexe to runtests.py for knl-avx512 target; float/double varying rounding --- builtins/target-avx512-common.ll | 44 +++++++++++++++++++++++++++----- run_tests.py | 5 +++- 2 files changed, 42 insertions(+), 7 deletions(-) diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index e6a9d70c..641585a8 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -119,12 +119,44 @@ declare double @__round_uniform_double(double) nounwind readnone declare double @__floor_uniform_double(double) nounwind readnone declare double @__ceil_uniform_double(double) nounwind readnone -declare @__round_varying_float() nounwind readnone -declare @__floor_varying_float() nounwind readnone -declare @__ceil_varying_float() nounwind readnone -declare @__round_varying_double() nounwind readnone -declare @__floor_varying_double() nounwind readnone -declare @__ceil_varying_double() nounwind readnone + 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; rounding floats + +declare <8 x float> @llvm.x86.avx.round.ps.256(<8 x float>, i32) nounwind readnone +define <16 x float> @__round_varying_float(<16 x float>) nounwind readonly alwaysinline { + ; roundps, round mode nearest 0b00 | don't signal precision exceptions 0b1000 = 8 + round8to16(%0, 8) +} + +define <16 x float> @__floor_varying_float(<16 x float>) nounwind readonly alwaysinline { + ; roundps, round down 0b01 | don't signal precision exceptions 0b1001 = 9 + round8to16(%0, 9) +} + +define <16 x float> @__ceil_varying_float(<16 x float>) nounwind readonly alwaysinline { + ; roundps, round up 0b10 | don't signal precision exceptions 0b1010 = 10 + round8to16(%0, 10) +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; rounding doubles + +declare <4 x double> @llvm.x86.avx.round.pd.256(<4 x double>, i32) nounwind readnone +define <16 x double> @__round_varying_double(<16 x double>) nounwind readonly alwaysinline { + round4to16double(%0, 8) +} + +define <16 x double> @__floor_varying_double(<16 x double>) nounwind readonly alwaysinline { + round4to16double(%0, 9) +} + +define <16 x double> @__ceil_varying_double(<16 x double>) nounwind readonly alwaysinline { + round4to16double(%0, 10) +} + + + ;; min/max diff --git a/run_tests.py b/run_tests.py index 0dbdd605..bc87487a 100755 --- a/run_tests.py +++ b/run_tests.py @@ -555,7 +555,7 @@ def verify(): "sse4-i8x16", "avx1-i32x4" "avx1-i32x8", "avx1-i32x16", "avx1-i64x4", "avx1.1-i32x8", "avx1.1-i32x16", "avx1.1-i64x4", "avx2-i32x8", "avx2-i32x16", "avx2-i64x4", "generic-1", "generic-4", "generic-8", - "generic-16", "generic-32", "generic-64", "knc", "knl"]] + "generic-16", "generic-32", "generic-64", "knc", "knl", "knl-avx512"]] for i in range (0,len(f_lines)): if f_lines[i][0] == "%": continue @@ -692,6 +692,9 @@ def run_tests(options1, args, print_version): ispc_root = "." 
# checks the required environment otherwise prints an error message + if ((options.target == "knl-avx512") and (options.wrapexe == "")): + options.wrapexe = "sde -knl -- " + if (options.target == "knc"): options.wrapexe = "micnativeloadex" PATH_dir = string.split(os.getenv("PATH"), os.pathsep) From a6b7e717f5794bb9118a9446ad9303efde2e21b1 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 7 May 2015 15:47:22 +0300 Subject: [PATCH 15/23] [AVX512]: gathers/scatters --- builtins/target-avx512-common.ll | 62 ++++++++++++++++++-------------- 1 file changed, 36 insertions(+), 26 deletions(-) diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index 641585a8..cc05288f 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -155,9 +155,6 @@ define <16 x double> @__ceil_varying_double(<16 x double>) nounwind readonly alw round4to16double(%0, 10) } - - - ;; min/max int64minmax() @@ -496,32 +493,45 @@ define void @__masked_store_blend_double(* nocapture, ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; gather/scatter -define(`gather_scatter', ` -declare @__gather_base_offsets32_$1(i8 * nocapture, i32, , - ) nounwind readonly -declare @__gather_base_offsets64_$1(i8 * nocapture, i32, , - ) nounwind readonly -declare @__gather32_$1(, - ) nounwind readonly -declare @__gather64_$1(, - ) nounwind readonly +define(`scatterbo32_64', ` +define void @__scatter_base_offsets32_$1(i8* %ptr, i32 %scale, %offsets, + %vals, %mask) nounwind { + call void @__scatter_factored_base_offsets32_$1(i8* %ptr, <16 x i32> %offsets, + i32 %scale, <16 x i32> zeroinitializer, <16 x $1> %vals, %mask) + ret void +} -declare void @__scatter_base_offsets32_$1(i8* nocapture, i32, , - , ) nounwind -declare void @__scatter_base_offsets64_$1(i8* nocapture, i32, , - , ) nounwind -declare void @__scatter32_$1(, , - ) nounwind -declare void @__scatter64_$1(, , - ) nounwind +define void @__scatter_base_offsets64_$1(i8* %ptr, i32 %scale, %offsets, + %vals, %mask) nounwind { + call void @__scatter_factored_base_offsets64_$1(i8* %ptr, <16 x i64> %offsets, + i32 %scale, <16 x i64> zeroinitializer, <16 x $1> %vals, %mask) + ret void +} ') -gather_scatter(i8) -gather_scatter(i16) -gather_scatter(i32) -gather_scatter(float) -gather_scatter(i64) -gather_scatter(double) + +gen_gather(i8) +gen_gather(i16) +gen_gather(i32) +gen_gather(i64) +gen_gather(float) +gen_gather(double) + +scatterbo32_64(i8) +scatterbo32_64(i16) +scatterbo32_64(i32) +scatterbo32_64(i64) +scatterbo32_64(float) +scatterbo32_64(double) + +gen_scatter(i8) +gen_scatter(i16) +gen_scatter(i32) +gen_scatter(i64) +gen_scatter(float) +gen_scatter(double) + + packed_load_and_store() From 82f5716362eb46f798993bf21dd7ea869546ae0d Mon Sep 17 00:00:00 2001 From: Vsevolod Livinskiy Date: Thu, 7 May 2015 15:16:48 +0300 Subject: [PATCH 16/23] [AVX512]: max/min functions --- builtins/target-avx512-common.ll | 142 ++++++++++++++++++++++++------- 1 file changed, 113 insertions(+), 29 deletions(-) diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index cc05288f..8f3f9c50 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -159,28 +159,121 @@ define <16 x double> @__ceil_varying_double(<16 x double>) nounwind readonly alw int64minmax() -declare float @__max_uniform_float(float, float) nounwind readnone -declare float @__min_uniform_float(float, float) nounwind readnone -declare i32 @__min_uniform_int32(i32, i32) nounwind readnone -declare 
i32 @__max_uniform_int32(i32, i32) nounwind readnone -declare i32 @__min_uniform_uint32(i32, i32) nounwind readnone -declare i32 @__max_uniform_uint32(i32, i32) nounwind readnone -declare double @__min_uniform_double(double, double) nounwind readnone -declare double @__max_uniform_double(double, double) nounwind readnone +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; float min/max -declare @__max_varying_float(, - ) nounwind readnone -declare @__min_varying_float(, - ) nounwind readnone -declare @__min_varying_int32(, ) nounwind readnone -declare @__max_varying_int32(, ) nounwind readnone -declare @__min_varying_uint32(, ) nounwind readnone -declare @__max_varying_uint32(, ) nounwind readnone -declare @__min_varying_double(, - ) nounwind readnone -declare @__max_varying_double(, - ) nounwind readnone +define float @__max_uniform_float(float, float) nounwind readonly alwaysinline { + %cmp = fcmp ogt float %1, %0 + %ret = select i1 %cmp, float %1, float %0 + ret float %ret +} +define float @__min_uniform_float(float, float) nounwind readonly alwaysinline { + %cmp = fcmp ogt float %1, %0 + %ret = select i1 %cmp, float %0, float %1 + ret float %ret +} + +declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind readnone +declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>) nounwind readnone + +define <16 x float> @__max_varying_float(<16 x float>, + <16 x float>) nounwind readonly alwaysinline { + binary8to16(call, float, @llvm.x86.avx.max.ps.256, %0, %1) + ret <16 x float> %call +} + +define <16 x float> @__min_varying_float(<16 x float>, + <16 x float>) nounwind readonly alwaysinline { + binary8to16(call, float, @llvm.x86.avx.min.ps.256, %0, %1) + ret <16 x float> %call +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; int min/max + +define i32 @__min_uniform_int32(i32, i32) nounwind readonly alwaysinline { + %cmp = icmp sgt i32 %1, %0 + %ret = select i1 %cmp, i32 %0, i32 %1 + ret i32 %ret +} + +define i32 @__max_uniform_int32(i32, i32) nounwind readonly alwaysinline { + %cmp = icmp sgt i32 %1, %0 + %ret = select i1 %cmp, i32 %1, i32 %0 + ret i32 %ret +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; unsigned int min/max + +define i32 @__min_uniform_uint32(i32, i32) nounwind readonly alwaysinline { + %cmp = icmp ugt i32 %1, %0 + %ret = select i1 %cmp, i32 %0, i32 %1 + ret i32 %ret +} + +define i32 @__max_uniform_uint32(i32, i32) nounwind readonly alwaysinline { + %cmp = icmp ugt i32 %1, %0 + %ret = select i1 %cmp, i32 %1, i32 %0 + ret i32 %ret +} + +declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readonly +declare <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32>, <8 x i32>) nounwind readonly + +define <16 x i32> @__min_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { + binary8to16(m, i32, @llvm.x86.avx2.pmins.d, %0, %1) + ret <16 x i32> %m +} + +define <16 x i32> @__max_varying_int32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { + binary8to16(m, i32, @llvm.x86.avx2.pmaxs.d, %0, %1) + ret <16 x i32> %m +} + +declare <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32>, <8 x i32>) nounwind readonly +declare <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32>, <8 x i32>) nounwind readonly + +define <16 x i32> @__min_varying_uint32(<16 x i32>, <16 x i32>) nounwind readonly alwaysinline { + binary8to16(m, i32, @llvm.x86.avx2.pminu.d, %0, %1) + ret <16 x i32> %m +} + +define <16 x i32> @__max_varying_uint32(<16 x i32>, <16 
x i32>) nounwind readonly alwaysinline { + binary8to16(m, i32, @llvm.x86.avx2.pmaxu.d, %0, %1) + ret <16 x i32> %m +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; double precision min/max + +define double @__min_uniform_double(double, double) nounwind readnone alwaysinline { + %cmp = fcmp ogt double %1, %0 + %ret = select i1 %cmp, double %0, double %1 + ret double %ret +} + +define double @__max_uniform_double(double, double) nounwind readnone alwaysinline { + %cmp = fcmp ogt double %1, %0 + %ret = select i1 %cmp, double %1, double %0 + ret double %ret +} + +declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwind readnone +declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwind readnone + +define <16 x double> @__min_varying_double(<16 x double>, <16 x double>) nounwind readnone alwaysinline { + binary4to16(ret, double, @llvm.x86.avx.min.pd.256, %0, %1) + ret <16 x double> %ret +} + +define <16 x double> @__max_varying_double(<16 x double>, <16 x double>) nounwind readnone alwaysinline { + binary4to16(ret, double, @llvm.x86.avx.max.pd.256, %0, %1) + ret <16 x double> %ret +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; sqrt/rsqrt/rcp declare float @__rsqrt_uniform_float(float) nounwind readnone @@ -268,7 +361,6 @@ define i16 @__reduce_add_int16(<16 x i16>) nounwind readnone alwaysinline { reduce16(i16, @__add_varying_i16, @__add_uniform_i16) } - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; horizontal float ops @@ -296,7 +388,6 @@ define float @__reduce_max_float(<16 x float>) nounwind readnone alwaysinline { reduce16(float, @__max_varying_float, @__max_uniform_float) } - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; horizontal int32 ops @@ -323,7 +414,6 @@ define i32 @__reduce_max_int32(<16 x i32>) nounwind readnone alwaysinline { reduce16(i32, @__max_varying_int32, @__max_uniform_int32) } - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;; horizontal uint32 ops @@ -335,7 +425,6 @@ define i32 @__reduce_max_uint32(<16 x i32>) nounwind readnone alwaysinline { reduce16(i32, @__max_varying_uint32, @__max_uniform_uint32) } - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; horizontal double ops @@ -369,7 +458,6 @@ define double @__reduce_max_double(<16 x double>) nounwind readnone alwaysinline reduce16(double, @__max_varying_double, @__max_uniform_double) } - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; horizontal int64 ops @@ -388,17 +476,14 @@ define i64 @__reduce_add_int64(<16 x i64>) nounwind readnone alwaysinline { reduce16(i64, @__add_varying_int64, @__add_uniform_int64) } - define i64 @__reduce_min_int64(<16 x i64>) nounwind readnone alwaysinline { reduce16(i64, @__min_varying_int64, @__min_uniform_int64) } - define i64 @__reduce_max_int64(<16 x i64>) nounwind readnone alwaysinline { reduce16(i64, @__max_varying_int64, @__max_uniform_int64) } - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;; horizontal uint64 ops @@ -406,7 +491,6 @@ define i64 @__reduce_min_uint64(<16 x i64>) nounwind readnone alwaysinline { reduce16(i64, @__min_varying_uint64, @__min_uniform_uint64) } - define i64 @__reduce_max_uint64(<16 x i64>) nounwind readnone alwaysinline { reduce16(i64, @__max_varying_uint64, @__max_uniform_uint64) } From 2110708c8e3136ac4d21aa596c2e716f2ad8113d Mon Sep 17 00:00:00 
2001 From: Vsevolod Livinskiy Date: Thu, 7 May 2015 15:42:09 +0300 Subject: [PATCH 17/23] [AVX512]: sqrt/rsqrt/rcp --- builtins/target-avx512-common.ll | 132 ++++++++++++++++++++++++++++--- 1 file changed, 121 insertions(+), 11 deletions(-) diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index 8f3f9c50..34c94dba 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -155,6 +155,7 @@ define <16 x double> @__ceil_varying_double(<16 x double>) nounwind readonly alw round4to16double(%0, 10) } +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; min/max int64minmax() @@ -274,26 +275,135 @@ define <16 x double> @__max_varying_double(<16 x double>, <16 x double>) nounwin } ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; sqrt/rsqrt/rcp +;; rsqrt -declare float @__rsqrt_uniform_float(float) nounwind readnone -declare float @__rcp_uniform_float(float) nounwind readnone -declare float @__sqrt_uniform_float(float) nounwind readnone -declare @__rcp_varying_float() nounwind readnone -declare @__rsqrt_varying_float() nounwind readnone +declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone -declare @__sqrt_varying_float() nounwind readnone +define float @__rsqrt_uniform_float(float) nounwind readonly alwaysinline { + ; uniform float is = extract(__rsqrt_u(v), 0); + %v = insertelement <4 x float> undef, float %0, i32 0 + %vis = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %v) + %is = extractelement <4 x float> %vis, i32 0 -declare double @__sqrt_uniform_double(double) nounwind readnone -declare @__sqrt_varying_double() nounwind readnone + ; Newton-Raphson iteration to improve precision + ; return 0.5 * is * (3. - (v * is) * is); + %v_is = fmul float %0, %is + %v_is_is = fmul float %v_is, %is + %three_sub = fsub float 3., %v_is_is + %is_mul = fmul float %is, %three_sub + %half_scale = fmul float 0.5, %is_mul + ret float %half_scale +} +declare <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float>) nounwind readnone + +define <16 x float> @__rsqrt_varying_float(<16 x float> %v) nounwind readonly alwaysinline { + ; float is = __rsqrt_v(v); + unary8to16(is, float, @llvm.x86.avx.rsqrt.ps.256, %v) + ; return 0.5 * is * (3. - (v * is) * is); + %v_is = fmul <16 x float> %v, %is + %v_is_is = fmul <16 x float> %v_is, %is + %three_sub = fsub <16 x float> , %v_is_is + %is_mul = fmul <16 x float> %is, %three_sub + %half_scale = fmul <16 x float> , %is_mul + ret <16 x float> %half_scale +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; rcp + +declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone + +define float @__rcp_uniform_float(float) nounwind readonly alwaysinline { + ; do the rcpss call + ; uniform float iv = extract(__rcp_u(v), 0); + ; return iv * (2. - v * iv); + %vecval = insertelement <4 x float> undef, float %0, i32 0 + %call = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %vecval) + %scall = extractelement <4 x float> %call, i32 0 + + ; do one N-R iteration to improve precision, as above + %v_iv = fmul float %0, %scall + %two_minus = fsub float 2., %v_iv + %iv_mul = fmul float %scall, %two_minus + ret float %iv_mul +} + +declare <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float>) nounwind readnone + +define <16 x float> @__rcp_varying_float(<16 x float>) nounwind readonly alwaysinline { + ; float iv = __rcp_v(v); + ; return iv * (2. 
- v * iv); + + unary8to16(call, float, @llvm.x86.avx.rcp.ps.256, %0) + ; do one N-R iteration + %v_iv = fmul <16 x float> %0, %call + %two_minus = fsub <16 x float> , %v_iv + %iv_mul = fmul <16 x float> %call, %two_minus + ret <16 x float> %iv_mul +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; sqrt + +declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone + +define float @__sqrt_uniform_float(float) nounwind readonly alwaysinline { + sse_unary_scalar(ret, 4, float, @llvm.x86.sse.sqrt.ss, %0) + ret float %ret +} + +declare <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float>) nounwind readnone + +define <16 x float> @__sqrt_varying_float(<16 x float>) nounwind readonly alwaysinline { + unary8to16(call, float, @llvm.x86.avx.sqrt.ps.256, %0) + ret <16 x float> %call +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; double precision sqrt + +declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone + +define double @__sqrt_uniform_double(double) nounwind alwaysinline { + sse_unary_scalar(ret, 2, double, @llvm.x86.sse2.sqrt.sd, %0) + ret double %ret +} + +declare <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double>) nounwind readnone + +define <16 x double> @__sqrt_varying_double(<16 x double>) nounwind alwaysinline { + unary4to16(ret, double, @llvm.x86.avx.sqrt.pd.256, %0) + ret <16 x double> %ret +} +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; bit ops -declare i32 @__popcnt_int32(i32) nounwind readnone -declare i64 @__popcnt_int64(i64) nounwind readnone +declare i32 @llvm.ctpop.i32(i32) nounwind readnone +define i32 @__popcnt_int32(i32) nounwind readonly alwaysinline { + %call = call i32 @llvm.ctpop.i32(i32 %0) + ret i32 %call +} + +declare i64 @llvm.ctpop.i64(i64) nounwind readnone + +define i64 @__popcnt_int64(i64) nounwind readonly alwaysinline { + %call = call i64 @llvm.ctpop.i64(i64 %0) + ret i64 %call +} ctlztz() +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; FIXME: need either to wire these up to the 8-wide SVML entrypoints, ; or, use the macro to call the 4-wide ones twice with our 8-wide ; vectors... 
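
Note on the sqrt/rsqrt/rcp patch above: the rcp and rsqrt builtins start from the hardware estimates (rcpss/rcpps and rsqrtss/rsqrtps, roughly 12 bits of precision) and apply one Newton-Raphson step, exactly as the inline comments spell out: 0.5 * is * (3 - (v * is) * is) for 1/sqrt(v), and iv * (2 - v * iv) for 1/v. A small scalar C++ check of the same refinement, using the corresponding SSE intrinsics for the initial estimate; this is a sketch for verification, not code taken from the patch:

    #include <xmmintrin.h>

    // 1/sqrt(v): hardware estimate, then one Newton-Raphson step.
    float refined_rsqrt(float v) {
        float is = _mm_cvtss_f32(_mm_rsqrt_ss(_mm_set_ss(v)));
        return 0.5f * is * (3.0f - (v * is) * is);
    }

    // 1/v: hardware estimate, then one Newton-Raphson step.
    float refined_rcp(float v) {
        float iv = _mm_cvtss_f32(_mm_rcp_ss(_mm_set_ss(v)));
        return iv * (2.0f - v * iv);
    }

Each Newton-Raphson step roughly doubles the number of correct bits, which takes the ~12-bit estimate to near full single precision.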
From ef9c98fba8f251d262e4ad37e10c29b1a3e8232d Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 7 May 2015 16:26:09 +0300 Subject: [PATCH 18/23] [AVX512]: uniform float/double round/ceil/floor --- builtins/target-avx512-common.ll | 81 +++++++++++++++++++++++++++++--- 1 file changed, 75 insertions(+), 6 deletions(-) diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index 34c94dba..877827a2 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -107,17 +107,86 @@ define i16 @__float_to_half_uniform(float %v) nounwind readnone { ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; math +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; rounding floats + declare void @__fastmath() nounwind ;; round/floor/ceil -declare float @__round_uniform_float(float) nounwind readnone -declare float @__floor_uniform_float(float) nounwind readnone -declare float @__ceil_uniform_float(float) nounwind readnone +declare <4 x float> @llvm.x86.sse41.round.ss(<4 x float>, <4 x float>, i32) nounwind readnone -declare double @__round_uniform_double(double) nounwind readnone -declare double @__floor_uniform_double(double) nounwind readnone -declare double @__ceil_uniform_double(double) nounwind readnone +define float @__round_uniform_float(float) nounwind readonly alwaysinline { + ; roundss, round mode nearest 0b00 | don't signal precision exceptions 0b1000 = 8 + ; the roundss intrinsic is a total mess--docs say: + ; + ; __m128 _mm_round_ss (__m128 a, __m128 b, const int c) + ; + ; b is a 128-bit parameter. The lowest 32 bits are the result of the rounding function + ; on b0. The higher order 96 bits are copied directly from input parameter a. The + ; return value is described by the following equations: + ; + ; r0 = RND(b0) + ; r1 = a1 + ; r2 = a2 + ; r3 = a3 + ; + ; It doesn't matter what we pass as a, since we only need the r0 value + ; here. So we pass the same register for both. Further, only the 0th + ; element of the b parameter matters + %xi = insertelement <4 x float> undef, float %0, i32 0 + %xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 8) + %rs = extractelement <4 x float> %xr, i32 0 + ret float %rs +} + +define float @__floor_uniform_float(float) nounwind readonly alwaysinline { + ; see above for round_ss instrinsic discussion... + %xi = insertelement <4 x float> undef, float %0, i32 0 + ; roundps, round down 0b01 | don't signal precision exceptions 0b1001 = 9 + %xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 9) + %rs = extractelement <4 x float> %xr, i32 0 + ret float %rs +} + +define float @__ceil_uniform_float(float) nounwind readonly alwaysinline { + ; see above for round_ss instrinsic discussion... 
+ %xi = insertelement <4 x float> undef, float %0, i32 0 + ; roundps, round up 0b10 | don't signal precision exceptions 0b1010 = 10 + %xr = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %xi, <4 x float> %xi, i32 10) + %rs = extractelement <4 x float> %xr, i32 0 + ret float %rs +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; rounding doubles + +declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) nounwind readnone + +define double @__round_uniform_double(double) nounwind readonly alwaysinline { + %xi = insertelement <2 x double> undef, double %0, i32 0 + %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 8) + %rs = extractelement <2 x double> %xr, i32 0 + ret double %rs +} + +define double @__floor_uniform_double(double) nounwind readonly alwaysinline { + ; see above for round_ss instrinsic discussion... + %xi = insertelement <2 x double> undef, double %0, i32 0 + ; roundsd, round down 0b01 | don't signal precision exceptions 0b1001 = 9 + %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 9) + %rs = extractelement <2 x double> %xr, i32 0 + ret double %rs +} + +define double @__ceil_uniform_double(double) nounwind readonly alwaysinline { + ; see above for round_ss instrinsic discussion... + %xi = insertelement <2 x double> undef, double %0, i32 0 + ; roundsd, round up 0b10 | don't signal precision exceptions 0b1010 = 10 + %xr = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %xi, <2 x double> %xi, i32 10) + %rs = extractelement <2 x double> %xr, i32 0 + ret double %rs +} ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; From 3514e03327052817a347e88fe468a192264c945c Mon Sep 17 00:00:00 2001 From: Vsevolod Livinskiy Date: Thu, 7 May 2015 16:10:18 +0300 Subject: [PATCH 19/23] [AVX512]: disable Transcendentals and Trigonometry --- builtins/target-avx512-common.ll | 24 +++++++++++++++++------- ispc.cpp | 6 +++--- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index 877827a2..c26f54de 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -105,13 +105,25 @@ define i16 @__float_to_half_uniform(float %v) nounwind readnone { } ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; math +;; fast math mode + +declare void @llvm.x86.sse.stmxcsr(i8 *) nounwind +declare void @llvm.x86.sse.ldmxcsr(i8 *) nounwind + +define void @__fastmath() nounwind alwaysinline { + %ptr = alloca i32 + %ptr8 = bitcast i32 * %ptr to i8 * + call void @llvm.x86.sse.stmxcsr(i8 * %ptr8) + %oldval = load PTR_OP_ARGS(`i32 ') %ptr + + ; turn on DAZ (64)/FTZ (32768) -> 32832 + %update = or i32 %oldval, 32832 + store i32 %update, i32 *%ptr + call void @llvm.x86.sse.ldmxcsr(i8 * %ptr8) + ret void +} ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; rounding floats - -declare void @__fastmath() nounwind - ;; round/floor/ceil declare <4 x float> @llvm.x86.sse41.round.ss(<4 x float>, <4 x float>, i32) nounwind readnone @@ -815,5 +827,3 @@ declare_nvptx() rsqrtd_decl() rcpd_decl() -transcendetals_decl() -trigonometry_decl() diff --git a/ispc.cpp b/ispc.cpp index 05affd32..d4252eb0 100644 --- a/ispc.cpp +++ b/ispc.cpp @@ -893,10 +893,10 @@ Target::Target(const char *arch, const char *cpu, const char *isa, bool pic, boo this->m_hasHalf = true; this->m_hasRand = true; this->m_hasGather = 
this->m_hasScatter = true; - this->m_hasTranscendentals = true; + this->m_hasTranscendentals = false; // For MIC it is set to true due to performance reasons. The option should be tested. - this->m_hasTrigonometry = true; - this->m_hasRsqrtd = this->m_hasRcpd = true; + this->m_hasTrigonometry = false; + this->m_hasRsqrtd = this->m_hasRcpd = false; this->m_hasVecPrefetch = true; CPUfromISA = CPU_KNL; } From d7cd5986db5d670dcdc744a83fd5cad27998575d Mon Sep 17 00:00:00 2001 From: Vsevolod Livinskiy Date: Thu, 7 May 2015 16:25:09 +0300 Subject: [PATCH 20/23] [AVX512]: disable prefetch --- builtins/target-avx512-common.ll | 2 ++ ispc.cpp | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index c26f54de..02acaafc 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -827,3 +827,5 @@ declare_nvptx() rsqrtd_decl() rcpd_decl() +transcendetals_decl() +trigonometry_decl() diff --git a/ispc.cpp b/ispc.cpp index d4252eb0..9c7f4b54 100644 --- a/ispc.cpp +++ b/ispc.cpp @@ -897,7 +897,7 @@ Target::Target(const char *arch, const char *cpu, const char *isa, bool pic, boo // For MIC it is set to true due to performance reasons. The option should be tested. this->m_hasTrigonometry = false; this->m_hasRsqrtd = this->m_hasRcpd = false; - this->m_hasVecPrefetch = true; + this->m_hasVecPrefetch = false; CPUfromISA = CPU_KNL; } #endif From db29cbe8515901f9405936f8745a5fea21e3ea18 Mon Sep 17 00:00:00 2001 From: Vsevolod Livinskiy Date: Fri, 8 May 2015 14:54:51 +0300 Subject: [PATCH 21/23] [AVX512]: knl arch for clang --- run_tests.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/run_tests.py b/run_tests.py index bc87487a..3c08865c 100755 --- a/run_tests.py +++ b/run_tests.py @@ -270,6 +270,9 @@ def run_test(testname): elif (options.target == "knl"): cc_cmd = "%s -O2 -I. %s %s test_static.cpp -DTEST_SIG=%d %s -o %s" % \ (options.compiler_exe, gcc_arch, "-xMIC-AVX512", match, obj_name, exe_name) + elif (options.target == "knl-avx512"): + cc_cmd = "%s -O2 -I. %s %s test_static.cpp -DTEST_SIG=%d %s -o %s" % \ + (options.compiler_exe, gcc_arch, "-march=knl", match, obj_name, exe_name) else: cc_cmd = "%s -O2 -I. 
%s %s test_static.cpp -DTEST_SIG=%d %s -o %s" % \ (options.compiler_exe, gcc_arch, gcc_isa, match, obj_name, exe_name) From f5e7165537b25abdbf4dfeaa335c306bba2406ae Mon Sep 17 00:00:00 2001 From: Vsevolod Livinskiy Date: Fri, 15 May 2015 15:43:21 +0300 Subject: [PATCH 22/23] [AVX512]: packed_load/store --- builtins/target-avx512-common.ll | 34 +++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index 02acaafc..b645bec0 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -806,9 +806,41 @@ gen_scatter(i64) gen_scatter(float) gen_scatter(double) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; packed_load/store +declare <16 x i32> @llvm.x86.avx512.mask.expand.load.d.512(i8* %addr, <16 x i32> %data, i16 %mask) -packed_load_and_store() +define i32 @__packed_load_active(i32 * %startptr, <16 x i32> * %val_ptr, + <16 x i1> %full_mask) nounwind alwaysinline { + %addr = bitcast i32* %startptr to i8* + %data = load PTR_OP_ARGS(`<16 x i32> ') %val_ptr + %mask = bitcast <16 x i1> %full_mask to i16 + %store_val = call <16 x i32> @llvm.x86.avx512.mask.expand.load.d.512(i8* %addr, <16 x i32> %data, i16 %mask) + store <16 x i32> %store_val, <16 x i32> * %val_ptr + %mask_i32 = zext i16 %mask to i32 + %res = call i32 @llvm.ctpop.i32(i32 %mask_i32) + ret i32 %res +} + +declare void @llvm.x86.avx512.mask.compress.store.d.512(i8* %addr, <16 x i32> %data, i16 %mask) + +define i32 @__packed_store_active(i32 * %startptr, <16 x i32> %vals, + <16 x i1> %full_mask) nounwind alwaysinline { + %addr = bitcast i32* %startptr to i8* + %mask = bitcast <16 x i1> %full_mask to i16 + call void @llvm.x86.avx512.mask.compress.store.d.512(i8* %addr, <16 x i32> %vals, i16 %mask) + %mask_i32 = zext i16 %mask to i32 + %res = call i32 @llvm.ctpop.i32(i32 %mask_i32) + ret i32 %res +} + +define i32 @__packed_store_active2(i32 * %startptr, <16 x i32> %vals, + <16 x i1> %full_mask) nounwind alwaysinline { + %res = call i32 @__packed_store_active(i32 * %startptr, <16 x i32> %vals, + <16 x i1> %full_mask) + ret i32 %res +} ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; prefetch From 5ec16356d0f8cd80cf14289402e18f50f4ab093f Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 21 May 2015 15:29:04 +0300 Subject: [PATCH 23/23] [AVX512]: copyright update --- builtins/target-avx512-common.ll | 2 +- builtins/target-knl.ll | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builtins/target-avx512-common.ll b/builtins/target-avx512-common.ll index b645bec0..2fff6827 100644 --- a/builtins/target-avx512-common.ll +++ b/builtins/target-avx512-common.ll @@ -1,4 +1,4 @@ -;; Copyright (c) 2010-2015, Intel Corporation +;; Copyright (c) 2015, Intel Corporation ;; All rights reserved. ;; ;; Redistribution and use in source and binary forms, with or without diff --git a/builtins/target-knl.ll b/builtins/target-knl.ll index 65146f15..bba27edc 100644 --- a/builtins/target-knl.ll +++ b/builtins/target-knl.ll @@ -1,4 +1,4 @@ -;; Copyright (c) 2010-2014, Intel Corporation +;; Copyright (c) 2015, Intel Corporation ;; All rights reserved. ;; ;; Redistribution and use in source and binary forms, with or without
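
A note on the rounding immediates used in PATCH 18: all six uniform round/floor/ceil definitions funnel through llvm.x86.sse41.round.ss or llvm.x86.sse41.round.sd, and only the 8-bit immediate changes. Bits 1:0 pick the rounding mode (00 nearest, 01 toward negative infinity, 10 toward positive infinity) and bit 3 (value 8) suppresses the precision exception, which gives the 8, 9 and 10 seen in the IR. The C++ sketch below shows the same encodings through the SSE4.1 intrinsics; it assumes <smmintrin.h> and a -msse4.1 build, and the helper names are illustrative rather than anything ispc defines.

    #include <smmintrin.h>   // SSE4.1: _mm_round_ss / _mm_round_sd
    #include <cstdio>

    static inline float round_nearest_float(float v) {
        __m128 x = _mm_set_ss(v);
        // _MM_FROUND_TO_NEAREST_INT (0) | _MM_FROUND_NO_EXC (8) == 8, as in __round_uniform_float
        x = _mm_round_ss(x, x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
        return _mm_cvtss_f32(x);
    }

    static inline float floor_float(float v) {
        __m128 x = _mm_set_ss(v);
        // _MM_FROUND_TO_NEG_INF (1) | _MM_FROUND_NO_EXC (8) == 9, as in __floor_uniform_float
        x = _mm_round_ss(x, x, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
        return _mm_cvtss_f32(x);
    }

    static inline double ceil_double(double v) {
        __m128d x = _mm_set_sd(v);
        // _MM_FROUND_TO_POS_INF (2) | _MM_FROUND_NO_EXC (8) == 10, as in __ceil_uniform_double
        x = _mm_round_sd(x, x, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
        return _mm_cvtsd_f64(x);
    }

    int main() {
        // round-to-nearest-even, floor, ceil: prints "2 -2 -1"
        std::printf("%g %g %g\n", round_nearest_float(2.5f), floor_float(-1.25f), ceil_double(-1.25));
        return 0;
    }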
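
PATCH 19 also gives @__fastmath a real body: it reads MXCSR with stmxcsr, ORs in 32832 and writes it back with ldmxcsr. 32832 is FTZ (bit 15, 32768) plus DAZ (bit 6, 64), so denormal results are flushed to zero and denormal inputs are treated as zero. Roughly the same effect, written against the _mm_getcsr/_mm_setcsr intrinsics from <xmmintrin.h>; the helper name and the small demo are this sketch's, not ispc's.

    #include <xmmintrin.h>   // _mm_getcsr / _mm_setcsr
    #include <cstdio>

    static inline void enable_ftz_daz() {
        unsigned int csr = _mm_getcsr();   // equivalent of the stmxcsr in @__fastmath
        csr |= 0x8040;                     // FTZ (32768) | DAZ (64) = 32832
        _mm_setcsr(csr);                   // equivalent of the ldmxcsr
    }

    int main() {
        enable_ftz_daz();
        volatile float tiny = 1e-40f;        // subnormal input
        std::printf("%g\n", tiny * 1.0f);    // 0 once DAZ is on (SSE math, e.g. x86-64)
        return 0;
    }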
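
For PATCH 22, __packed_load_active and __packed_store_active map onto the AVX-512 masked expand-load and compress-store intrinsics and return the popcount of the 16-bit mask. A scalar C++ reference model of those semantics may help when checking the IR; the _ref names, the plain-array signatures and the demo mask are invented for this sketch, only the behaviour is taken from the patch.

    #include <cstdint>
    #include <cstdio>

    // __packed_load_active: for each set bit i of the mask (low to high), load the next
    // consecutive int32 from startptr into lane i; inactive lanes keep their old value.
    // Returns the number of active lanes (the ctpop of the mask in the IR).
    static int packed_load_active_ref(const int32_t *startptr, int32_t lanes[16], uint16_t mask) {
        int next = 0;
        for (int i = 0; i < 16; ++i)
            if (mask & (1u << i))
                lanes[i] = startptr[next++];
        return next;
    }

    // __packed_store_active: write the active lanes out contiguously, again returning
    // the number of lanes written.
    static int packed_store_active_ref(int32_t *startptr, const int32_t lanes[16], uint16_t mask) {
        int next = 0;
        for (int i = 0; i < 16; ++i)
            if (mask & (1u << i))
                startptr[next++] = lanes[i];
        return next;
    }

    int main() {
        const int32_t src[3] = {10, 20, 30};
        int32_t lanes[16] = {0};
        int n = packed_load_active_ref(src, lanes, 0x0025);    // lanes 0, 2 and 5 active
        std::printf("%d %d %d %d\n", n, lanes[0], lanes[2], lanes[5]);   // 3 10 20 30

        int32_t out[3] = {0};
        packed_store_active_ref(out, lanes, 0x0025);
        std::printf("%d %d %d\n", out[0], out[1], out[2]);               // 10 20 30
        return 0;
    }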