From 292962e8b04e6cae410a9edfa74c96952cc5c5e0 Mon Sep 17 00:00:00 2001
From: Anton Mitrokhin
Date: Fri, 20 Mar 2015 15:19:26 +0300
Subject: [PATCH 01/33] modified run_tests.py to support knl compilation

---
 run_tests.py | 29 ++++++++++++++++++++++-------
 1 file changed, 22 insertions(+), 7 deletions(-)

diff --git a/run_tests.py b/run_tests.py
index ec756cf8..170a0b73 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -165,6 +165,9 @@ def run_test(testname):
     if (options.target == "knc"):
         ispc_cmd = ispc_exe_rel + " --werror --nowrap %s --arch=%s --target=%s" % \
                    (filename, options.arch, "generic-16")
+    elif (options.target == "knl"):
+        ispc_cmd = ispc_exe_rel + " --werror --nowrap %s --arch=%s --target=%s" % \
+                   (filename, options.arch, "generic-16")
     else:
         ispc_cmd = ispc_exe_rel + " --werror --nowrap %s --arch=%s --target=%s" % \
                    (filename, options.arch, options.target)
@@ -264,6 +267,9 @@ def run_test(testname):
     if (options.target == "knc"):
         cc_cmd = "%s -O2 -I. %s %s test_static.cpp -DTEST_SIG=%d %s -o %s" % \
                  (options.compiler_exe, gcc_arch, "-mmic", match, obj_name, exe_name)
+    elif (options.target == "knl"):
+        cc_cmd = "%s -O2 -I. %s %s test_static.cpp -DTEST_SIG=%d %s -o %s" % \
+                 (options.compiler_exe, gcc_arch, "-xMIC-AVX512", match, obj_name, exe_name)
     else:
         cc_cmd = "%s -O2 -I. %s %s test_static.cpp -DTEST_SIG=%d %s -o %s" % \
                  (options.compiler_exe, gcc_arch, gcc_isa, match, obj_name, exe_name)
@@ -285,6 +291,9 @@ def run_test(testname):
     if (options.target == "knc"):
         ispc_cmd = ispc_exe_rel + " --woff %s -o %s --arch=%s --target=%s" % \
                    (filename, obj_name, options.arch, "generic-16")
+    elif (options.target == "knl"):
+        ispc_cmd = ispc_exe_rel + " --woff %s -o %s --arch=%s --target=%s" % \
+                   (filename, obj_name, options.arch, "generic-16")
     else:
         ispc_cmd = ispc_exe_rel + " --woff %s -o %s --arch=%s --target=%s" % \
                    (filename, obj_name, options.arch, options.target)
@@ -546,7 +555,7 @@ def verify():
               "sse4-i8x16", "avx1-i32x4" "avx1-i32x8", "avx1-i32x16", "avx1-i64x4",
               "avx1.1-i32x8", "avx1.1-i32x16", "avx1.1-i64x4", "avx2-i32x8",
               "avx2-i32x16", "avx2-i64x4", "generic-1", "generic-4", "generic-8",
-              "generic-16", "generic-32", "generic-64", "knc"]]
+              "generic-16", "generic-32", "generic-64", "knc", "knl"]]
     for i in range (0,len(f_lines)):
         if f_lines[i][0] == "%":
             continue
@@ -622,7 +631,7 @@ def run_tests(options1, args, print_version):
     global is_generic_target
     is_generic_target = ((options.target.find("generic-") != -1 and
                           options.target != "generic-1" and options.target != "generic-x1") or
-                         options.target == "knc")
+                         options.target == "knc" or options.target == "knl")

     global is_nvptx_target
     is_nvptx_target = (options.target.find("nvptx") != -1)
@@ -650,10 +659,15 @@ def run_tests(options1, args, print_version):
         elif options.target == "knc":
             error("No knc #include specified; using examples/intrinsics/knc.h\n", 2)
             options.include_file = "examples/intrinsics/knc.h"
+        elif options.target == "knl":
+            error("No knl #include specified; using examples/intrinsics/knl.h\n", 2)
+            options.include_file = "examples/intrinsics/knl.h"

     if options.compiler_exe == None:
         if (options.target == "knc"):
             options.compiler_exe = "icpc"
+        elif (options.target == "knl"):
+            options.compiler_exe = "icpc"
         elif is_windows:
             options.compiler_exe = "cl.exe"
         else:
@@ -699,7 +713,7 @@ def run_tests(options1, args, print_version):
             OS = "Linux"
     if not (OS == 'Linux'):
-        error ("knc target is supported only on Linux", 1)
     # if no specific test files are specified, run all of
the tests in tests/, # failing_tests/, and tests_errors/ if len(args) == 0: @@ -890,11 +904,12 @@ if __name__ == "__main__": parser.add_option("-f", "--ispc-flags", dest="ispc_flags", help="Additional flags for ispc (-g, -O1, ...)", default="") parser.add_option('-t', '--target', dest='target', - help='Set compilation target (sse2-i32x4, sse2-i32x8, sse4-i32x4, sse4-i32x8, sse4-i16x8, sse4-i8x16, avx1-i32x8, avx1-i32x16, avx1.1-i32x8, avx1.1-i32x16, avx2-i32x8, avx2-i32x16, generic-x1, generic-x4, generic-x8, generic-x16, generic-x32, generic-x64, knc)', - default="sse4") + help=('Set compilation target (sse2-i32x4, sse2-i32x8, sse4-i32x4, sse4-i32x8, ' + + 'sse4-i16x8, sse4-i8x16, avx1-i32x8, avx1-i32x16, avx1.1-i32x8, avx1.1-i32x16, ' + + 'avx2-i32x8, avx2-i32x16, generic-x1, generic-x4, generic-x8, generic-x16, ' + + 'generic-x32, generic-x64, knc, knl)'), default="sse4") parser.add_option('-a', '--arch', dest='arch', - help='Set architecture (arm, x86, x86-64)', - default="x86-64") + help='Set architecture (arm, x86, x86-64)',default="x86-64") parser.add_option("-c", "--compiler", dest="compiler_exe", help="C/C++ compiler binary to use to run tests", default=None) parser.add_option('-o', '--no-opt', dest='no_opt', help='Disable optimization', From 8cb5f311c6d03a0d282497992c9b92acf6a9fe62 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Fri, 20 Mar 2015 15:55:00 +0300 Subject: [PATCH 02/33] knl (through sde) target support for alloy.py --- alloy.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/alloy.py b/alloy.py index e07bcfd9..4c37007c 100755 --- a/alloy.py +++ b/alloy.py @@ -293,8 +293,10 @@ def check_targets(): AVX = ["avx1-i32x4", "avx1-i32x8", "avx1-i32x16", "avx1-i64x4"] AVX11 = ["avx1.1-i32x8","avx1.1-i32x16","avx1.1-i64x4"] AVX2 = ["avx2-i32x8", "avx2-i32x16", "avx2-i64x4"] + KNL = ["knl"] - targets = [["AVX2", AVX2, False], ["AVX1.1", AVX11, False], ["AVX", AVX, False], ["SSE4", SSE4, False], ["SSE2", SSE2, False]] + targets = [["AVX2", AVX2, False], ["AVX1.1", AVX11, False], ["AVX", AVX, False], ["SSE4", SSE4, False], + ["SSE2", SSE2, False], ["KNL", KNL, False]] f_lines = take_lines("check_isa.exe", "first") for i in range(0,5): if targets[i][0] in f_lines: @@ -329,6 +331,8 @@ def check_targets(): # here we have SDE f_lines = take_lines(sde_exists + " -help", "all") for i in range(0,len(f_lines)): + if targets[5][2] == False and "knl" in f_lines[i]: + answer_sde = answer_sde + [["-knl", "knl"]] if targets[3][2] == False and "wsm" in f_lines[i]: answer_sde = answer_sde + [["-wsm", "sse4-i32x4"], ["-wsm", "sse4-i32x8"], ["-wsm", "sse4-i16x8"], ["-wsm", "sse4-i8x16"]] if targets[2][2] == False and "snb" in f_lines[i]: @@ -872,8 +876,8 @@ if __name__ == '__main__': "Try to build compiler with all LLVM\n\talloy.py -r --only=build\n" + "Performance validation run with 10 runs of each test and comparing to branch 'old'\n\talloy.py -r --only=performance --compare-with=old --number=10\n" + "Validation run. 
Update fail_db.txt with new fails, send results to my@my.com\n\talloy.py -r --update-errors=F --notify='my@my.com'\n" + - "Test KNC target (not tested when tested all supported targets, so should be set explicitly via --only-targets)\n\talloy.py -r --only='stability' --only-targets='knc'\n") - + "Test KNC target (not tested when tested all supported targets, so should be set explicitly via --only-targets)\n\talloy.py -r --only='stability' --only-targets='knc'\n" + + "Test KNL target (requires sde)\n\talloy.py -r --only='stability' --only-targets='knl'\n") num_threads="%s" % multiprocessing.cpu_count() parser = MyParser(usage="Usage: alloy.py -r/-b [options]", epilog=examples) From 0ffea6832d852a760a7d494ecaea0bc0c4ebd5e1 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Fri, 20 Mar 2015 15:56:57 +0300 Subject: [PATCH 03/33] add knl.h to test knl target (need to be rewritten with AVX-512F intrinsics) --- examples/intrinsics/knl.h | 3977 +++++++++++++++++++++++++++++++++++++ 1 file changed, 3977 insertions(+) create mode 100644 examples/intrinsics/knl.h diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h new file mode 100644 index 00000000..385cc08c --- /dev/null +++ b/examples/intrinsics/knl.h @@ -0,0 +1,3977 @@ +/** + Copyright (c) 2015, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include // INT_MIN +#include +#include +#include +#include +#include + +#ifndef __INTEL_COMPILER +#error "Only Intel(R) C++ Compiler is supported" +#endif + +#include +#include + +#include // for operator<<(m512[i]) +#include // for operator<<(m512[i]) + +#if __INTEL_COMPILER < 1500 +#warning "Your compiler version is outdated which can reduce performance in some cases. Please, update your compiler!" 
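+// NOTE: per the run_tests.py changes in [PATCH 01/33], "--target=knl" is lowered to
+// ispc's generic-16 target with this header set as options.include_file, and the C++
+// side of each test is built with icpc using -xMIC-AVX512. As the commit message says,
+// the implementation below still leans on KNC-era intrinsics and is expected to be
+// rewritten with native AVX-512F intrinsics.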
+#endif + + +#if 0 + #define STRING(x) #x + #define TOSTRING(x) STRING(x) + #define PING std::cout << __FILE__ << " (" << __LINE__ << "): " << __FUNCTION__ << std::endl + #define PRINT(x) std::cout << STRING(x) << " = " << (x) << std::endl + #define PRINT2(x,y) std::cout << STRING(x) << " = " << (x) << ", " << STRING(y) << " = " << (y) << std::endl + #define PRINT3(x,y,z) std::cout << STRING(x) << " = " << (x) << ", " << STRING(y) << " = " << (y) << ", " << STRING(z) << " = " << (z) << std::endl + #define PRINT4(x,y,z,w) std::cout << STRING(x) << " = " << (x) << ", " << STRING(y) << " = " << (y) << ", " << STRING(z) << " = " << (z) << ", " << STRING(w) << " = " << (w) << std::endl +#endif + +#define FORCEINLINE __forceinline +#ifdef _MSC_VER +#define PRE_ALIGN(x) /*__declspec(align(x))*/ +#define POST_ALIGN(x) +#define roundf(x) (floorf(x + .5f)) +#define round(x) (floor(x + .5)) +#else +#define PRE_ALIGN(x) +#define POST_ALIGN(x) __attribute__ ((aligned(x))) +#endif + +#define KNC 1 // Required by cbackend +#define KNL 1 +extern "C" { + int printf(const unsigned char *, ...); + int puts(unsigned char *); + unsigned int putchar(unsigned int); + int fflush(void *); + uint8_t *memcpy(uint8_t *, uint8_t *, uint64_t); + uint8_t *memset(uint8_t *, uint8_t, uint64_t); + void memset_pattern16(void *, const void *, uint64_t); +} + +typedef float __vec1_f; +typedef double __vec1_d; +typedef int8_t __vec1_i8; +typedef int16_t __vec1_i16; +typedef int32_t __vec1_i32; +typedef int64_t __vec1_i64; + +struct __vec16_i32; + +typedef struct PRE_ALIGN(2) __vec16_i1 +{ + FORCEINLINE operator __mmask16() const { return v; } + FORCEINLINE __vec16_i1() { } + FORCEINLINE __vec16_i1(const __mmask16 &vv) : v(vv) { } + FORCEINLINE __vec16_i1(bool v0, bool v1, bool v2, bool v3, + bool v4, bool v5, bool v6, bool v7, + bool v8, bool v9, bool v10, bool v11, + bool v12, bool v13, bool v14, bool v15) { + v = ((v0 & 1) | + ((v1 & 1) << 1) | + ((v2 & 1) << 2) | + ((v3 & 1) << 3) | + ((v4 & 1) << 4) | + ((v5 & 1) << 5) | + ((v6 & 1) << 6) | + ((v7 & 1) << 7) | + ((v8 & 1) << 8) | + ((v9 & 1) << 9) | + ((v10 & 1) << 10) | + ((v11 & 1) << 11) | + ((v12 & 1) << 12) | + ((v13 & 1) << 13) | + ((v14 & 1) << 14) | + ((v15 & 1) << 15)); + } + FORCEINLINE uint8_t operator[](const int i) const { return ((v >> i) & 1); } + FORCEINLINE uint8_t operator[](const int i) { return ((v >> i) & 1); } + __mmask16 v; +} POST_ALIGN(2) __vec16_i1; + +typedef struct PRE_ALIGN(64) __vec16_f { + FORCEINLINE operator __m512() const { return v; } + FORCEINLINE __vec16_f() : v(_mm512_undefined_ps()) { } + FORCEINLINE __vec16_f(const __m512 &in) : v(in) {} + FORCEINLINE __vec16_f(const __vec16_f &o) : v(o.v) {} + FORCEINLINE __vec16_f& operator =(const __vec16_f &o) { v=o.v; return *this; } + FORCEINLINE __vec16_f(float v00, float v01, float v02, float v03, + float v04, float v05, float v06, float v07, + float v08, float v09, float v10, float v11, + float v12, float v13, float v14, float v15) { + v = _mm512_set_16to16_ps(v15, v14, v13, v12, v11, v10, v09, v08, v07, v06, v05, v04, v03, v02, v01, v00); + } + FORCEINLINE const float& operator[](const int i) const { return ((float*)this)[i]; } + FORCEINLINE float& operator[](const int i) { return ((float*)this)[i]; } + __m512 v; +} POST_ALIGN(64) __vec16_f; + +typedef struct PRE_ALIGN(64) __vec16_d { + FORCEINLINE __vec16_d() : v1(_mm512_undefined_pd()), v2(_mm512_undefined_pd()) {} + FORCEINLINE __vec16_d(const __vec16_d &o) : v1(o.v1), v2(o.v2) {} + FORCEINLINE __vec16_d(const __m512d _v1, const __m512d 
_v2) : v1(_v1), v2(_v2) {} + FORCEINLINE __vec16_d& operator =(const __vec16_d &o) { v1=o.v1; v2=o.v2; return *this; } + FORCEINLINE __vec16_d(double v00, double v01, double v02, double v03, + double v04, double v05, double v06, double v07, + double v08, double v09, double v10, double v11, + double v12, double v13, double v14, double v15) { + v1 = _mm512_set_8to8_pd(v15, v14, v13, v12, v11, v10, v09, v08); + v2 = _mm512_set_8to8_pd(v07, v06, v05, v04, v03, v02, v01, v00); + } + FORCEINLINE const double& operator[](const int i) const { return ((double*)this)[i]; } + FORCEINLINE double& operator[](const int i) { return ((double*)this)[i]; } + __m512d v1; + __m512d v2; +} POST_ALIGN(64) __vec16_d; + +typedef struct PRE_ALIGN(64) __vec16_i32 { + FORCEINLINE operator __m512i() const { return v; } + FORCEINLINE __vec16_i32() : v(_mm512_undefined_epi32()) {} + FORCEINLINE __vec16_i32(const int32_t &in) : v(_mm512_set1_epi32(in)) {} + FORCEINLINE __vec16_i32(const __m512i &in) : v(in) {} + FORCEINLINE __vec16_i32(const __vec16_i32 &o) : v(o.v) {} + FORCEINLINE __vec16_i32& operator =(const __vec16_i32 &o) { v=o.v; return *this; } + FORCEINLINE __vec16_i32(int32_t v00, int32_t v01, int32_t v02, int32_t v03, + int32_t v04, int32_t v05, int32_t v06, int32_t v07, + int32_t v08, int32_t v09, int32_t v10, int32_t v11, + int32_t v12, int32_t v13, int32_t v14, int32_t v15) { + v = _mm512_set_16to16_pi(v15, v14, v13, v12, v11, v10, v09, v08, v07, v06, v05, v04, v03, v02, v01, v00); + } + FORCEINLINE const int32_t& operator[](const int i) const { return ((int32_t*)this)[i]; } + FORCEINLINE int32_t& operator[](const int i) { return ((int32_t*)this)[i]; } + __m512i v; +} POST_ALIGN(64) __vec16_i32; + +typedef struct PRE_ALIGN(64) __vec16_i64 { + FORCEINLINE __vec16_i64() : v_lo(_mm512_undefined_epi32()), v_hi(_mm512_undefined_epi32()) {} + FORCEINLINE __vec16_i64(const __vec16_i64 &o) : v_lo(o.v_lo), v_hi(o.v_hi) {} + FORCEINLINE __vec16_i64(__m512i l, __m512i h) : v_lo(l), v_hi(h) {} + FORCEINLINE __vec16_i64& operator =(const __vec16_i64 &o) { v_lo=o.v_lo; v_hi=o.v_hi; return *this; } + FORCEINLINE __vec16_i64(int64_t v00, int64_t v01, int64_t v02, int64_t v03, + int64_t v04, int64_t v05, int64_t v06, int64_t v07, + int64_t v08, int64_t v09, int64_t v10, int64_t v11, + int64_t v12, int64_t v13, int64_t v14, int64_t v15) { + __m512i v1 = _mm512_set_8to8_epi64(v15, v14, v13, v12, v11, v10, v09, v08); + __m512i v2 = _mm512_set_8to8_epi64(v07, v06, v05, v04, v03, v02, v01, v00); + v_hi = _mm512_mask_permutevar_epi32(v_hi, 0xFF00, + _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), + v1); + v_hi = _mm512_mask_permutevar_epi32(v_hi, 0x00FF, + _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), + v2); + v_lo = _mm512_mask_permutevar_epi32(v_lo, 0xFF00, + _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), + v1); + v_lo = _mm512_mask_permutevar_epi32(v_lo, 0x00FF, + _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), + v2); + } + FORCEINLINE int64_t operator[](const int i) const { + return ((uint64_t(((int32_t*)this)[i])<<32)+((int32_t*)this)[i+16]); } + FORCEINLINE int64_t operator[](const int i) { + return ((uint64_t(((int32_t*)this)[i])<<32)+((int32_t*)this)[i+16]); } + __m512i v_hi; + __m512i v_lo; +} POST_ALIGN(64) __vec16_i64; + +FORCEINLINE __vec16_i64 zmm2hilo(const __m512i v1, const __m512i v2){ + __vec16_i64 v; + v.v_hi = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xFF00, + _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), + v2); + v.v_hi = 
_mm512_mask_permutevar_epi32(v.v_hi, 0x00FF, + _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), + v1); + v.v_lo = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xFF00, + _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), + v2); + v.v_lo = _mm512_mask_permutevar_epi32(v.v_lo, 0x00FF, + _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), + v1); + return v; +} + +FORCEINLINE void hilo2zmm(const __vec16_i64 &v, __m512i &_v1, __m512i &_v2) { + _v2 = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, + _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), + v.v_hi); + _v2 = _mm512_mask_permutevar_epi32(_v2, 0x5555, + _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), + v.v_lo); + _v1 = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, + _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), + v.v_hi); + _v1 = _mm512_mask_permutevar_epi32(_v1, 0x5555, + _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), + v.v_lo); +} + +FORCEINLINE __vec16_i64 hilo2zmm(const __vec16_i64 &v) { + __vec16_i64 ret; + hilo2zmm(v, ret.v_hi, ret.v_lo); + return ret; +} + + +template +struct vec16 { + FORCEINLINE vec16() { } + FORCEINLINE vec16(T v0, T v1, T v2, T v3, T v4, T v5, T v6, T v7, + T v8, T v9, T v10, T v11, T v12, T v13, T v14, T v15) { + v[0] = v0; v[1] = v1; v[2] = v2; v[3] = v3; + v[4] = v4; v[5] = v5; v[6] = v6; v[7] = v7; + v[8] = v8; v[9] = v9; v[10] = v10; v[11] = v11; + v[12] = v12; v[13] = v13; v[14] = v14; v[15] = v15; + } + FORCEINLINE const T& operator[](const int i) const { return v[i]; } + FORCEINLINE T& operator[](const int i) { return v[i]; } + T v[16]; +}; + +PRE_ALIGN(16) struct __vec16_i8 : public vec16 { + FORCEINLINE __vec16_i8() { } + FORCEINLINE __vec16_i8(const int8_t v0, const int8_t v1, const int8_t v2, const int8_t v3, + const int8_t v4, const int8_t v5, const int8_t v6, const int8_t v7, + const int8_t v8, const int8_t v9, const int8_t v10, const int8_t v11, + const int8_t v12, const int8_t v13, const int8_t v14, const int8_t v15) + : vec16(v0, v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15) { } + FORCEINLINE __vec16_i8(const __vec16_i8 &o); + FORCEINLINE __vec16_i8& operator =(const __vec16_i8 &o); +} POST_ALIGN(16); + +PRE_ALIGN(32) struct __vec16_i16 : public vec16 { + FORCEINLINE __vec16_i16() { } + FORCEINLINE __vec16_i16(const __vec16_i16 &o); + FORCEINLINE __vec16_i16& operator =(const __vec16_i16 &o); + FORCEINLINE __vec16_i16(int16_t v0, int16_t v1, int16_t v2, int16_t v3, + int16_t v4, int16_t v5, int16_t v6, int16_t v7, + int16_t v8, int16_t v9, int16_t v10, int16_t v11, + int16_t v12, int16_t v13, int16_t v14, int16_t v15) + : vec16(v0, v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15) { } +} POST_ALIGN(32); + + + +/////////////////////////////////////////////////////////////////////////// +// debugging helpers +// +inline std::ostream &operator<<(std::ostream &out, const __m512i &v) +{ + out << "["; + for (int i=0;i<16;i++) + out << (i!=0?",":"") << std::dec << std::setw(8) << ((int*)&v)[i] << std::dec; + out << "]" << std::flush; + return out; +} + +inline std::ostream &operator<<(std::ostream &out, const __m512 &v) +{ + out << "["; + for (int i=0;i<16;i++) + out << (i!=0?",":"") << ((float*)&v)[i]; + out << "]" << std::flush; + return out; +} + +inline std::ostream &operator<<(std::ostream &out, const __vec16_i1 &v) +{ + out << "["; + for (int i=0;i<16;i++) + out << (i!=0?",":"") << std::dec << std::setw(8) << (int)v[i] << std::dec; + 
out << "]" << std::flush; + return out; +} + +inline std::ostream &operator<<(std::ostream &out, const __vec16_i8 &v) +{ + out << "["; + for (int i=0;i<16;i++) + out << (i!=0?",":"") << std::dec << std::setw(8) << (int)((unsigned char*)&v)[i] << std::dec; + out << "]" << std::flush; + return out; +} + +inline std::ostream &operator<<(std::ostream &out, const __vec16_i16 &v) +{ + out << "["; + for (int i=0;i<16;i++) + out << (i!=0?",":"") << std::dec << std::setw(8) << (int)((uint16_t*)&v)[i] << std::dec; + out << "]" << std::flush; + return out; +} + +inline std::ostream &operator<<(std::ostream &out, const __vec16_d &v) +{ + out << "["; + for (int i=0;i<16;i++) { + out << (i!=0?",":"") << (v[i]); + } + out << "]" << std::flush; + return out; +} + +inline std::ostream &operator<<(std::ostream &out, const __vec16_i64 &v) +{ + out << "["; + uint32_t *ptr = (uint32_t*)&v; + for (int i=0;i<16;i++) { + uint64_t val = (uint64_t(ptr[i])<<32)+ptr[i+16]; + out << (i!=0?",":"") << std::dec << std::setw(8) << ((uint64_t)val) << std::dec; + } + out << "]" << std::flush; + return out; +} + +/////////////////////////////////////////////////////////////////////////// +// macros... + +FORCEINLINE __vec16_i8::__vec16_i8(const __vec16_i8 &o) +{ + for (int i=0;i<16;i++) + v[i] = o.v[i]; +} + +FORCEINLINE __vec16_i8& __vec16_i8::operator=(const __vec16_i8 &o) +{ + for (int i=0;i<16;i++) + v[i] = o.v[i]; + return *this; +} + +FORCEINLINE __vec16_i16::__vec16_i16(const __vec16_i16 &o) +{ + for (int i=0;i<16;i++) + v[i] = o.v[i]; +} + +FORCEINLINE __vec16_i16& __vec16_i16::operator=(const __vec16_i16 &o) +{ + for (int i=0;i<16;i++) + v[i] = o.v[i]; + return *this; +} +/////////////////////////////////////////////////////////////////////////// +// mask ops +/////////////////////////////////////////////////////////////////////////// + +static FORCEINLINE __vec16_i1 __movmsk(__vec16_i1 mask) { + return _mm512_kmov(mask); +} + +static FORCEINLINE bool __any(__vec16_i1 mask) { + return !_mm512_kortestz(mask, mask); +} + +static FORCEINLINE bool __all(__vec16_i1 mask) { + return _mm512_kortestc(mask, mask); +} + +static FORCEINLINE bool __none(__vec16_i1 mask) { + return _mm512_kortestz(mask, mask); +} + +static FORCEINLINE __vec16_i1 __equal_i1(__vec16_i1 a, __vec16_i1 b) { + return _mm512_knot( _mm512_kxor(a, b)); +} + +static FORCEINLINE __vec16_i1 __and(__vec16_i1 a, __vec16_i1 b) { + return _mm512_kand(a, b); +} + +static FORCEINLINE __vec16_i1 __not(__vec16_i1 a) { + return _mm512_knot(a); +} + +static FORCEINLINE __vec16_i1 __and_not1(__vec16_i1 a, __vec16_i1 b) { + return _mm512_kandn(a, b); +} + +static FORCEINLINE __vec16_i1 __and_not2(__vec16_i1 a, __vec16_i1 b) { + return _mm512_kandnr(a, b); +} + +static FORCEINLINE __vec16_i1 __xor(__vec16_i1 a, __vec16_i1 b) { + return _mm512_kxor(a, b); +} + +static FORCEINLINE __vec16_i1 __xnor(__vec16_i1 a, __vec16_i1 b) { + return _mm512_kxnor(a, b); +} + +static FORCEINLINE __vec16_i1 __or(__vec16_i1 a, __vec16_i1 b) { + return _mm512_kor(a, b); +} + +static FORCEINLINE __vec16_i1 __select(__vec16_i1 mask, __vec16_i1 a, + __vec16_i1 b) { + return ((a & mask) | (b & ~mask)); + //return __or(__and(a, mask), __andnr(b, mask)); +} + +static FORCEINLINE __vec16_i1 __select(bool cond, __vec16_i1 a, __vec16_i1 b) { + return cond ? a : b; +} + + +static FORCEINLINE bool __extract_element(__vec16_i1 mask, uint32_t index) { + return (mask & (1 << index)) ? 
true : false; +} + + +static FORCEINLINE int64_t __extract_element(const __vec16_i64 &v, uint32_t index) +{ + //uint *src = (uint *)&v; + const uint *src = (const uint *)&v; + return src[index+16] | (uint64_t(src[index]) << 32); +} + + + + + + + +/* + static FORCEINLINE void __insert_element(__vec16_i1 *vec, int index, + bool val) { + if (val == false) + vec->v &= ~(1 << index); + else + vec->v |= (1 << index); + } + */ + +template static FORCEINLINE __vec16_i1 __load(const __vec16_i1 *p) { + const uint16_t *ptr = (const uint16_t *)p; + __vec16_i1 r; + r = *ptr; + return r; +} + +template static FORCEINLINE void __store(__vec16_i1 *p, __vec16_i1 v) { + uint16_t *ptr = (uint16_t *)p; + *ptr = v; +} + +template static RetVecType __smear_i1(int i); +template <> FORCEINLINE __vec16_i1 __smear_i1<__vec16_i1>(int i) { + return i?0xFFFF:0x0; +} + +template static RetVecType __setzero_i1(); +template <> FORCEINLINE __vec16_i1 __setzero_i1<__vec16_i1>() { + return 0; +} + +template static RetVecType __undef_i1(); +template <> FORCEINLINE __vec16_i1 __undef_i1<__vec16_i1>() { + return __vec16_i1(); +} + + +/////////////////////////////////////////////////////////////////////////// +// int32 +/////////////////////////////////////////////////////////////////////////// + +static FORCEINLINE __vec16_i32 __add(__vec16_i32 a, __vec16_i32 b) { + return _mm512_add_epi32(a, b); +} + +static FORCEINLINE __vec16_i32 __sub(__vec16_i32 a, __vec16_i32 b) { + return _mm512_sub_epi32(a, b); +} + +static FORCEINLINE __vec16_i32 __mul(__vec16_i32 a, __vec16_i32 b) { + return _mm512_mullo_epi32(a, b); +} + +static FORCEINLINE __vec16_i32 __udiv(__vec16_i32 a, __vec16_i32 b) { + return _mm512_div_epu32(a, b); +} + +static FORCEINLINE __vec16_i32 __sdiv(__vec16_i32 a, __vec16_i32 b) { + return _mm512_div_epi32(a, b); +} + +static FORCEINLINE __vec16_i32 __urem(__vec16_i32 a, __vec16_i32 b) { + return _mm512_rem_epu32(a, b); +} + +static FORCEINLINE __vec16_i32 __srem(__vec16_i32 a, __vec16_i32 b) { + return _mm512_rem_epi32(a, b); +} + +static FORCEINLINE __vec16_i32 __or(__vec16_i32 a, __vec16_i32 b) { + return _mm512_or_epi32(a, b); +} + +static FORCEINLINE __vec16_i32 __and(__vec16_i32 a, __vec16_i32 b) { + return _mm512_and_epi32(a, b); +} + +static FORCEINLINE __vec16_i32 __xor(__vec16_i32 a, __vec16_i32 b) { + return _mm512_xor_epi32(a, b); +} + +static FORCEINLINE __vec16_i32 __shl(__vec16_i32 a, __vec16_i32 b) { + return _mm512_sllv_epi32(a, b); +} + +static FORCEINLINE __vec16_i32 __lshr(__vec16_i32 a, __vec16_i32 b) { + return _mm512_srlv_epi32(a, b); +} + +static FORCEINLINE __vec16_i32 __ashr(__vec16_i32 a, __vec16_i32 b) { + return _mm512_srav_epi32(a, b); +} + +static FORCEINLINE __vec16_i32 __shl(__vec16_i32 a, int32_t n) { + return _mm512_slli_epi32(a, n); +} + +static FORCEINLINE __vec16_i32 __lshr(__vec16_i32 a, int32_t n) { + return _mm512_srli_epi32(a, n); +} + +static FORCEINLINE __vec16_i32 __ashr(__vec16_i32 a, int32_t n) { + return _mm512_srai_epi32(a, n); +} + +static FORCEINLINE __vec16_i1 __equal_i32(const __vec16_i32 &a, const __vec16_i32 &b) { + return _mm512_cmpeq_epi32_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __equal_i32_and_mask(const __vec16_i32 &a, const __vec16_i32 &b, + __vec16_i1 m) { + return _mm512_mask_cmpeq_epi32_mask(m, a, b); +} + +static FORCEINLINE __vec16_i1 __not_equal_i32(__vec16_i32 a, __vec16_i32 b) { + return _mm512_cmpneq_epi32_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __not_equal_i32_and_mask(__vec16_i32 a, __vec16_i32 b, + __vec16_i1 m) { + return 
_mm512_mask_cmpneq_epi32_mask(m, a, b); +} + +static FORCEINLINE __vec16_i1 __unsigned_less_equal_i32(__vec16_i32 a, __vec16_i32 b) { + return _mm512_cmple_epu32_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __unsigned_less_equal_i32_and_mask(__vec16_i32 a, __vec16_i32 b, + __vec16_i1 m) { + return _mm512_mask_cmple_epu32_mask(m, a, b); +} + +static FORCEINLINE __vec16_i1 __signed_less_equal_i32(__vec16_i32 a, __vec16_i32 b) { + return _mm512_cmple_epi32_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __signed_less_equal_i32_and_mask(__vec16_i32 a, __vec16_i32 b, + __vec16_i1 m) { + return _mm512_mask_cmple_epi32_mask(m, a, b); +} + +static FORCEINLINE __vec16_i1 __unsigned_greater_equal_i32(__vec16_i32 a, __vec16_i32 b) { + return _mm512_cmpge_epu32_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __unsigned_greater_equal_i32_and_mask(__vec16_i32 a, __vec16_i32 b, + __vec16_i1 m) { + return _mm512_mask_cmpge_epu32_mask(m, a, b); +} + +static FORCEINLINE __vec16_i1 __signed_greater_equal_i32(__vec16_i32 a, __vec16_i32 b) { + return _mm512_cmpge_epi32_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __signed_greater_equal_i32_and_mask(__vec16_i32 a, __vec16_i32 b, + __vec16_i1 m) { + return _mm512_mask_cmpge_epi32_mask(m, a, b); +} + +static FORCEINLINE __vec16_i1 __unsigned_less_than_i32(__vec16_i32 a, __vec16_i32 b) { + return _mm512_cmplt_epu32_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __unsigned_less_than_i32_and_mask(__vec16_i32 a, __vec16_i32 b, + __vec16_i1 m) { + return _mm512_mask_cmplt_epu32_mask(m, a, b); +} + +static FORCEINLINE __vec16_i1 __signed_less_than_i32(__vec16_i32 a, __vec16_i32 b) { + return _mm512_cmplt_epi32_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __signed_less_than_i32_and_mask(__vec16_i32 a, __vec16_i32 b, + __vec16_i1 m) { + return _mm512_mask_cmplt_epi32_mask(m, a, b); +} + +static FORCEINLINE __vec16_i1 __unsigned_greater_than_i32(__vec16_i32 a, __vec16_i32 b) { + return _mm512_cmpgt_epu32_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __unsigned_greater_than_i32_and_mask(__vec16_i32 a, __vec16_i32 b, + __vec16_i1 m) { + return _mm512_mask_cmpgt_epu32_mask(m, a, b); +} + +static FORCEINLINE __vec16_i1 __signed_greater_than_i32(__vec16_i32 a, __vec16_i32 b) { + return _mm512_cmpgt_epi32_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __signed_greater_than_i32_and_mask(__vec16_i32 a, __vec16_i32 b, + __vec16_i1 m) { + return _mm512_mask_cmpgt_epi32_mask(m, a, b); +} + +static FORCEINLINE __vec16_i32 __select(__vec16_i1 mask, + __vec16_i32 a, __vec16_i32 b) { + return _mm512_mask_mov_epi32(b.v, mask, a.v); +} + +static FORCEINLINE __vec16_i32 __select(bool cond, __vec16_i32 a, __vec16_i32 b) { + return cond ? 
a : b; +} + +static FORCEINLINE int32_t __extract_element(__vec16_i32 v, uint32_t index) { + return ((int32_t *)&v)[index]; +} + +static FORCEINLINE void __insert_element(__vec16_i32 *v, uint32_t index, int32_t val) { + ((int32_t *)v)[index] = val; +} + +template static RetVecType __smear_i32(int32_t i); +template <> FORCEINLINE __vec16_i32 __smear_i32<__vec16_i32>(int32_t i) { + return _mm512_set1_epi32(i); +} + +static const __vec16_i32 __ispc_one = __smear_i32<__vec16_i32>(1); +static const __vec16_i32 __ispc_zero = __smear_i32<__vec16_i32>(0); +static const __vec16_i32 __ispc_thirty_two = __smear_i32<__vec16_i32>(32); +static const __vec16_i32 __ispc_ffffffff = __smear_i32<__vec16_i32>(-1); +static const __vec16_i32 __ispc_stride1(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + +template static RetVecType __setzero_i32(); +template <> FORCEINLINE __vec16_i32 __setzero_i32<__vec16_i32>() { + return _mm512_setzero_epi32(); +} + +template static RetVecType __undef_i32(); +template <> FORCEINLINE __vec16_i32 __undef_i32<__vec16_i32>() { + return __vec16_i32(); +} + +static FORCEINLINE __vec16_i32 __broadcast_i32(__vec16_i32 v, int index) { + int32_t val = __extract_element(v, index & 0xf); + return _mm512_set1_epi32(val); +} + +static FORCEINLINE __vec16_i8 __cast_trunc(__vec16_i8, const __vec16_i16 i16) { + return __vec16_i8((uint8_t)i16[0], (uint8_t)i16[1], (uint8_t)i16[2], (uint8_t)i16[3], + (uint8_t)i16[4], (uint8_t)i16[5], (uint8_t)i16[6], (uint8_t)i16[7], + (uint8_t)i16[8], (uint8_t)i16[9], (uint8_t)i16[10], (uint8_t)i16[11], + (uint8_t)i16[12], (uint8_t)i16[13], (uint8_t)i16[14], (uint8_t)i16[15]); +} + +static FORCEINLINE __vec16_i16 __cast_trunc(__vec16_i16, const __vec16_i32 i32) { + __vec16_i16 ret; + __vec16_i32 i32_trunk = _mm512_and_epi32(i32, __smear_i32<__vec16_i32>(65535)); + _mm512_extstore_epi32(ret.v, i32_trunk, _MM_DOWNCONV_EPI32_UINT16, _MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i8 __cast_trunc(__vec16_i8, const __vec16_i32 i32) { + __vec16_i8 ret; + __vec16_i32 i32_trunk = _mm512_and_epi32(i32, __smear_i32<__vec16_i32>(255)); + _mm512_extstore_epi32(ret.v, i32, _MM_DOWNCONV_EPI32_UINT8, _MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i32 __cast_trunc(__vec16_i32, const __vec16_i64 i64) { + return __vec16_i32(i64.v_lo); +} + +static FORCEINLINE __vec16_i16 __cast_trunc(__vec16_i16, const __vec16_i64 i64) { + return __cast_trunc(__vec16_i16(), i64.v_lo); +} + +static FORCEINLINE __vec16_i8 __cast_trunc(__vec16_i8, const __vec16_i64 i64) { + return __cast_trunc(__vec16_i8(), i64.v_lo); +} + +static FORCEINLINE __vec16_i32 __rotate_i32(__vec16_i32 v, int index) { + __vec16_i32 idx = __smear_i32<__vec16_i32>(index); + __vec16_i32 shuffle = _mm512_and_epi32(_mm512_add_epi32(__ispc_stride1, idx), __smear_i32<__vec16_i32>(0xf)); + return _mm512_mask_permutevar_epi32(v, 0xffff, shuffle, v); +} + +static FORCEINLINE __vec16_i32 __shuffle_i32(__vec16_i32 v, __vec16_i32 index) { + return _mm512_mask_permutevar_epi32(v, 0xffff, index, v); +} + +static FORCEINLINE __vec16_i32 __shuffle2_i32(__vec16_i32 v0, __vec16_i32 v1, __vec16_i32 index) { + const __vec16_i1 mask = __signed_less_than_i32(index, __smear_i32<__vec16_i32>(0x10)); + index = __and(index, __smear_i32<__vec16_i32>(0xF)); + __vec16_i32 ret = __undef_i32<__vec16_i32>(); + ret = _mm512_mask_permutevar_epi32(ret, mask, index, v0); + ret = _mm512_mask_permutevar_epi32(ret, __not(mask), index, v1); + return ret; +} + +static FORCEINLINE __vec16_i32 __shift_i32(__vec16_i32 v, int 
index) { + __vec16_i32 mod_index = _mm512_add_epi32(__ispc_stride1, __smear_i32<__vec16_i32>(index)); + __vec16_i1 mask_ge = _mm512_cmpge_epi32_mask (mod_index, __smear_i32<__vec16_i32>(0)); + __vec16_i1 mask_le = _mm512_cmple_epi32_mask (mod_index, __smear_i32<__vec16_i32>(0xF)); + __vec16_i1 mask = mask_ge & mask_le; + __vec16_i32 ret = __smear_i32<__vec16_i32>(0); + ret = _mm512_mask_permutevar_epi32(ret, mask, mod_index, v); + return ret; +} + +template static FORCEINLINE __vec16_i32 __load(const __vec16_i32 *p) { +#ifdef ISPC_FORCE_ALIGNED_MEMORY + return _mm512_load_epi32(p); +#else + __vec16_i32 v; + v = _mm512_extloadunpacklo_epi32(v, p, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); + v = _mm512_extloadunpackhi_epi32(v, (uint8_t*)p+64, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); + return v; +#endif +} + +#if 0 +template <> FORCEINLINE __vec16_i32 __load<64>(const __vec16_i32 *p) { + return _mm512_load_epi32(p); +} +#endif + +template static FORCEINLINE void __store(__vec16_i32 *p, __vec16_i32 v) { +#ifdef ISPC_FORCE_ALIGNED_MEMORY + _mm512_store_epi32(p, v); +#else + _mm512_extpackstorelo_epi32(p, v, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); + _mm512_extpackstorehi_epi32((uint8_t*)p+64, v, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); +#endif +} + +#if 0 +template <> FORCEINLINE void __store<64>(__vec16_i32 *p, __vec16_i32 v) { + _mm512_store_epi32(p, v); +} +#endif + +/////////////////////////////////////////////////////////////////////////// +// int64 +/////////////////////////////////////////////////////////////////////////// + +static FORCEINLINE __vec16_i64 __select(__vec16_i1 mask, + __vec16_i64 a, __vec16_i64 b) { + __vec16_i64 ret; + ret.v_hi = _mm512_mask_mov_epi32(b.v_hi, mask, a.v_hi); + ret.v_lo = _mm512_mask_mov_epi32(b.v_lo, mask, a.v_lo); + return ret; +} + +static FORCEINLINE +void __masked_store_i64(void *p, const __vec16_i64 &v, __vec16_i1 mask) +{ + __m512i v1; + __m512i v2; + v1 = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, + _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), + v.v_hi); + v1 = _mm512_mask_permutevar_epi32(v1, 0x5555, + _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), + v.v_lo); + v2 = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, + _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), + v.v_hi); + v2 = _mm512_mask_permutevar_epi32(v2, 0x5555, + _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), + v.v_lo); + _mm512_mask_store_epi64(p, mask, v2); + _mm512_mask_store_epi64(((uint8_t*)p)+64, mask>>8, v1); +} + +static FORCEINLINE void __insert_element(__vec16_i64 *v, uint32_t index, int64_t val) { + ((int32_t *)&v->v_hi)[index] = val>>32; + ((int32_t *)&v->v_lo)[index] = val; +} + + +template static RetVecType __setzero_i64(); +template <> FORCEINLINE __vec16_i64 __setzero_i64<__vec16_i64>() { + __vec16_i64 ret; + ret.v_lo = _mm512_setzero_epi32(); + ret.v_hi = _mm512_setzero_epi32(); + return ret; +} + +template static RetVecType __undef_i64(); +template <> FORCEINLINE __vec16_i64 __undef_i64<__vec16_i64>() { + return __vec16_i64(); +} + +static FORCEINLINE __vec16_i64 __add(const __vec16_i64 &a, const __vec16_i64 &b) +{ + __mmask16 carry = 0; + __m512i lo = _mm512_addsetc_epi32(a.v_lo, b.v_lo, &carry); + __m512i hi = _mm512_adc_epi32(a.v_hi, carry, b.v_hi, &carry); + return __vec16_i64(lo, hi); +} + +static FORCEINLINE __vec16_i64 __sub(const __vec16_i64 &a, const __vec16_i64 &b) +{ + __mmask16 borrow = 0; + __m512i lo = _mm512_subsetb_epi32(a.v_lo, b.v_lo, &borrow); + __m512i hi = 
_mm512_sbb_epi32(a.v_hi, borrow, b.v_hi, &borrow); + return __vec16_i64(lo, hi); +} + +/*! 64x32 bit mul -- address computations often use a scale that we + know is 32 bits; and 32x64 is faster than 64x64 */ +static FORCEINLINE __vec16_i64 __mul(const __vec16_i32 &a, const __vec16_i64 &b) +{ + return __vec16_i64(_mm512_mullo_epi32(a.v,b.v_lo), + _mm512_add_epi32(_mm512_mullo_epi32(a.v, b.v_hi), + _mm512_mulhi_epi32(a.v, b.v_lo))); +} + +static FORCEINLINE void __abs_i32i64(__m512i &_hi, __m512i &_lo) +{ + /* abs(x) : + * mask = x >> 64; // sign bits + * abs(x) = (x^mask) - mask + */ + const __vec16_i32 mask = __ashr(_hi, __ispc_thirty_two); + __vec16_i32 hi = __xor(_hi, mask); + __vec16_i32 lo = __xor(_lo, mask); + __mmask16 borrow = 0; + _lo = _mm512_subsetb_epi32(lo, mask, &borrow); + _hi = _mm512_sbb_epi32 (hi, borrow, mask, &borrow); +} + +static FORCEINLINE __vec16_i64 __mul(__vec16_i64 a, __vec16_i64 b) +{ + const __vec16_i1 sign = __not_equal_i32(__ashr(__xor(a.v_hi, b.v_hi), __ispc_thirty_two), __ispc_zero); + __abs_i32i64(a.v_hi, a.v_lo); /* abs(a) */ + __abs_i32i64(b.v_hi, b.v_lo); /* abs(b) */ + __vec16_i32 lo = _mm512_mullo_epi32(a.v_lo, b.v_lo); + __vec16_i32 hi_m1 = _mm512_mulhi_epu32(a.v_lo, b.v_lo); + __vec16_i32 hi_m2 = _mm512_mullo_epi32(a.v_hi, b.v_lo); + __vec16_i32 hi_m3 = _mm512_mullo_epi32(a.v_lo, b.v_hi); + + __mmask16 carry = 0; + __vec16_i32 hi_p23 = _mm512_addsetc_epi32(hi_m2, hi_m3, &carry); + __vec16_i32 hi = _mm512_adc_epi32(hi_p23, carry, hi_m1, &carry); + + __vec16_i64 ret_abs(lo, hi); + return __select(sign, __sub(__vec16_i64(__ispc_zero, __ispc_zero), ret_abs), ret_abs); +} + +static FORCEINLINE __vec16_i64 __sdiv(const __vec16_i64 &a, const __vec16_i64 &b) +{ + __vec16_i64 ret; + for(int i=0; i<16; i++) { + int64_t dividend = __extract_element(a, i); + int64_t divisor = __extract_element(b, i); + int64_t quotient = dividend / divisor; // SVML + __insert_element(&ret, i, quotient); + } + return ret; +} + +static FORCEINLINE __vec16_i64 __udiv(const __vec16_i64 &a, const __vec16_i64 &b) +{ + __vec16_i64 ret; + for(int i=0; i<16; i++) { + uint64_t dividend = __extract_element(a, i); + uint64_t divisor = __extract_element(b, i); + uint64_t quotient = dividend / divisor; // SVML + __insert_element(&ret, i, quotient); + } + return ret; +} + +static FORCEINLINE __vec16_i64 __or(__vec16_i64 a, __vec16_i64 b) { + return __vec16_i64(_mm512_or_epi32(a.v_lo, b.v_lo), _mm512_or_epi32(a.v_hi, b.v_hi)); +} + +static FORCEINLINE __vec16_i64 __and(__vec16_i64 a, __vec16_i64 b) { + return __vec16_i64(_mm512_and_epi32(a.v_lo, b.v_lo), _mm512_and_epi32(a.v_hi, b.v_hi)); +} + +static FORCEINLINE __vec16_i64 __xor(__vec16_i64 a, __vec16_i64 b) { + return __vec16_i64(_mm512_xor_epi32(a.v_lo, b.v_lo), _mm512_xor_epi32(a.v_hi, b.v_hi)); +} + +static FORCEINLINE __vec16_i64 __shl(__vec16_i64 a, __vec16_i64 b) { + /* this is a safety gate in case b-shift >= 32 */ + const __vec16_i32 xfer = __select( + __signed_less_than_i32(b.v_lo, __ispc_thirty_two), + __lshr(a.v_lo, __sub(__ispc_thirty_two, b.v_lo)), + __shl (a.v_lo, __sub(b.v_lo, __ispc_thirty_two)) + ); + const __vec16_i32 hi = __or(__shl(a.v_hi, b.v_lo), xfer); + const __vec16_i32 lo = __shl(a.v_lo, b.v_lo); + return __vec16_i64(lo, hi); +} + +static FORCEINLINE __vec16_i64 __shl(__vec16_i64 a, unsigned long long b) { + __vec16_i32 hi; + if (b <= 32) hi = _mm512_or_epi32(_mm512_slli_epi32(a.v_hi, b), _mm512_srli_epi32(a.v_lo, 32-b)); + else hi = _mm512_slli_epi32(a.v_lo, b - 32); + __vec16_i32 lo = _mm512_slli_epi32(a.v_lo, 
b); + return __vec16_i64(lo, hi); +} + +static FORCEINLINE __vec16_i64 __lshr(__vec16_i64 a, __vec16_i64 b) { + /* this is a safety gate in case b-shift >= 32 */ + const __vec16_i32 xfer = __select( + __signed_less_than_i32(b.v_lo, __ispc_thirty_two), + __shl (a.v_hi, __sub(__ispc_thirty_two, b.v_lo)), + __lshr(a.v_hi, __sub(b.v_lo, __ispc_thirty_two)) + ); + const __vec16_i32 lo = __or(__lshr(a.v_lo, b.v_lo), xfer); + const __vec16_i32 hi = __lshr(a.v_hi, b.v_lo); + + return __vec16_i64(lo, hi); +} + +static FORCEINLINE __vec16_i64 __lshr(__vec16_i64 a, unsigned long long b) { + /* this is a safety gate in case b-shift >= 32 */ + __vec16_i32 xfer; + if (32 <= b) xfer = __lshr(a.v_hi, b-32); + else xfer = _mm512_and_epi32(_mm512_slli_epi32(__ispc_ffffffff, 32-b), _mm512_slli_epi32(a.v_hi, 32-b)); + __vec16_i32 hi = _mm512_srli_epi32(a.v_hi, b); + __vec16_i32 lo = _mm512_or_epi32(xfer, _mm512_srli_epi32(a.v_lo, b)); + return __vec16_i64(lo, hi); +} + +static FORCEINLINE __vec16_i64 __ashr(__vec16_i64 a, __vec16_i64 b) { + /* this is a safety gate in case b-shift >= 32 */ + const __vec16_i32 xfer = __select( + __signed_less_than_i32(b.v_lo, __ispc_thirty_two), + __shl (a.v_hi, __sub(__ispc_thirty_two, b.v_lo)), + __ashr(a.v_hi, __sub(b.v_lo, __ispc_thirty_two)) + ); + const __vec16_i32 lo = __or(__lshr(a.v_lo, b.v_lo), xfer); + const __vec16_i32 hi = __ashr(a.v_hi, b.v_lo); + return __vec16_i64(lo, hi); +} + +static FORCEINLINE __vec16_i64 __ashr(__vec16_i64 a, unsigned long long b) { + __vec16_i32 xfer; + if (b < 32) xfer = _mm512_slli_epi32(_mm512_and_epi32(a.v_hi, _mm512_set1_epi32((1< static RetVecType __smear_i64(const int64_t &l); +template <> FORCEINLINE __vec16_i64 __smear_i64<__vec16_i64>(const int64_t &l) { + const int *i = (const int*)&l; + return __vec16_i64(_mm512_set1_epi32(i[0]), _mm512_set1_epi32(i[1])); +} + +static FORCEINLINE __vec16_i64 __rotate_i64(__vec16_i64 v, int index) { + __vec16_i32 idx = __smear_i32<__vec16_i32>(index); + __vec16_i32 shuffle = _mm512_and_epi32(_mm512_add_epi32(__ispc_stride1, idx), __smear_i32<__vec16_i32>(0xf)); + return __vec16_i64(_mm512_mask_permutevar_epi32(v.v_lo, 0xffff, shuffle, v.v_lo), + _mm512_mask_permutevar_epi32(v.v_hi, 0xffff, shuffle, v.v_hi)); +} + +static FORCEINLINE __vec16_i64 __shuffle2_i64(__vec16_i64 v0, __vec16_i64 v1, __vec16_i32 index) { + return __vec16_i64(__shuffle2_i32(v0.v_lo, v1.v_lo, index), __shuffle2_i32(v0.v_hi, v1.v_hi, index)); +} + + +template static FORCEINLINE __vec16_i64 __load(const __vec16_i64 *p) { + __vec16_i32 v1; + __vec16_i32 v2; + const uint8_t*ptr = (const uint8_t*)p; + v2 = _mm512_extloadunpacklo_epi32(v2, ptr, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); + v2 = _mm512_extloadunpackhi_epi32(v2, ptr+64, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); + v1 = _mm512_extloadunpacklo_epi32(v1, ptr+64, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); + v1 = _mm512_extloadunpackhi_epi32(v1, ptr+128, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); + + __vec16_i64 ret; + ret.v_hi = _mm512_mask_permutevar_epi32(ret.v_hi, 0xFF00, + _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), + v1); + ret.v_hi = _mm512_mask_permutevar_epi32(ret.v_hi, 0x00FF, + _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), + v2); + ret.v_lo = _mm512_mask_permutevar_epi32(ret.v_lo, 0xFF00, + _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), + v1); + ret.v_lo = _mm512_mask_permutevar_epi32(ret.v_lo, 0x00FF, + _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), + v2); + return ret; +} + +#if 0 +template <> FORCEINLINE 
__vec16_i64 __load<64>(const __vec16_i64 *p) { + __m512i v2 = _mm512_load_epi32(p); + __m512i v1 = _mm512_load_epi32(((uint8_t*)p)+64); + __vec16_i64 ret; + ret.v_hi = _mm512_mask_permutevar_epi32(ret.v_hi, 0xFF00, + _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), + v1); + ret.v_hi = _mm512_mask_permutevar_epi32(ret.v_hi, 0x00FF, + _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), + v2); + ret.v_lo = _mm512_mask_permutevar_epi32(ret.v_lo, 0xFF00, + _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), + v1); + ret.v_lo = _mm512_mask_permutevar_epi32(ret.v_lo, 0x00FF, + _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), + v2); + return ret; +} + +template <> FORCEINLINE __vec16_i64 __load<128>(const __vec16_i64 *p) { + return __load<64>(p); +} +#endif + +template static FORCEINLINE void __store(__vec16_i64 *p, __vec16_i64 v) { + __m512i v1; + __m512i v2; + v1 = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, + _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), + v.v_hi); + v1 = _mm512_mask_permutevar_epi32(v1, 0x5555, + _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), + v.v_lo); + v2 = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, + _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), + v.v_hi); + v2 = _mm512_mask_permutevar_epi32(v2, 0x5555, + _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), + v.v_lo); + _mm512_extpackstorelo_epi32(p, v2, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); + _mm512_extpackstorehi_epi32((uint8_t*)p+64, v2, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); + _mm512_extpackstorelo_epi32((uint8_t*)p+64, v1, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); + _mm512_extpackstorehi_epi32((uint8_t*)p+128, v1, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); +} +#if 0 +template <> FORCEINLINE void __store<64>(__vec16_i64 *p, __vec16_i64 v) { + __m512i v1; + __m512i v2; + v1 = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, + _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), + v.v_hi); + v1 = _mm512_mask_permutevar_epi32(v1, 0x5555, + _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), + v.v_lo); + v2 = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, + _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), + v.v_hi); + v2 = _mm512_mask_permutevar_epi32(v2, 0x5555, + _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), + v.v_lo); + _mm512_store_epi64(p, v2); + _mm512_store_epi64(((uint8_t*)p)+64, v1); +} + +template <> FORCEINLINE void __store<128>(__vec16_i64 *p, __vec16_i64 v) { + __store<64>(p, v); +} +#endif + +/*! gather vector of 64-bit ints from addresses pointing to uniform ints + + (iw) WARNING: THIS CODE ONLY WORKS FOR GATHERS FROM ARRAYS OF + ***UNIFORM*** INT64's/POINTERS. (problem is that ispc doesn't + expose whether it's from array of uniform or array of varying + poitners, so in here there's no way to tell - only thing we can do + is pick one... + */ +static FORCEINLINE __vec16_i64 +__gather_base_offsets32_i64(uint8_t *base, uint32_t scale, __vec16_i32 offsets, + __vec16_i1 mask) { + __vec16_i64 ret; + ret.v_lo = _mm512_mask_i32extgather_epi32(_mm512_undefined_epi32(), mask, offsets, + base, _MM_UPCONV_EPI32_NONE, scale, + _MM_HINT_NONE); + ret.v_hi = _mm512_mask_i32extgather_epi32(_mm512_undefined_epi32(), mask, offsets, + base+4, _MM_UPCONV_EPI32_NONE, scale, + _MM_HINT_NONE); + return ret; +} + +/*! 
gather vector of 64-bit ints from addresses pointing to uniform ints + + (iw) WARNING: THIS CODE ONLY WORKS FOR GATHERS FROM ARRAYS OF + ***UNIFORM*** INT64's/POINTERS. (problem is that ispc doesn't + expose whether it's from array of uniform or array of varying + poitners, so in here there's no way to tell - only thing we can do + is pick one... + */ + static FORCEINLINE __vec16_i64 +__gather64_i64(__vec16_i64 addr, __vec16_i1 mask) +{ + __vec16_i64 ret; + + // There is no gather instruction with 64-bit offsets in KNC. + // We have to manually iterate over the upper 32 bits ;-) + __vec16_i1 still_to_do = mask; + const __vec16_i32 signed_offsets = _mm512_add_epi32(addr.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); + while (still_to_do) { + int first_active_lane = _mm_tzcnt_32((int)still_to_do); + const uint32_t &hi32 = ((uint*)&addr.v_hi)[first_active_lane]; + __vec16_i1 match = _mm512_mask_cmp_epi32_mask(still_to_do,addr.v_hi, + __smear_i32<__vec16_i32>((int32_t)hi32), + _MM_CMPINT_EQ); + + void * base = (void*)((((unsigned long)hi32) << 32) + (unsigned long)(-(long)INT_MIN)); + ret.v_lo = _mm512_mask_i32extgather_epi32(ret.v_lo, match, signed_offsets, + base, _MM_UPCONV_EPI32_NONE, 1, + _MM_HINT_NONE); + ret.v_hi = _mm512_mask_i32extgather_epi32(ret.v_hi, match, signed_offsets, + base+4, _MM_UPCONV_EPI32_NONE, 1, + _MM_HINT_NONE); + + still_to_do = _mm512_kxor(match, still_to_do); + } + + return ret; +} + + + +/////////////////////////////////////////////////////////////////////////// +// float +/////////////////////////////////////////////////////////////////////////// + +static FORCEINLINE __vec16_f __add(__vec16_f a, __vec16_f b) { + return _mm512_add_ps(a, b); +} + +static FORCEINLINE __vec16_f __sub(__vec16_f a, __vec16_f b) { + return _mm512_sub_ps(a, b); +} + +static FORCEINLINE __vec16_f __mul(__vec16_f a, __vec16_f b) { + return _mm512_mul_ps(a, b); +} + +static FORCEINLINE __vec16_f __div(__vec16_f a, __vec16_f b) { + return _mm512_div_ps(a, b); +} + +static FORCEINLINE __vec16_i1 __equal_float(__vec16_f a, __vec16_f b) { + return _mm512_cmpeq_ps_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __equal_float_and_mask(__vec16_f a, __vec16_f b, + __vec16_i1 m) { + return _mm512_mask_cmpeq_ps_mask(m, a, b); +} + +static FORCEINLINE __vec16_i1 __not_equal_float(__vec16_f a, __vec16_f b) { + return _mm512_cmpneq_ps_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __not_equal_float_and_mask(__vec16_f a, __vec16_f b, + __vec16_i1 m) { + return _mm512_mask_cmpneq_ps_mask(m, a, b); +} + +static FORCEINLINE __vec16_i1 __less_than_float(__vec16_f a, __vec16_f b) { + return _mm512_cmplt_ps_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __less_than_float_and_mask(__vec16_f a, __vec16_f b, + __vec16_i1 m) { + return _mm512_mask_cmplt_ps_mask(m, a, b); +} + +static FORCEINLINE __vec16_i1 __less_equal_float(__vec16_f a, __vec16_f b) { + return _mm512_cmple_ps_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __less_equal_float_and_mask(__vec16_f a, __vec16_f b, + __vec16_i1 m) { + return _mm512_mask_cmple_ps_mask(m, a, b); +} + +static FORCEINLINE __vec16_i1 __greater_than_float(__vec16_f a, __vec16_f b) { + return _mm512_cmpnle_ps_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __greater_than_float_and_mask(__vec16_f a, __vec16_f b, + __vec16_i1 m) { + return _mm512_mask_cmpnle_ps_mask(m, a, b); +} + +static FORCEINLINE __vec16_i1 __greater_equal_float(__vec16_f a, __vec16_f b) { + return _mm512_cmpnlt_ps_mask(a, b); +} + +static FORCEINLINE __vec16_i1 
__greater_equal_float_and_mask(__vec16_f a, __vec16_f b, + __vec16_i1 m) { + return _mm512_mask_cmpnlt_ps_mask(m, a, b); +} + +static FORCEINLINE __vec16_i1 __ordered_float(__vec16_f a, __vec16_f b) { + return _mm512_cmpord_ps_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __ordered_float_and_mask(__vec16_f a, __vec16_f b, __vec16_i1 mask) { + return _mm512_mask_cmpord_ps_mask(mask, a, b); +} + +static FORCEINLINE __vec16_i1 __unordered_float(__vec16_f a, __vec16_f b) { + return _mm512_cmpunord_ps_mask(a, b); +} + +static FORCEINLINE __vec16_i1 __unordered_float_and_mask(__vec16_f a, __vec16_f b, __vec16_i1 mask) { + return _mm512_mask_cmpunord_ps_mask(mask, a, b); +} + +static FORCEINLINE __vec16_f __select(__vec16_i1 mask, __vec16_f a, __vec16_f b) { + return _mm512_mask_mov_ps(b, mask, a); +} + +static FORCEINLINE __vec16_f __select(bool cond, __vec16_f a, __vec16_f b) { + return cond ? a : b; +} + +static FORCEINLINE float __extract_element(__vec16_f v, uint32_t index) { + return ((float *)&v)[index]; +} + +static FORCEINLINE void __insert_element(__vec16_f *v, uint32_t index, float val) { + ((float *)v)[index] = val; +} + +template static RetVecType __smear_float(float f); +template <> FORCEINLINE __vec16_f __smear_float<__vec16_f>(float f) { + return _mm512_set_1to16_ps(f); +} + +template static RetVecType __setzero_float(); +template <> FORCEINLINE __vec16_f __setzero_float<__vec16_f>() { + return _mm512_setzero_ps(); +} + +template static RetVecType __undef_float(); +template <> FORCEINLINE __vec16_f __undef_float<__vec16_f>() { + return __vec16_f(); +} + +static FORCEINLINE __vec16_f __broadcast_float(__vec16_f v, int index) { + int32_t val = __extract_element(v, index & 0xf); + return _mm512_set1_ps(val); +} + +static FORCEINLINE __vec16_f __shuffle_float(__vec16_f v, __vec16_i32 index) { + return _mm512_castsi512_ps(_mm512_mask_permutevar_epi32(_mm512_castps_si512(v), 0xffff, index, _mm512_castps_si512(v))); +} + +static FORCEINLINE __vec16_f __shuffle2_float(__vec16_f v0, __vec16_f v1, __vec16_i32 index) { + __vec16_f ret; + for (int i = 0; i < 16; ++i){ + if (__extract_element(index, i) < 16) + __insert_element(&ret, i, __extract_element(v0, __extract_element(index, i) & 0xF)); + else + __insert_element(&ret, i, __extract_element(v1, __extract_element(index, i) & 0xF)); + } + return ret; +} + +template static FORCEINLINE __vec16_f __load(const __vec16_f *p) { +#ifdef ISPC_FORCE_ALIGNED_MEMORY + return _mm512_load_ps(p); +#else + __vec16_f v; + v = _mm512_extloadunpacklo_ps(v, p, _MM_UPCONV_PS_NONE, _MM_HINT_NONE); + v = _mm512_extloadunpackhi_ps(v, (uint8_t*)p+64, _MM_UPCONV_PS_NONE, _MM_HINT_NONE); + return v; +#endif +} +#if 0 +template <> FORCEINLINE __vec16_f __load<64>(const __vec16_f *p) { + return _mm512_load_ps(p); +} +#endif +template static FORCEINLINE void __store(__vec16_f *p, __vec16_f v) { +#ifdef ISPC_FORCE_ALIGNED_MEMORY + _mm512_store_ps(p, v); +#else + _mm512_extpackstorelo_ps(p, v, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE); + _mm512_extpackstorehi_ps((uint8_t*)p+64, v, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE); +#endif +} +#if 0 +template <> FORCEINLINE void __store<64>(__vec16_f *p, __vec16_f v) { + _mm512_store_ps(p, v); +} +#endif + +/////////////////////////////////////////////////////////////////////////// +// half<->float : this one passes the tests +// source : +// http://stackoverflow.com/questions/1659440/32-bit-to-16-bit-floating-point-conversion +/////////////////////////////////////////////////////////////////////////// +class Float16Compressor +{ + union 
Bits + { + float f; + int32_t si; + uint32_t ui; + }; + + static int const shift = 13; + static int const shiftSign = 16; + + static int32_t const infN = 0x7F800000; // flt32 infinity + static int32_t const maxN = 0x477FE000; // max flt16 normal as a flt32 + static int32_t const minN = 0x38800000; // min flt16 normal as a flt32 + static int32_t const signN = 0x80000000; // flt32 sign bit + + static int32_t const infC = infN >> shift; + static int32_t const nanN = (infC + 1) << shift; // minimum flt16 nan as a flt32 + static int32_t const maxC = maxN >> shift; + static int32_t const minC = minN >> shift; + static int32_t const signC = signN >> shiftSign; // flt16 sign bit + + static int32_t const mulN = 0x52000000; // (1 << 23) / minN + static int32_t const mulC = 0x33800000; // minN / (1 << (23 - shift)) + + static int32_t const subC = 0x003FF; // max flt32 subnormal down shifted + static int32_t const norC = 0x00400; // min flt32 normal down shifted + + static int32_t const maxD = infC - maxC - 1; + static int32_t const minD = minC - subC - 1; + + public: + + static uint16_t compress(float value) + { + Bits v, s; + v.f = value; + uint32_t sign = v.si & signN; + v.si ^= sign; + sign >>= shiftSign; // logical shift + s.si = mulN; + s.si = s.f * v.f; // correct subnormals + v.si ^= (s.si ^ v.si) & -(minN > v.si); + v.si ^= (infN ^ v.si) & -((infN > v.si) & (v.si > maxN)); + v.si ^= (nanN ^ v.si) & -((nanN > v.si) & (v.si > infN)); + v.ui >>= shift; // logical shift + v.si ^= ((v.si - maxD) ^ v.si) & -(v.si > maxC); + v.si ^= ((v.si - minD) ^ v.si) & -(v.si > subC); + return v.ui | sign; + } + + static float decompress(uint16_t value) + { + Bits v; + v.ui = value; + int32_t sign = v.si & signC; + v.si ^= sign; + sign <<= shiftSign; + v.si ^= ((v.si + minD) ^ v.si) & -(v.si > subC); + v.si ^= ((v.si + maxD) ^ v.si) & -(v.si > maxC); + Bits s; + s.si = mulC; + s.f *= v.si; + int32_t mask = -(norC > v.si); + v.si <<= shift; + v.si ^= (s.si ^ v.si) & mask; + v.si |= sign; + return v.f; + } +}; + +static FORCEINLINE float __half_to_float_uniform(int16_t h) +{ + return Float16Compressor::decompress(h); +} +static FORCEINLINE __vec16_f __half_to_float_varying(__vec16_i16 v) +{ + __vec16_f ret; + for (int i = 0; i < 16; ++i) + ret[i] = __half_to_float_uniform(v[i]); + return ret; +} + + +static FORCEINLINE int16_t __float_to_half_uniform(float f) +{ + return Float16Compressor::compress(f); +} +static FORCEINLINE __vec16_i16 __float_to_half_varying(__vec16_f v) +{ + __vec16_i16 ret; + for (int i = 0; i < 16; ++i) + ret[i] = __float_to_half_uniform(v[i]); + return ret; +} + +/////////////////////////////////////////////////////////////////////////// +// double +/////////////////////////////////////////////////////////////////////////// + +static FORCEINLINE __vec16_d __add(__vec16_d a, __vec16_d b) { + __vec16_d ret; + ret.v1 = _mm512_add_pd(a.v1, b.v1); + ret.v2 = _mm512_add_pd(a.v2, b.v2); + return ret; +} + +static FORCEINLINE __vec16_d __sub(__vec16_d a, __vec16_d b) { + __vec16_d ret; + ret.v1 = _mm512_sub_pd(a.v1, b.v1); + ret.v2 = _mm512_sub_pd(a.v2, b.v2); + return ret; +} + +static FORCEINLINE __vec16_d __mul(__vec16_d a, __vec16_d b) { + __vec16_d ret; + ret.v1 = _mm512_mul_pd(a.v1, b.v1); + ret.v2 = _mm512_mul_pd(a.v2, b.v2); + return ret; +} + +static FORCEINLINE __vec16_d __div(__vec16_d a, __vec16_d b) { + __vec16_d ret; + ret.v1 = _mm512_div_pd(a.v1, b.v1); + ret.v2 = _mm512_div_pd(a.v2, b.v2); + return ret; +} + +static FORCEINLINE __vec16_d __sqrt_varying_double(__vec16_d v) { + return 
__vec16_d(_mm512_sqrt_pd(v.v1),_mm512_sqrt_pd(v.v2)); +} + +static FORCEINLINE __vec16_i1 __equal_double(__vec16_d a, __vec16_d b) { + __vec16_i1 ret1; + __vec16_i1 ret2; + ret1 = _mm512_cmpeq_pd_mask(a.v1, b.v1); + ret2 = _mm512_cmpeq_pd_mask(a.v2, b.v2); + return _mm512_kmovlhb(ret1, ret2); +} + +static FORCEINLINE __vec16_i1 __equal_double_and_mask(__vec16_d a, __vec16_d b, + __vec16_i1 m) { + __vec16_i1 ret1; + __vec16_i1 ret2; + ret1 = _mm512_mask_cmpeq_pd_mask(m, a.v1, b.v1); + __vec16_i1 tmp_m = m; + ret2 = _mm512_mask_cmpeq_pd_mask(_mm512_kswapb(tmp_m,tmp_m), a.v2, b.v2); + return _mm512_kmovlhb(ret1, ret2); +} + +static FORCEINLINE __vec16_i1 __not_equal_double(__vec16_d a, __vec16_d b) { + __vec16_i1 ret1; + __vec16_i1 ret2; + ret1 = _mm512_cmpneq_pd_mask(a.v1, b.v1); + ret2 = _mm512_cmpneq_pd_mask(a.v2, b.v2); + return _mm512_kmovlhb(ret1, ret2); +} + +static FORCEINLINE __vec16_i1 __not_equal_double_and_mask(__vec16_d a, __vec16_d b, + __vec16_i1 m) { + __vec16_i1 ret1; + __vec16_i1 ret2; + __vec16_i1 tmp_m = m; + ret1 = _mm512_mask_cmpneq_pd_mask(m, a.v1, b.v1); + ret2 = _mm512_mask_cmpneq_pd_mask(_mm512_kswapb(tmp_m, tmp_m), a.v2, b.v2); + return _mm512_kmovlhb(ret1, ret2); +} + +static FORCEINLINE __vec16_i1 __less_than_double(__vec16_d a, __vec16_d b) { + __vec16_i1 ret1; + __vec16_i1 ret2; + ret1 = _mm512_cmplt_pd_mask(a.v1, b.v1); + ret2 = _mm512_cmplt_pd_mask(a.v2, b.v2); + return _mm512_kmovlhb(ret1, ret2); +} + +static FORCEINLINE __vec16_i1 __less_than_double_and_mask(__vec16_d a, __vec16_d b, + __vec16_i1 m) { + __vec16_i1 ret1; + __vec16_i1 ret2; + __vec16_i1 tmp_m = m; + ret1 = _mm512_mask_cmplt_pd_mask(m, a.v1, b.v1); + ret2 = _mm512_mask_cmplt_pd_mask(_mm512_kswapb(tmp_m, tmp_m), a.v2, b.v2); + return _mm512_kmovlhb(ret1, ret2); +} + +static FORCEINLINE __vec16_i1 __less_equal_double(__vec16_d a, __vec16_d b) { + __vec16_i1 ret1; + __vec16_i1 ret2; + ret1 = _mm512_cmple_pd_mask(a.v1, b.v1); + ret2 = _mm512_cmple_pd_mask(a.v2, b.v2); + return _mm512_kmovlhb(ret1, ret2); +} + +static FORCEINLINE __vec16_i1 __less_equal_double_and_mask(__vec16_d a, __vec16_d b, + __vec16_i1 m) { + __vec16_i1 ret1; + __vec16_i1 ret2; + __vec16_i1 tmp_m = m; + ret1 = _mm512_mask_cmple_pd_mask(m, a.v1, b.v1); + ret2 = _mm512_mask_cmple_pd_mask(_mm512_kswapb(tmp_m, tmp_m), a.v2, b.v2); + return _mm512_kmovlhb(ret1, ret2); +} + +static FORCEINLINE __vec16_i1 __greater_than_double(__vec16_d a, __vec16_d b) { + __vec16_i1 ret1; + __vec16_i1 ret2; + ret1 = _mm512_cmpnle_pd_mask(a.v1, b.v1); + ret2 = _mm512_cmpnle_pd_mask(a.v2, b.v2); + return _mm512_kmovlhb(ret1, ret2); +} + +static FORCEINLINE __vec16_i1 __greater_than_double_and_mask(__vec16_d a, __vec16_d b, + __vec16_i1 m) { + __vec16_i1 ret1; + __vec16_i1 ret2; + __vec16_i1 tmp_m = m; + ret1 = _mm512_mask_cmpnle_pd_mask(m, a.v1, b.v1); + ret2 = _mm512_mask_cmpnle_pd_mask(_mm512_kswapb(tmp_m, tmp_m), a.v2, b.v2); + return _mm512_kmovlhb(ret1, ret2); +} + +static FORCEINLINE __vec16_i1 __greater_equal_double(__vec16_d a, __vec16_d b) { + __vec16_i1 ret1; + __vec16_i1 ret2; + ret1 = _mm512_cmpnlt_pd_mask(a.v1, b.v1); + ret2 = _mm512_cmpnlt_pd_mask(a.v2, b.v2); + return _mm512_kmovlhb(ret1, ret2); +} + +static FORCEINLINE __vec16_i1 __greater_equal_double_and_mask(__vec16_d a, __vec16_d b, + __vec16_i1 m) { + __vec16_i1 ret1; + __vec16_i1 ret2; + __vec16_i1 tmp_m = m; + ret1 = _mm512_mask_cmpnlt_pd_mask(m, a.v1, b.v1); + ret2 = _mm512_mask_cmpnlt_pd_mask(_mm512_kswapb(tmp_m, tmp_m), a.v2, b.v2); + return _mm512_kmovlhb(ret1, ret2); +} + 
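+// The 16-wide double comparisons above and below operate on the two 8-wide halves
+// (v1/v2) separately: each half produces an 8-bit mask, the caller's upper mask byte
+// is brought into the low position with _mm512_kswapb for the v2 compare, and the two
+// results are merged back into a single 16-bit mask with _mm512_kmovlhb.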
+static FORCEINLINE __vec16_i1 __ordered_double(__vec16_d a, __vec16_d b) { + __vec16_i1 ret1; + __vec16_i1 ret2; + ret1 = _mm512_cmpord_pd_mask(a.v1, b.v1); + ret2 = _mm512_cmpord_pd_mask(a.v2, b.v2); + return _mm512_kmovlhb(ret1, ret2); +} + +static FORCEINLINE __vec16_i1 __unordered_double(__vec16_d a, __vec16_d b) { + __vec16_i1 ret1; + __vec16_i1 ret2; + ret1 = _mm512_cmpunord_pd_mask(a.v1, b.v1); + ret2 = _mm512_cmpunord_pd_mask(a.v2, b.v2); + return _mm512_kmovlhb(ret1, ret2); +} + +static FORCEINLINE __vec16_i1 __unordered_double_and_mask(__vec16_d a, __vec16_d b, __vec16_i1 mask) { + __vec16_i1 ret1; + __vec16_i1 ret2; + __vec16_i1 tmp_m = mask; + ret1 = _mm512_mask_cmpunord_pd_mask(mask, a.v1, b.v1); + ret2 = _mm512_mask_cmpunord_pd_mask(_mm512_kswapb(tmp_m, tmp_m), a.v2, b.v2); + return _mm512_kmovlhb(ret1, ret2); +} + +static FORCEINLINE __vec16_d __select(__vec16_i1 mask, __vec16_d a, __vec16_d b) { + __vec16_d ret; + __vec16_i1 tmp_m = mask; + ret.v1 = _mm512_mask_mov_pd(b.v1, mask, a.v1); + ret.v2 = _mm512_mask_mov_pd(b.v2, _mm512_kswapb(tmp_m, tmp_m), a.v2); + return ret; +} + + +static FORCEINLINE __vec16_d __select(bool cond, __vec16_d a, __vec16_d b) { + return cond ? a : b; +} + +static FORCEINLINE double __extract_element(__vec16_d v, uint32_t index) { + return ((double *)&v)[index]; +} + +static FORCEINLINE void __insert_element(__vec16_d *v, uint32_t index, double val) { + ((double *)v)[index] = val; +} + +template static RetVecType __smear_double(double d); +template <> FORCEINLINE __vec16_d __smear_double<__vec16_d>(double d) { + __vec16_d ret; + ret.v1 = _mm512_set1_pd(d); + ret.v2 = _mm512_set1_pd(d); + return ret; +} + +template static RetVecType __setzero_double(); +template <> FORCEINLINE __vec16_d __setzero_double<__vec16_d>() { + __vec16_d ret; + ret.v1 = _mm512_setzero_pd(); + ret.v2 = _mm512_setzero_pd(); + return ret; +} + +template static RetVecType __undef_double(); +template <> FORCEINLINE __vec16_d __undef_double<__vec16_d>() { + return __vec16_d(); +} + +static FORCEINLINE __vec16_d __broadcast_double(__vec16_d v, int index) { + __vec16_d ret; + double val = __extract_element(v, index & 0xf); + ret.v1 = _mm512_set1_pd(val); + ret.v2 = _mm512_set1_pd(val); + return ret; +} + +static FORCEINLINE __vec16_d __shuffle2_double(__vec16_d v0, __vec16_d v1, __vec16_i32 index) { + __vec16_d ret; + for (int i = 0; i < 16; ++i){ + if (__extract_element(index, i) < 16) + __insert_element(&ret, i, __extract_element(v0, __extract_element(index, i) & 0xF)); + else + __insert_element(&ret, i, __extract_element(v1, __extract_element(index, i) & 0xF)); + } + return ret; +} + +template static FORCEINLINE __vec16_d __load(const __vec16_d *p) { + __vec16_d ret; + ret.v1 = _mm512_extloadunpacklo_pd(ret.v1, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); + ret.v1 = _mm512_extloadunpackhi_pd(ret.v1, (uint8_t*)p+64, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); + ret.v2 = _mm512_extloadunpacklo_pd(ret.v2, (uint8_t*)p+64, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); + ret.v2 = _mm512_extloadunpackhi_pd(ret.v2, (uint8_t*)p+128, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); + return ret; +} +#if 0 +template <> FORCEINLINE __vec16_d __load<64>(const __vec16_d *p) { + __vec16_d ret; + ret.v1 = _mm512_load_pd(p); + ret.v2 = _mm512_load_pd(((uint8_t*)p)+64); + return ret; +} + +template <> FORCEINLINE __vec16_d __load<128>(const __vec16_d *p) { + return __load<64>(p); +} +#endif +template static FORCEINLINE void __store(__vec16_d *p, __vec16_d v) { + _mm512_extpackstorelo_pd(p, v.v1, _MM_DOWNCONV_PD_NONE, 
_MM_HINT_NONE); + _mm512_extpackstorehi_pd((uint8_t*)p+64, v.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); + _mm512_extpackstorelo_pd((uint8_t*)p+64, v.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); + _mm512_extpackstorehi_pd((uint8_t*)p+128, v.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); +} +#if 0 +template <> FORCEINLINE void __store<64>(__vec16_d *p, __vec16_d v) { + _mm512_store_pd(p, v.v1); + _mm512_store_pd(((uint8_t*)p)+64, v.v2); +} + +template <> FORCEINLINE void __store<128>(__vec16_d *p, __vec16_d v) { + __store<64>(p, v); +} +#endif +/////////////////////////////////////////////////////////////////////////// +// casts +/////////////////////////////////////////////////////////////////////////// +static FORCEINLINE __vec16_i8 __cast_sext(const __vec16_i8 &, const __vec16_i1 &val) +{ + return __vec16_i8(-val[0], -val[1], -val[2], -val[3], -val[4], -val[5], -val[6], -val[7], + -val[8], -val[9], -val[10], -val[11], -val[12], -val[13], -val[14], -val[15]); +} + +static FORCEINLINE __vec16_i16 __cast_sext(const __vec16_i16 &, const __vec16_i1 &val) +{ + return __vec16_i16(-val[0], -val[1], -val[2], -val[3], -val[4], -val[5], -val[6], -val[7], + -val[8], -val[9], -val[10], -val[11], -val[12], -val[13], -val[14], -val[15]); +} + +static FORCEINLINE __vec16_i16 __cast_sext(const __vec16_i16 &, const __vec16_i8 &val) +{ + return __vec16_i16((int8_t)val[0], (int8_t)val[1], (int8_t)val[2], (int8_t)val[3], + (int8_t)val[4], (int8_t)val[5], (int8_t)val[6], (int8_t)val[7], + (int8_t)val[8], (int8_t)val[9], (int8_t)val[10], (int8_t)val[11], + (int8_t)val[12], (int8_t)val[13], (int8_t)val[14], (int8_t)val[15]); +} + +static FORCEINLINE __vec16_i32 __cast_sext(const __vec16_i32 &, const __vec16_i1 &val) +{ + __vec16_i32 ret = _mm512_setzero_epi32(); + __vec16_i32 one = _mm512_set1_epi32(-1); + return _mm512_mask_mov_epi32(ret, val, one); +} + +static FORCEINLINE __vec16_i32 __cast_sext(const __vec16_i32 &, const __vec16_i8 &val) +{ + //return _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 a = _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + return a; +} + +static FORCEINLINE __vec16_i32 __cast_sext(const __vec16_i32 &, const __vec16_i16 &val) +{ + return _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); +} + + + +static FORCEINLINE __vec16_i64 __cast_sext(const __vec16_i64 &, const __vec16_i1 &val) +{ + __vec16_i32 ret = _mm512_mask_mov_epi32(_mm512_setzero_epi32(), val, _mm512_set1_epi32(-1)); + return __vec16_i64(ret, ret); +} + +static FORCEINLINE __vec16_i64 __cast_sext(const __vec16_i64 &, const __vec16_i8 &val) +{ + __vec16_i32 a = _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + return __vec16_i64(a.v, _mm512_srai_epi32(a.v, 31)); +} + +static FORCEINLINE __vec16_i64 __cast_sext(const __vec16_i64 &, const __vec16_i32 &val) +{ + return __vec16_i64(val.v, _mm512_srai_epi32(val.v, 31)); +} + + + +static FORCEINLINE __vec16_i8 __cast_zext(const __vec16_i8 &, const __vec16_i1 &val) +{ + return __vec16_i8(val[0], val[1], val[2], val[3], val[4], val[5], val[6], val[7], + val[8], val[9], val[10], val[11], val[12], val[13], val[14], val[15]); +} + +static FORCEINLINE __vec16_i16 __cast_zext(const __vec16_i16 &, const __vec16_i1 &val) +{ + return __vec16_i16(val[0], val[1], val[2], val[3], val[4], val[5], val[6], val[7], + val[8], val[9], val[10], val[11], val[12], val[13], val[14], val[15]); +} + +static FORCEINLINE __vec16_i16 
__cast_zext(const __vec16_i16 &, const __vec16_i8 &val) +{ + return __vec16_i16((uint8_t)val[0], (uint8_t)val[1], (uint8_t)val[2], (uint8_t)val[3], + (uint8_t)val[4], (uint8_t)val[5], (uint8_t)val[6], (uint8_t)val[7], + (uint8_t)val[8], (uint8_t)val[9], (uint8_t)val[10], (uint8_t)val[11], + (uint8_t)val[12], (uint8_t)val[13], (uint8_t)val[14], (uint8_t)val[15]); +} + +static FORCEINLINE __vec16_i32 __cast_zext(const __vec16_i32 &, const __vec16_i1 &val) +{ + __vec16_i32 ret = _mm512_setzero_epi32(); + __vec16_i32 one = _mm512_set1_epi32(1); + return _mm512_mask_mov_epi32(ret, val, one); +} + +static FORCEINLINE __vec16_i32 __cast_zext(const __vec16_i32 &, const __vec16_i8 &val) +{ + return _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_UINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); +} + +static FORCEINLINE __vec16_i32 __cast_zext(const __vec16_i32 &, const __vec16_i16 &val) +{ + return _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_UINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); +} + +static FORCEINLINE __vec16_i64 __cast_zext(const __vec16_i64 &, const __vec16_i1 &val) +{ + __vec16_i32 ret_hi = _mm512_setzero_epi32(); + __vec16_i32 ret_lo = _mm512_setzero_epi32(); + __vec16_i32 one = _mm512_set1_epi32(1); + ret_lo = _mm512_mask_mov_epi32(ret_lo, val, one); + return __vec16_i64 (ret_lo, ret_hi); +} + +static FORCEINLINE __vec16_i64 __cast_zext(const __vec16_i64 &, const __vec16_i8 &val) +{ + return __vec16_i64(__cast_zext(__vec16_i32(), val), _mm512_setzero_epi32()); +} + +static FORCEINLINE __vec16_i64 __cast_zext(const __vec16_i64 &, const __vec16_i16 &val) +{ + return __vec16_i64(__cast_zext(__vec16_i32(), val), _mm512_setzero_epi32()); +} + +static FORCEINLINE __vec16_i64 __cast_zext(const __vec16_i64 &, const __vec16_i32 &val) +{ + return __vec16_i64(val.v, _mm512_setzero_epi32()); +} + +static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i8 val) { + return _mm512_extload_ps(&val, _MM_UPCONV_PS_SINT8, _MM_BROADCAST_16X16, _MM_HINT_NONE); +} + +static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i16 val) { + return _mm512_extload_ps(&val, _MM_UPCONV_PS_SINT16, _MM_BROADCAST_16X16, _MM_HINT_NONE); +} + +static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i32 val) { + return _mm512_cvtfxpnt_round_adjustepi32_ps(val, _MM_ROUND_MODE_NEAREST, _MM_EXPADJ_NONE); +} + +static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i64 val) { + __m512i tmp1; + __m512i tmp2; + hilo2zmm(val, tmp1, tmp2); + __vec16_f ret; + +/* + // Cycles don't work. It seems that it is icc bug. 
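+  // (i.e. the loop form kept below for reference was miscompiled by icc, so the
+  //  int64 -> float conversion is unrolled by hand after this comment block)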
+ for (int i = 0; i < 8; i++) { + ret[i] = (float)(((int64_t*)&tmp1)[i]); + } + for (int i = 0; i < 8; i++) { + ((float*)&ret)[i + 8] = (float)(((int64_t*)&tmp2)[i]); + } +*/ + + ret[0] = (float)(((int64_t*)&tmp1)[0]); + ret[1] = (float)(((int64_t*)&tmp1)[1]); + ret[2] = (float)(((int64_t*)&tmp1)[2]); + ret[3] = (float)(((int64_t*)&tmp1)[3]); + ret[4] = (float)(((int64_t*)&tmp1)[4]); + ret[5] = (float)(((int64_t*)&tmp1)[5]); + ret[6] = (float)(((int64_t*)&tmp1)[6]); + ret[7] = (float)(((int64_t*)&tmp1)[7]); + + ret[8] = (float)(((int64_t*)&tmp2)[0]); + ret[9] = (float)(((int64_t*)&tmp2)[1]); + ret[10] = (float)(((int64_t*)&tmp2)[2]); + ret[11] = (float)(((int64_t*)&tmp2)[3]); + ret[12] = (float)(((int64_t*)&tmp2)[4]); + ret[13] = (float)(((int64_t*)&tmp2)[5]); + ret[14] = (float)(((int64_t*)&tmp2)[6]); + ret[15] = (float)(((int64_t*)&tmp2)[7]); + + return ret; +} + +static FORCEINLINE __vec16_d __cast_sitofp(__vec16_d, __vec16_i8 val) { + __vec16_i32 vi = _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST_16X16, _MM_HINT_NONE); + __vec16_d ret; + ret.v1 = _mm512_cvtepi32lo_pd(vi); + __vec16_i32 other8 = _mm512_permute4f128_epi32(vi, _MM_PERM_DCDC); + ret.v2 = _mm512_cvtepi32lo_pd(other8); + return ret; +} + +static FORCEINLINE __vec16_d __cast_sitofp(__vec16_d, __vec16_i16 val) { + __vec16_i32 vi = _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST_16X16, _MM_HINT_NONE); + __vec16_d ret; + ret.v1 = _mm512_cvtepi32lo_pd(vi); + __vec16_i32 other8 = _mm512_permute4f128_epi32(vi, _MM_PERM_DCDC); + ret.v2 = _mm512_cvtepi32lo_pd(other8); + return ret; +} + +static FORCEINLINE __vec16_d __cast_sitofp(__vec16_d, __vec16_i32 val) { + __vec16_d ret; + ret.v1 = _mm512_cvtepi32lo_pd(val); + __vec16_i32 other8 = _mm512_permute4f128_epi32(val, _MM_PERM_DCDC); + ret.v2 = _mm512_cvtepi32lo_pd(other8); + return ret; +} + +static FORCEINLINE __vec16_d __cast_sitofp(__vec16_d, __vec16_i64 val) { + __m512i tmp1; + __m512i tmp2; + hilo2zmm(val, tmp1, tmp2); + __vec16_d ret; + for (int i = 0; i < 8; i++) { + ((double*)&ret.v1)[i] = (double)(((int64_t*)&tmp1)[i]); + } + for (int i = 0; i < 8; i++) { + ((double*)&ret.v2)[i] = (double)(((int64_t*)&tmp2)[i]); + } + return ret; +} + + +static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i1 v) +{ + const __m512 ret = _mm512_setzero_ps(); + const __m512 one = _mm512_set1_ps(1.0); + return _mm512_mask_mov_ps(ret, v, one); +} + +static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, const __vec16_i8 &v) { + return _mm512_extload_ps(v.v, _MM_UPCONV_PS_UINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); +} + +static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i16 val) { + return _mm512_extload_ps(&val, _MM_UPCONV_PS_UINT16, _MM_BROADCAST_16X16, _MM_HINT_NONE); +} + +static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i32 v) { + return _mm512_cvtfxpnt_round_adjustepu32_ps(v, _MM_FROUND_NO_EXC, _MM_EXPADJ_NONE); +} + +static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i64 val) { + __m512i tmp1; + __m512i tmp2; + hilo2zmm(val, tmp1, tmp2); + __vec16_f ret; + // Cycles don't work. It seems that it is icc bug. 
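+    // As above, the loop form is only kept commented out for reference; the
+    // unsigned int64 -> float conversion is unrolled by hand instead.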
+ /* + for (int i = 0; i < 8; i++) { + ((float*)&ret)[i] = ((float)(((uint64_t*)&tmp1)[i])); + } + for (int i = 0; i < 8; i++) { + ((float*)&ret)[i + 8] = ((float)(((uint64_t*)&tmp2)[i])); + } + */ + ret[0] = ((float)(((uint64_t*)&tmp1)[0])); + ret[1] = ((float)(((uint64_t*)&tmp1)[1])); + ret[2] = ((float)(((uint64_t*)&tmp1)[2])); + ret[3] = ((float)(((uint64_t*)&tmp1)[3])); + ret[4] = ((float)(((uint64_t*)&tmp1)[4])); + ret[5] = ((float)(((uint64_t*)&tmp1)[5])); + ret[6] = ((float)(((uint64_t*)&tmp1)[6])); + ret[7] = ((float)(((uint64_t*)&tmp1)[7])); + ret[8] = ((float)(((uint64_t*)&tmp2)[0])); + ret[9] = ((float)(((uint64_t*)&tmp2)[1])); + ret[10] = ((float)(((uint64_t*)&tmp2)[2])); + ret[11] = ((float)(((uint64_t*)&tmp2)[3])); + ret[12] = ((float)(((uint64_t*)&tmp2)[4])); + ret[13] = ((float)(((uint64_t*)&tmp2)[5])); + ret[14] = ((float)(((uint64_t*)&tmp2)[6])); + ret[15] = ((float)(((uint64_t*)&tmp2)[7])); + return ret; +} + +static FORCEINLINE __vec16_d __cast_uitofp(__vec16_d, __vec16_i8 val) +{ + __vec16_i32 vi = _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_UINT8, _MM_BROADCAST_16X16, _MM_HINT_NONE); + __vec16_d ret; + ret.v1 = _mm512_cvtepu32lo_pd(vi); + __vec16_i32 other8 = _mm512_permute4f128_epi32(vi, _MM_PERM_DCDC); + ret.v2 = _mm512_cvtepu32lo_pd(other8); + return ret; +} + +static FORCEINLINE __vec16_d __cast_uitofp(__vec16_d, __vec16_i16 val) +{ + __vec16_i32 vi = _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_UINT16, _MM_BROADCAST_16X16, _MM_HINT_NONE); + __vec16_d ret; + ret.v1 = _mm512_cvtepu32lo_pd(vi); + __vec16_i32 other8 = _mm512_permute4f128_epi32(vi, _MM_PERM_DCDC); + ret.v2 = _mm512_cvtepu32lo_pd(other8); + return ret; +} + +static FORCEINLINE __vec16_d __cast_uitofp(__vec16_d, __vec16_i32 val) +{ + __vec16_d ret; + ret.v1 = _mm512_cvtepu32lo_pd(val); + __vec16_i32 other8 = _mm512_permute4f128_epi32(val, _MM_PERM_DCDC); + ret.v2 = _mm512_cvtepu32lo_pd(other8); + return ret; +} + + +static FORCEINLINE __vec16_d __cast_uitofp(__vec16_d, __vec16_i64 val) { + __m512i tmp1; + __m512i tmp2; + hilo2zmm(val, tmp1, tmp2); + __vec16_d ret; + for (int i = 0; i < 8; i++) { + ((double*)&ret.v1)[i] = (double)(((uint64_t*)&tmp1)[i]); + } + for (int i = 0; i < 8; i++) { + ((double*)&ret.v2)[i] = (double)(((uint64_t*)&tmp2)[i]); + } + return ret; +} + + +// float/double to signed int +static FORCEINLINE __vec16_i32 __cast_fptosi(__vec16_i32, __vec16_f val) { + return _mm512_cvtfxpnt_round_adjustps_epi32(val, _MM_ROUND_MODE_TOWARD_ZERO, _MM_EXPADJ_NONE); +} + +static FORCEINLINE __vec16_i8 __cast_fptosi(__vec16_i8, __vec16_f val) { + __vec16_i8 ret; + __vec16_i32 tmp = __cast_fptosi(__vec16_i32(), val); + _mm512_extstore_epi32(ret.v, tmp, _MM_DOWNCONV_EPI32_SINT8, _MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i16 __cast_fptosi(__vec16_i16, __vec16_f val) { + __vec16_i16 ret; + __vec16_i32 tmp = __cast_fptosi(__vec16_i32(), val); + _mm512_extstore_epi32(ret.v, tmp, _MM_DOWNCONV_EPI32_SINT16, _MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i64 __cast_fptosi(__vec16_i64, __vec16_f val) { + __m512i tmp1; + for (int i = 0; i < 8; i++) { + ((int64_t*)&tmp1)[i] = (int64_t)(((float*)&val)[i]); + } + __m512i tmp2; + for (int i = 0; i < 8; i++) { + ((int64_t*)&tmp2)[i] = (int64_t)(((float*)&val)[i + 8]); + } + return zmm2hilo(tmp1, tmp2); +} + +static FORCEINLINE __vec16_i32 __cast_fptosi(__vec16_i32, __vec16_d val) { + __vec16_i32 tmp = _mm512_cvtfxpnt_roundpd_epi32lo(val.v2, _MM_ROUND_MODE_TOWARD_ZERO); + __vec16_i32 ret_hi8 = _mm512_permute4f128_epi32(tmp, 
_MM_PERM_BADC); + __vec16_i32 ret_lo8 = _mm512_cvtfxpnt_roundpd_epi32lo(val.v1, _MM_ROUND_MODE_TOWARD_ZERO); + return _mm512_xor_epi32(ret_lo8, ret_hi8); +} + +static FORCEINLINE __vec16_i8 __cast_fptosi(__vec16_i8, __vec16_d val) { + __vec16_i8 ret; + __vec16_i32 tmp = __cast_fptosi(__vec16_i32(), val); + _mm512_extstore_epi32(ret.v, tmp, _MM_DOWNCONV_EPI32_SINT8, _MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i16 __cast_fptosi(__vec16_i16, __vec16_d val) { + __vec16_i16 ret; + __vec16_i32 tmp = __cast_fptosi(__vec16_i32(), val); + _mm512_extstore_epi32(ret.v, tmp, _MM_DOWNCONV_EPI32_SINT16, _MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i64 __cast_fptosi(__vec16_i64, __vec16_d val) { + __m512i tmp1; + for (int i = 0; i < 8; i++) { + ((int64_t*)&tmp1)[i] = (int64_t)(((double*)&val.v1)[i]); + } + __m512i tmp2; + for (int i = 0; i < 8; i++) { + ((int64_t*)&tmp2)[i] = (int64_t)(((double*)&val.v2)[i]); + } + return zmm2hilo(tmp1, tmp2); +} + + +static FORCEINLINE __vec16_i32 __cast_fptoui(__vec16_i32, __vec16_f val) { + return _mm512_cvtfxpnt_round_adjustps_epu32(val, _MM_ROUND_MODE_TOWARD_ZERO, _MM_EXPADJ_NONE); +} + +static FORCEINLINE __vec16_i8 __cast_fptoui(__vec16_i8, __vec16_f val) { + __vec16_i8 ret; + __vec16_i32 tmp = __cast_fptoui(__vec16_i32(), val); + _mm512_extstore_epi32(ret.v, tmp, _MM_DOWNCONV_EPI32_UINT8, _MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i16 __cast_fptoui(__vec16_i16, __vec16_f val) { + __vec16_i16 ret; + __vec16_i32 tmp = __cast_fptoui(__vec16_i32(), val); + _mm512_extstore_epi32(ret.v, tmp, _MM_DOWNCONV_EPI32_UINT16, _MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i64 __cast_fptoui(__vec16_i64, __vec16_f val) { + __m512i tmp1; + for (int i = 0; i < 8; i++) { + ((uint64_t*)&tmp1)[i] = (uint64_t)(((float*)&val)[i]); + } + __m512i tmp2; + for (int i = 0; i < 8; i++) { + ((uint64_t*)&tmp2)[i] = (uint64_t)(((float*)&val)[i + 8]); + } + return zmm2hilo(tmp1, tmp2); +} + +static FORCEINLINE __vec16_i32 __cast_fptoui(__vec16_i32, __vec16_d val) { + __vec16_i32 tmp = _mm512_cvtfxpnt_roundpd_epu32lo(val.v2, _MM_ROUND_MODE_TOWARD_ZERO); + __vec16_i32 ret_hi8 = _mm512_permute4f128_epi32(tmp, _MM_PERM_BADC); + __vec16_i32 ret_lo8 = _mm512_cvtfxpnt_roundpd_epu32lo(val.v1, _MM_ROUND_MODE_TOWARD_ZERO); + return _mm512_xor_epi32(ret_lo8, ret_hi8); +} + +static FORCEINLINE __vec16_i8 __cast_fptoui(__vec16_i8, __vec16_d val) { + __vec16_i8 ret; + __vec16_i32 tmp = __cast_fptoui(__vec16_i32(), val); + _mm512_extstore_epi32(ret.v, tmp, _MM_DOWNCONV_EPI32_UINT8, _MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i16 __cast_fptoui(__vec16_i16, __vec16_d val) { + __vec16_i16 ret; + __vec16_i32 tmp = __cast_fptoui(__vec16_i32(), val); + _mm512_extstore_epi32(ret.v, tmp, _MM_DOWNCONV_EPI32_UINT16, _MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i64 __cast_fptoui(__vec16_i64, __vec16_d val) { + __m512i tmp1; + for (int i = 0; i < 8; i++) { + ((uint64_t*)&tmp1)[i] = (uint64_t)(((double*)&val.v1)[i]); + } + __m512i tmp2; + for (int i = 0; i < 8; i++) { + ((uint64_t*)&tmp2)[i] = (uint64_t)(((double*)&val.v2)[i]); + } + return zmm2hilo(tmp1, tmp2); +} + + + + + + + +static FORCEINLINE __vec16_d __cast_fpext(__vec16_d, __vec16_f val) { + __vec16_d ret; + ret.v1 = _mm512_cvtpslo_pd(val.v); + __vec16_f other8 = _mm512_permute4f128_epi32(_mm512_castps_si512(val.v), _MM_PERM_DCDC); + ret.v2 = _mm512_cvtpslo_pd(other8); + return ret; +} + +static FORCEINLINE __vec16_f __cast_fptrunc(__vec16_f, __vec16_d 
val) { + __m512i r0i = _mm512_castps_si512(_mm512_cvtpd_pslo(val.v2)); + __m512i r1i = _mm512_castps_si512(_mm512_cvtpd_pslo(val.v1)); + return _mm512_mask_permute4f128_epi32(r1i, 0xFF00, r0i, _MM_PERM_BABA); +} + +static FORCEINLINE __vec16_f __cast_bits(__vec16_f, __vec16_i32 val) { + return _mm512_castsi512_ps(val); +} + +static FORCEINLINE __vec16_i32 __cast_bits(__vec16_i32, __vec16_f val) { + return _mm512_castps_si512(val); +} + +static FORCEINLINE __vec16_i32 __cast_bits(__vec16_i32, __vec16_i32 val) { + return val; +} + +static FORCEINLINE __vec16_i64 __cast_bits(__vec16_i64, __vec16_d val) { + __vec16_i64 ret; + ret.v_hi = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xFF00, + _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), + _mm512_castpd_si512(val.v2)); + ret.v_hi = _mm512_mask_permutevar_epi32(ret.v_hi, 0x00FF, + _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), + _mm512_castpd_si512(val.v1)); + ret.v_lo = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xFF00, + _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), + _mm512_castpd_si512(val.v2)); + ret.v_lo = _mm512_mask_permutevar_epi32(ret.v_lo, 0x00FF, + _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), + _mm512_castpd_si512(val.v1)); + return ret; +} + +static FORCEINLINE __vec16_d __cast_bits(__vec16_d, __vec16_i64 val) { + __vec16_d ret; + ret.v2 = _mm512_castsi512_pd( + _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, + _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), + val.v_hi)); + ret.v2 = _mm512_castsi512_pd( + _mm512_mask_permutevar_epi32(_mm512_castpd_si512(ret.v2), 0x5555, + _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), + val.v_lo)); + ret.v1 = _mm512_castsi512_pd( + _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, + _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), + val.v_hi)); + ret.v1 = _mm512_castsi512_pd( + _mm512_mask_permutevar_epi32(_mm512_castpd_si512(ret.v1), 0x5555, + _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), + val.v_lo)); + return ret; +} + +static FORCEINLINE __vec16_i64 __cast_bits(__vec16_i64, __vec16_i64 val) { + return val; +} + +/////////////////////////////////////////////////////////////////////////// +// templates for int8/16 operations +/////////////////////////////////////////////////////////////////////////// +#define BINARY_OP(TYPE, NAME, OP) \ +static FORCEINLINE TYPE NAME(TYPE a, TYPE b) { \ + TYPE ret; \ + for (int i = 0; i < 16; ++i) \ + ret[i] = a[i] OP b[i]; \ + return ret; \ +} + +/* knc::macro::used */ +#define BINARY_OP_CAST(TYPE, CAST, NAME, OP) \ +static FORCEINLINE TYPE NAME(TYPE a, TYPE b) { \ + TYPE ret; \ + for (int i = 0; i < 16; ++i) \ + ret[i] = (CAST)(a[i]) OP (CAST)(b[i]); \ + return ret; \ +} + +#define CMP_OP(TYPE, SUFFIX, CAST, NAME, OP) \ +static FORCEINLINE __vec16_i1 NAME##_##SUFFIX(TYPE a, TYPE b) { \ + __vec16_i1 ret; \ + ret.v = 0; \ + for (int i = 0; i < 16; ++i) \ + ret.v |= ((CAST)(a[i]) OP (CAST)(b[i])) << i; \ + return ret; \ +} + +#define SHIFT_UNIFORM(TYPE, CAST, NAME, OP) \ +static FORCEINLINE TYPE NAME(TYPE a, int32_t b) { \ + TYPE ret; \ + for (int i = 0; i < 16; ++i) \ + ret[i] = (CAST)(a[i]) OP b; \ + return ret; \ +} + +#define SELECT(TYPE) \ +static FORCEINLINE TYPE __select(__vec16_i1 mask, TYPE a, TYPE b) { \ + TYPE ret; \ + for (int i = 0; i < 16; ++i) \ + ret[i] = (mask.v & (1< static RetVecType __setzero_i8(); +template <> FORCEINLINE __vec16_i8 __setzero_i8<__vec16_i8>() { + return __vec16_i8(0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +BINARY_OP(__vec16_i8, __add, +) +BINARY_OP(__vec16_i8, __sub, -) +BINARY_OP(__vec16_i8, __mul, *) + +BINARY_OP(__vec16_i8, __or, |) +BINARY_OP(__vec16_i8, __and, &) +BINARY_OP(__vec16_i8, __xor, ^) +BINARY_OP(__vec16_i8, __shl, <<) + +BINARY_OP_CAST(__vec16_i8, uint8_t, __udiv, /) +BINARY_OP_CAST(__vec16_i8, int8_t, __sdiv, /) + +BINARY_OP_CAST(__vec16_i8, uint8_t, __urem, %) +BINARY_OP_CAST(__vec16_i8, int8_t, __srem, %) +BINARY_OP_CAST(__vec16_i8, uint8_t, __lshr, >>) +BINARY_OP_CAST(__vec16_i8, int8_t, __ashr, >>) + +SHIFT_UNIFORM(__vec16_i8, uint8_t, __lshr, >>) +SHIFT_UNIFORM(__vec16_i8, int8_t, __ashr, >>) +SHIFT_UNIFORM(__vec16_i8, int8_t, __shl, <<) + +CMP_OP(__vec16_i8, i8, int8_t, __equal, ==) +CMP_OP(__vec16_i8, i8, uint8_t, __unsigned_less_equal, <=) +CMP_OP(__vec16_i8, i8, int8_t, __signed_less_equal, <=) +CMP_OP(__vec16_i8, i8, uint8_t, __unsigned_greater_equal, >=) +CMP_OP(__vec16_i8, i8, int8_t, __signed_greater_equal, >=) +CMP_OP(__vec16_i8, i8, uint8_t, __unsigned_less_than, <) +CMP_OP(__vec16_i8, i8, int8_t, __signed_less_than, <) +CMP_OP(__vec16_i8, i8, uint8_t, __unsigned_greater_than, >) +CMP_OP(__vec16_i8, i8, int8_t, __signed_greater_than, >) + +SELECT(__vec16_i8) + +static FORCEINLINE int8_t __extract_element(__vec16_i8 v, uint32_t index) { + return v[index]; +} + +static FORCEINLINE void __insert_element(__vec16_i8 *v, uint32_t index, int8_t val) { + ((int32_t *)v)[index] = val; +} + +static FORCEINLINE __vec16_i8 __broadcast_i8(__vec16_i8 v, int index) { + int32_t val = __extract_element(v, index & 0xf); + __vec16_i32 tmp = _mm512_set1_epi32(val); + __vec16_i8 ret; + _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i1 __not_equal_i8(__vec16_i8 a, __vec16_i8 b) { + __vec16_i32 tmp_a = _mm512_extload_epi32(&a, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_b = _mm512_extload_epi32(&b, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + return __not_equal_i32(tmp_a, tmp_b); +} + +static FORCEINLINE __vec16_i1 __equal_i8_and_mask(const __vec16_i8 &a, const __vec16_i8 &b, __vec16_i1 m) { + __vec16_i32 tmp_a = _mm512_extload_epi32(&a, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_b = _mm512_extload_epi32(&b, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + return __equal_i32_and_mask(tmp_a, tmp_b, m); +} + +static FORCEINLINE __vec16_i1 __not_equal_i8_and_mask(__vec16_i8 a, __vec16_i8 b, __vec16_i1 m) { + __vec16_i32 tmp_a = _mm512_extload_epi32(&a, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_b = _mm512_extload_epi32(&b, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + return __not_equal_i32_and_mask(tmp_a, tmp_b, m); +} + +static FORCEINLINE __vec16_i8 __rotate_i8(__vec16_i8 v, int index) { + __vec16_i32 tmp_v = _mm512_extload_epi32(&v, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp = __rotate_i32(tmp_v, index); + __vec16_i8 ret; + _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i8 __shuffle_i8(__vec16_i8 v, __vec16_i32 index) { + __vec16_i32 tmp_v = _mm512_extload_epi32(&v, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp = __shuffle_i32(tmp_v, index); + __vec16_i8 ret; + _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); + return ret; + +} + 
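+// Note: as with the int8 helpers above, __smear_i8 and __shuffle2_i8 below emulate
+// 8-bit lanes by upconverting to 32-bit lanes (_MM_UPCONV_EPI32_SINT8), doing the
+// work on a __vec16_i32, and downconverting again on the final store.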
+template static RetVecType __smear_i8(int8_t i); +template <> FORCEINLINE __vec16_i8 __smear_i8<__vec16_i8>(int8_t i) { + __vec16_i32 tmp = __smear_i32<__vec16_i32>(i); + __vec16_i8 ret; + _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i8 __shuffle2_i8(__vec16_i8 v0, __vec16_i8 v1, __vec16_i32 index) { + __vec16_i32 tmp_v0 = _mm512_extload_epi32(&v0, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_v1 = _mm512_extload_epi32(&v1, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp = __shuffle2_i32(tmp_v0, tmp_v1, index); + __vec16_i8 ret; + _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); + return ret; +} +/////////////////////////////////////////////////////////////////////////// +// int16 +/////////////////////////////////////////////////////////////////////////// + + +template static RetVecType __setzero_i16(); +template <> FORCEINLINE __vec16_i16 __setzero_i16<__vec16_i16>() { + return __vec16_i16(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +BINARY_OP(__vec16_i16, __add, +) +BINARY_OP(__vec16_i16, __sub, -) +BINARY_OP(__vec16_i16, __mul, *) + +BINARY_OP(__vec16_i16, __or, |) +BINARY_OP(__vec16_i16, __and, &) +BINARY_OP(__vec16_i16, __xor, ^) +BINARY_OP(__vec16_i16, __shl, <<) + +BINARY_OP_CAST(__vec16_i16, uint16_t, __udiv, /) +BINARY_OP_CAST(__vec16_i16, int16_t, __sdiv, /) + +BINARY_OP_CAST(__vec16_i16, uint16_t, __urem, %) +BINARY_OP_CAST(__vec16_i16, int16_t, __srem, %) +BINARY_OP_CAST(__vec16_i16, uint16_t, __lshr, >>) +BINARY_OP_CAST(__vec16_i16, int16_t, __ashr, >>) + +SHIFT_UNIFORM(__vec16_i16, uint16_t, __lshr, >>) +SHIFT_UNIFORM(__vec16_i16, int16_t, __ashr, >>) +SHIFT_UNIFORM(__vec16_i16, int16_t, __shl, <<) + +CMP_OP(__vec16_i16, i16, int16_t, __equal, ==) +CMP_OP(__vec16_i16, i16, uint16_t, __unsigned_less_equal, <=) +CMP_OP(__vec16_i16, i16, int16_t, __signed_less_equal, <=) +CMP_OP(__vec16_i16, i16, uint16_t, __unsigned_greater_equal, >=) +CMP_OP(__vec16_i16, i16, int16_t, __signed_greater_equal, >=) +CMP_OP(__vec16_i16, i16, uint16_t, __unsigned_less_than, <) +CMP_OP(__vec16_i16, i16, int16_t, __signed_less_than, <) +CMP_OP(__vec16_i16, i16, uint16_t, __unsigned_greater_than, >) +CMP_OP(__vec16_i16, i16, int16_t, __signed_greater_than, >) + +SELECT(__vec16_i16) + +static FORCEINLINE int16_t __extract_element(__vec16_i16 v, uint32_t index) { + return v[index]; +} + +static FORCEINLINE void __insert_element(__vec16_i16 *v, uint32_t index, int16_t val) { + ((int16_t *)v)[index] = val; +} + +static FORCEINLINE __vec16_i16 __broadcast_i16(__vec16_i16 v, int index) { + int32_t val = __extract_element(v, index & 0xf); + __vec16_i32 tmp = _mm512_set1_epi32(val); + __vec16_i16 ret; + _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i1 __not_equal_i16(__vec16_i16 a, __vec16_i16 b) { + __vec16_i32 tmp_a = _mm512_extload_epi32(&a, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_b = _mm512_extload_epi32(&b, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + return __not_equal_i32(tmp_a, tmp_b); +} + +static FORCEINLINE __vec16_i1 __equal_i16_and_mask(const __vec16_i16 &a, const __vec16_i16 &b, __vec16_i1 m) { + __vec16_i32 tmp_a = _mm512_extload_epi32(&a, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_b = _mm512_extload_epi32(&b, _MM_UPCONV_EPI32_SINT16, 
_MM_BROADCAST32_NONE, _MM_HINT_NONE); + return __equal_i32_and_mask(tmp_a, tmp_b, m); +} + +static FORCEINLINE __vec16_i1 __not_equal_i16_and_mask(__vec16_i16 a, __vec16_i16 b, __vec16_i1 m) { + __vec16_i32 tmp_a = _mm512_extload_epi32(&a, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_b = _mm512_extload_epi32(&b, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + return __not_equal_i32_and_mask(tmp_a, tmp_b, m); +} + +static FORCEINLINE __vec16_i16 __rotate_i16(__vec16_i16 v, int index) { + __vec16_i32 tmp_v = _mm512_extload_epi32(&v, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp = __rotate_i32(tmp_v, index); + __vec16_i16 ret; + _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i16 __shuffle_i16(__vec16_i16 v, __vec16_i32 index) { + __vec16_i32 tmp_v = _mm512_extload_epi32(&v, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp = __shuffle_i32(tmp_v, index); + __vec16_i16 ret; + _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); + return ret; +} + +template static RetVecType __smear_i16(int16_t i); +template <> FORCEINLINE __vec16_i16 __smear_i16<__vec16_i16>(int16_t i) { + __vec16_i32 tmp = __smear_i32<__vec16_i32>(i); + __vec16_i16 ret; + _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i16 __shuffle2_i16(__vec16_i16 v0, __vec16_i16 v1, __vec16_i32 index) { + __vec16_i32 tmp_v0 = _mm512_extload_epi32(&v0, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_v1 = _mm512_extload_epi32(&v1, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp = __shuffle2_i32(tmp_v0, tmp_v1, index); + __vec16_i16 ret; + _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); + return ret; +} + +/////////////////////////////////////////////////////////////////////////// +// various math functions +/////////////////////////////////////////////////////////////////////////// + +static FORCEINLINE float __round_uniform_float(float v) { + return roundf(v); +} + +static FORCEINLINE float __floor_uniform_float(float v) { + return floorf(v); +} + +static FORCEINLINE float __ceil_uniform_float(float v) { + return ceilf(v); +} + +static FORCEINLINE double __round_uniform_double(double v) { + return round(v); +} + +static FORCEINLINE double __floor_uniform_double(double v) { + return floor(v); +} + +static FORCEINLINE double __ceil_uniform_double(double v) { + return ceil(v); +} + +static FORCEINLINE __vec16_f __round_varying_float(__vec16_f v) { + return _mm512_round_ps(v, _MM_ROUND_MODE_NEAREST, _MM_EXPADJ_NONE); +} + +static FORCEINLINE __vec16_f __floor_varying_float(__vec16_f v) { + return _mm512_floor_ps(v); +} + +static FORCEINLINE __vec16_f __ceil_varying_float(__vec16_f v) { + return _mm512_ceil_ps(v); +} + +// min/max + +static FORCEINLINE float __min_uniform_float(float a, float b) { return (ab) ? a : b; } +static FORCEINLINE double __min_uniform_double(double a, double b) { return (ab) ? a : b; } + +static FORCEINLINE int32_t __min_uniform_int32(int32_t a, int32_t b) { return (ab) ? a : b; } +static FORCEINLINE int32_t __min_uniform_uint32(uint32_t a, uint32_t b) { return (ab) ? a : b; } + +static FORCEINLINE int64_t __min_uniform_int64(int64_t a, int64_t b) { return (ab) ? 
a : b; } +static FORCEINLINE int64_t __min_uniform_uint64(uint64_t a, uint64_t b) { return (ab) ? a : b; } + +static FORCEINLINE __vec16_f __max_varying_float (__vec16_f v1, __vec16_f v2) { return _mm512_gmax_ps(v1, v2); } +static FORCEINLINE __vec16_f __min_varying_float (__vec16_f v1, __vec16_f v2) { return _mm512_gmin_ps(v1, v2); } +static FORCEINLINE __vec16_d __max_varying_double(__vec16_d v1, __vec16_d v2) { return __vec16_d(_mm512_gmax_pd(v1.v1, v2.v1), _mm512_gmax_pd(v1.v2,v2.v2)); } +static FORCEINLINE __vec16_d __min_varying_double(__vec16_d v1, __vec16_d v2) { return __vec16_d(_mm512_gmin_pd(v1.v1, v2.v1), _mm512_gmin_pd(v1.v2,v2.v2)); } + +static FORCEINLINE __vec16_i32 __max_varying_int32 (__vec16_i32 v1, __vec16_i32 v2) { return _mm512_max_epi32(v1, v2); } +static FORCEINLINE __vec16_i32 __min_varying_int32 (__vec16_i32 v1, __vec16_i32 v2) { return _mm512_min_epi32(v1, v2); } +static FORCEINLINE __vec16_i32 __max_varying_uint32(__vec16_i32 v1, __vec16_i32 v2) { return _mm512_max_epu32(v1, v2); } +static FORCEINLINE __vec16_i32 __min_varying_uint32(__vec16_i32 v1, __vec16_i32 v2) { return _mm512_min_epu32(v1, v2); } + +static FORCEINLINE __vec16_i64 __max_varying_int64 (__vec16_i64 v1, __vec16_i64 v2) { + __vec16_i64 ret; + ret.v_hi = _mm512_max_epi32(v1.v_hi, v2.v_hi); + __vec16_i1 mask = _mm512_cmpeq_epi32_mask(ret.v_hi, v2.v_hi); + ret.v_lo = _mm512_mask_max_epi32(v1.v_lo, mask, v1.v_lo, v2.v_lo); + return ret; +} + +static FORCEINLINE __vec16_i64 __min_varying_int64 (__vec16_i64 v1, __vec16_i64 v2) { + __vec16_i64 ret; + ret.v_hi = _mm512_min_epi32(v1.v_hi, v2.v_hi); + __vec16_i1 mask = _mm512_cmpeq_epi32_mask(ret.v_hi, v2.v_hi); + ret.v_lo = _mm512_mask_min_epi32(v1.v_lo, mask, v1.v_lo, v2.v_lo); + return ret; +} + +static FORCEINLINE __vec16_i64 __max_varying_uint64 (__vec16_i64 v1, __vec16_i64 v2) { + __vec16_i64 ret; + ret.v_hi = _mm512_max_epu32(v1.v_hi, v2.v_hi); + __vec16_i1 mask = _mm512_cmpeq_epi32_mask(ret.v_hi, v2.v_hi); + ret.v_lo = _mm512_mask_max_epu32(v1.v_lo, mask, v1.v_lo, v2.v_lo); + return ret; +} + +static FORCEINLINE __vec16_i64 __min_varying_uint64 (__vec16_i64 v1, __vec16_i64 v2) { + __vec16_i64 ret; + ret.v_hi = _mm512_min_epu32(v1.v_hi, v2.v_hi); + __vec16_i1 mask = _mm512_cmpeq_epi32_mask(ret.v_hi, v2.v_hi); + ret.v_lo = _mm512_mask_min_epu32(v1.v_lo, mask, v1.v_lo, v2.v_lo); + return ret; +} + +// sqrt/rsqrt/rcp + +static FORCEINLINE float __rsqrt_uniform_float(float v) { + return 1.f / sqrtf(v); +} + +static FORCEINLINE float __rcp_uniform_float(float v) { + return 1.f / v; +} + +static FORCEINLINE float __sqrt_uniform_float(float v) { + return sqrtf(v); +} + +static FORCEINLINE double __sqrt_uniform_double(double v) { + return sqrt(v); +} + +static FORCEINLINE __vec16_f __sqrt_varying_float(__vec16_f v) { + return _mm512_sqrt_ps(v); +} + +static FORCEINLINE __vec16_f __rcp_varying_float(__vec16_f v) { +#ifdef ISPC_FAST_MATH + return _mm512_rcp23_ps(v); // Approximation with 23 bits of accuracy. 
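+    // (without ISPC_FAST_MATH, the full-precision _mm512_recip_ps path below is used)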
+#else + return _mm512_recip_ps(v); +#endif +} +static FORCEINLINE __vec16_d __rcp_varying_double(__vec16_d x) { + __vec16_d y; + for (int i = 0; i < 16; i++) + __insert_element(&y, i, 1.0/__extract_element(x,i)); + return y; +} +static FORCEINLINE double __rcp_uniform_double(double v) +{ + return 1.0/v; +} + + +static FORCEINLINE __vec16_f __rsqrt_varying_float(__vec16_f v) { +#ifdef ISPC_FAST_MATH + return _mm512_rsqrt23_ps(v); // Approximation with 0.775ULP accuracy +#else + return _mm512_invsqrt_ps(v); +#endif +} + +static FORCEINLINE __vec16_d __rsqrt_varying_double(__vec16_d x) { + __vec16_d y; + for (int i = 0; i < 16; i++) + __insert_element(&y, i, 1.0/sqrt(__extract_element(x,i))); + return y; +} + +static FORCEINLINE double __rsqrt_uniform_double(double v) +{ + return 1.0/v; +} + + +/////////////////////////////////////////////////////////////////////////// +// bit ops +/////////////////////////////////////////////////////////////////////////// + +static FORCEINLINE int32_t __popcnt_int32(const __vec1_i32 mask) { + return _mm_countbits_32(mask); +} + +static FORCEINLINE int32_t __popcnt_int64(const __vec1_i64 mask) { + return _mm_countbits_64(mask); +} + + +static FORCEINLINE int32_t __count_trailing_zeros_i32(const __vec1_i32 mask) { + return _mm_tzcnt_32(mask); +} + +static FORCEINLINE int64_t __count_trailing_zeros_i64(const __vec1_i64 mask) { + return _mm_tzcnt_64(mask); +} + +static FORCEINLINE int32_t __count_leading_zeros_i32(__vec1_i32 mask) { + uint32_t n = 0; + if (mask == 0) + return 32; + while (1) { + if (mask < 0) break; + n ++; + mask <<= 1; + } + return n; +} + +static FORCEINLINE void __insert_element(__vec1_i32 *v, int index, uint32_t val) { + ((uint32_t *)v)[index] = val; \ +} + +static FORCEINLINE int64_t __count_leading_zeros_i64(__vec1_i64 mask) { + uint32_t n = 0; + if (mask == 0) + return 64; + while (1) { + if (mask < 0) break; + n ++; + mask <<= 1; + } + return n; +} + +/////////////////////////////////////////////////////////////////////////// +// reductions +/////////////////////////////////////////////////////////////////////////// + +REDUCE_ADD ( int16_t, __vec16_i8, __reduce_add_int8) +REDUCE_ADD ( int32_t, __vec16_i16, __reduce_add_int16) + +static FORCEINLINE int32_t __reduce_add_int32(__vec16_i32 v) { + return _mm512_reduce_add_epi32(v); +} + +static FORCEINLINE int32_t __reduce_min_int32(__vec16_i32 v) { + return _mm512_reduce_min_epi32(v); +} + +static FORCEINLINE int32_t __reduce_max_int32(__vec16_i32 v) { + return _mm512_reduce_max_epi32(v); +} + +static FORCEINLINE uint32_t __reduce_min_uint32(__vec16_i32 v) { + return _mm512_reduce_min_epu32(v); +} + +static FORCEINLINE uint32_t __reduce_max_uint32(__vec16_i32 v) { + return _mm512_reduce_max_epu32(v); +} + +static FORCEINLINE int64_t __reduce_add_int64(__vec16_i64 v) { + __m512i tmp1; + __m512i tmp2; + hilo2zmm(v, tmp1, tmp2); +#if __INTEL_COMPILER < 1500 + int64_t res1 = _mm512_reduce_add_epi64((__m512)tmp1); + int64_t res2 = _mm512_reduce_add_epi64((__m512)tmp2); +#else + int64_t res1 = _mm512_reduce_add_epi64(tmp1); + int64_t res2 = _mm512_reduce_add_epi64(tmp2); +#endif + return res1 + res2; +} + +static FORCEINLINE int64_t __reduce_min_int64(__vec16_i64 v) { + __m512i tmp1; + __m512i tmp2; + hilo2zmm(v, tmp1, tmp2); +#if __INTEL_COMPILER < 1500 + int64_t res1 = _mm512_reduce_min_epi64((__m512)tmp1); + int64_t res2 = _mm512_reduce_min_epi64((__m512)tmp2); +#else + int64_t res1 = _mm512_reduce_min_epi64(tmp1); + int64_t res2 = _mm512_reduce_min_epi64(tmp2); +#endif + return (res1 < res2) 
? res1 : res2; +} + +static FORCEINLINE int64_t __reduce_max_int64(__vec16_i64 v) { + __m512i tmp1; + __m512i tmp2; + hilo2zmm(v, tmp1, tmp2); +#if __INTEL_COMPILER < 1500 + int64_t res1 = _mm512_reduce_max_epi64((__m512)tmp1); + int64_t res2 = _mm512_reduce_max_epi64((__m512)tmp2); +#else + int64_t res1 = _mm512_reduce_max_epi64(tmp1); + int64_t res2 = _mm512_reduce_max_epi64(tmp2); +#endif + return (res1 > res2) ? res1 : res2; +} + +static FORCEINLINE uint64_t __reduce_min_uint64(__vec16_i64 v) { + __m512i tmp1; + __m512i tmp2; + hilo2zmm(v, tmp1, tmp2); +#if __INTEL_COMPILER < 1500 + uint64_t res1 = _mm512_reduce_min_epu64((__m512)tmp1); + uint64_t res2 = _mm512_reduce_min_epu64((__m512)tmp2); +#else + uint64_t res1 = _mm512_reduce_min_epu64(tmp1); + uint64_t res2 = _mm512_reduce_min_epu64(tmp2); +#endif + return (res1 < res2) ? res1 : res2; +} + +static FORCEINLINE uint64_t __reduce_max_uint64(__vec16_i64 v) { + __m512i tmp1; + __m512i tmp2; + hilo2zmm(v, tmp1, tmp2); +#if __INTEL_COMPILER < 1500 + uint64_t res1 = _mm512_reduce_max_epu64((__m512)tmp1); + uint64_t res2 = _mm512_reduce_max_epu64((__m512)tmp2); +#else + uint64_t res1 = _mm512_reduce_max_epu64(tmp1); + uint64_t res2 = _mm512_reduce_max_epu64(tmp2); +#endif + return (res1 > res2) ? res1 : res2; +} + +static FORCEINLINE float __reduce_add_float(__vec16_f v) { + return _mm512_reduce_add_ps(v); +} + +static FORCEINLINE float __reduce_min_float(__vec16_f v) { + return _mm512_reduce_min_ps(v); +} + +static FORCEINLINE float __reduce_max_float(__vec16_f v) { + return _mm512_reduce_max_ps(v); +} + +static FORCEINLINE float __reduce_add_double(__vec16_d v) { + return _mm512_reduce_add_pd(v.v1) + _mm512_reduce_add_pd(v.v2); +} + +static FORCEINLINE float __reduce_min_double(__vec16_d v) { + return std::min(_mm512_reduce_min_pd(v.v1), _mm512_reduce_min_pd(v.v2)); +} + +static FORCEINLINE float __reduce_max_double(__vec16_d v) { + return std::max(_mm512_reduce_max_pd(v.v1), _mm512_reduce_max_pd(v.v2)); +} + +/////////////////////////////////////////////////////////////////////////// +// masked load/store +/////////////////////////////////////////////////////////////////////////// + +// Currently, when a pseudo_gather is converted into a masked load, it has to be unaligned +static FORCEINLINE __vec16_i32 __masked_load_i32(void *p, __vec16_i1 mask) { +#ifdef ISPC_FORCE_ALIGNED_MEMORY + return _mm512_mask_load_epi32(__vec16_i32(), mask, p); +#else + __vec16_i32 tmp; + tmp.v = _mm512_mask_extloadunpacklo_epi32(tmp.v, 0xFFFF, p, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); + tmp.v = _mm512_mask_extloadunpackhi_epi32(tmp.v, 0xFFFF, (uint8_t*)p+64, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); + __vec16_i32 ret; + return _mm512_mask_mov_epi32(ret.v, mask, tmp.v); +#endif +} + +static FORCEINLINE __vec16_f __masked_load_float(void *p, __vec16_i1 mask) { +#ifdef ISPC_FORCE_ALIGNED_MEMORY + return _mm512_mask_load_ps(_mm512_undefined_ps(), mask,p); +#else + __vec16_f tmp; + tmp.v = _mm512_mask_extloadunpacklo_ps(tmp.v, 0xFFFF, p, _MM_UPCONV_PS_NONE, _MM_HINT_NONE); + tmp.v = _mm512_mask_extloadunpackhi_ps(tmp.v, 0xFFFF, (uint8_t*)p+64, _MM_UPCONV_PS_NONE, _MM_HINT_NONE); + __vec16_f ret; + return _mm512_mask_mov_ps(ret.v, mask, tmp.v); +#endif +} + +static FORCEINLINE __vec16_i64 __masked_load_i64(void *p, __vec16_i1 mask) { + __vec16_i32 first8 = __masked_load_i32(p, mask); + __vec16_i32 second8 = __masked_load_i32(p + 64, mask); + return zmm2hilo(first8, second8); +} + +static FORCEINLINE __vec16_d __masked_load_double(void *p, __vec16_i1 mask) { +#ifdef 
ISPC_FORCE_ALIGNED_MEMORY + __vec16_d ret; + __vec16_i1 tmp_m = mask; + tmp_m = _mm512_kswapb(tmp_m, tmp_m); + ret.v1 = _mm512_mask_load_pd(ret.v1, mask, p); + ret.v2 = _mm512_mask_load_pd(ret.v2, tmp_m, (uint8_t*)p+64); + return ret; +#else + __vec16_d tmp; + tmp.v1 = _mm512_mask_extloadunpacklo_pd(tmp.v1, 0xFF, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); + tmp.v1 = _mm512_mask_extloadunpackhi_pd(tmp.v1, 0xFF, (uint8_t*)p+64, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); + tmp.v2 = _mm512_mask_extloadunpacklo_pd(tmp.v2, 0xFF, (uint8_t*)p+64, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); + tmp.v2 = _mm512_mask_extloadunpackhi_pd(tmp.v2, 0xFF, (uint8_t*)p+128, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); + __vec16_d ret; + __vec16_i1 tmp_m = mask; + tmp_m = _mm512_kswapb(tmp_m, tmp_m); + ret.v1 = _mm512_mask_mov_pd(ret.v1, mask, tmp.v1); + ret.v2 = _mm512_mask_mov_pd(ret.v2, tmp_m, tmp.v2); + return ret; +#endif +} + +static FORCEINLINE void __masked_store_i8(void *p, const __vec16_i8 &val, __vec16_i1 mask) { + __vec16_i32 tmp = _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); +#ifdef ISPC_FORCE_ALIGNED_MEMORY + _mm512_mask_extstore_epi32(p, mask, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); +#else + #if 0 // TODO: both implementations seem to work, need to test which one is faster + _mm512_mask_i32extscatter_epi32 (p, mask, __vec16_i32(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15), tmp, _MM_DOWNCONV_EPI32_SINT8, sizeof(uint8_t), _MM_HINT_NONE); + #else + __vec16_i32 tmp_; + tmp_.v = _mm512_extloadunpacklo_epi32(tmp_.v, p, _MM_UPCONV_EPI32_SINT8, _MM_HINT_NONE); + tmp_.v = _mm512_extloadunpackhi_epi32(tmp_.v, (uint8_t*)p+64, _MM_UPCONV_EPI32_SINT8, _MM_HINT_NONE); + tmp_.v = _mm512_mask_mov_epi32(tmp_.v, mask, tmp.v); + _mm512_extpackstorelo_epi32(p, tmp_.v, _MM_DOWNCONV_EPI32_SINT8, _MM_HINT_NONE); + _mm512_extpackstorehi_epi32((uint8_t*)p+64, tmp_.v, _MM_DOWNCONV_EPI32_SINT8, _MM_HINT_NONE); + #endif // if 0 +#endif +} + +static FORCEINLINE __vec16_i8 __masked_load_i8(void *p, __vec16_i1 mask) { +#ifdef ISPC_FORCE_ALIGNED_MEMORY + __vec16_i32 tmp = _mm512_mask_extload_epi32(_mm512_undefined_epi32(), mask, p, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); +#else + __vec16_i32 tmp; + tmp.v = _mm512_mask_extloadunpacklo_epi32(tmp.v, 0xFFFF, p, _MM_UPCONV_EPI32_SINT8, _MM_HINT_NONE); + tmp.v = _mm512_mask_extloadunpackhi_epi32(tmp.v, 0xFFFF, (uint8_t*)p+64, _MM_UPCONV_EPI32_SINT8, _MM_HINT_NONE); +#endif + __vec16_i8 ret; + _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT8, _MM_HINT_NONE); + return ret; +} + +template static FORCEINLINE __vec16_i8 __load(const __vec16_i8 *p) { + return *p; +} +template static FORCEINLINE void __store(__vec16_i8 *p, __vec16_i8 v) { + *p = v; +} + +static FORCEINLINE void +__scatter_base_offsets32_i8(uint8_t *b, uint32_t scale, __vec16_i32 offsets, + __vec16_i8 val, __vec16_i1 mask) +{ + __vec16_i32 tmp = _mm512_extload_epi32(&val,_MM_UPCONV_EPI32_SINT8, + _MM_BROADCAST32_NONE, _MM_HINT_NONE); + _mm512_mask_i32extscatter_epi32(b, mask, offsets, tmp, + _MM_DOWNCONV_EPI32_SINT8, scale, + _MM_HINT_NONE); +} + + +static FORCEINLINE void +__scatter_base_offsets32_i16(uint8_t *b, uint32_t scale, __vec16_i32 offsets, + __vec16_i16 val, __vec16_i1 mask) +{ + __vec16_i32 tmp = _mm512_extload_epi32(&val,_MM_UPCONV_EPI32_SINT16, + _MM_BROADCAST32_NONE, _MM_HINT_NONE); + _mm512_mask_i32extscatter_epi32(b, mask, offsets, tmp, + _MM_DOWNCONV_EPI32_SINT16, scale, + _MM_HINT_NONE); +} + + +static FORCEINLINE void __masked_store_i16(void *p, const 
__vec16_i16 &val, __vec16_i1 mask) { + __vec16_i32 tmp = _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); +#ifdef ISPC_FORCE_ALIGNED_MEMORY + _mm512_mask_extstore_epi32(p, mask, tmp, _MM_DOWNCONV_EPI32_SINT16, _MM_HINT_NONE); +#else + #if 0 // TODO: both implementations seem to work, need to test which one is faster + _mm512_mask_i32extscatter_epi32 (p, mask, __vec16_i32(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15), tmp, _MM_DOWNCONV_EPI32_SINT16, sizeof(uint16_t), _MM_HINT_NONE); + #else + __vec16_i32 tmp_; + tmp_.v = _mm512_extloadunpacklo_epi32(tmp_.v, p, _MM_UPCONV_EPI32_SINT16, _MM_HINT_NONE); + tmp_.v = _mm512_extloadunpackhi_epi32(tmp_.v, (uint8_t*)p+64, _MM_UPCONV_EPI32_SINT16, _MM_HINT_NONE); + tmp_.v = _mm512_mask_mov_epi32(tmp_.v, mask, tmp.v); + _mm512_extpackstorelo_epi32(p, tmp_.v, _MM_DOWNCONV_EPI32_SINT16, _MM_HINT_NONE); + _mm512_extpackstorehi_epi32((uint8_t*)p+64, tmp_.v, _MM_DOWNCONV_EPI32_SINT16, _MM_HINT_NONE); + #endif // if 0 +#endif +} + +static FORCEINLINE __vec16_i16 __masked_load_i16(void *p, __vec16_i1 mask) { +#ifdef ISPC_FORCE_ALIGNED_MEMORY + __vec16_i32 tmp = _mm512_mask_extload_epi32(_mm512_undefined_epi32(), mask, p, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); +#else + __vec16_i32 tmp; + tmp.v = _mm512_mask_extloadunpacklo_epi32(tmp.v, 0xFFFF, p, _MM_UPCONV_EPI32_SINT16, _MM_HINT_NONE); + tmp.v = _mm512_mask_extloadunpackhi_epi32(tmp.v, 0xFFFF, (uint8_t*)p+64, _MM_UPCONV_EPI32_SINT16, _MM_HINT_NONE); +#endif + __vec16_i16 ret; + _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT16, _MM_HINT_NONE); + return ret; +} + + +template static FORCEINLINE __vec16_i16 __load(const __vec16_i16 *p) { + return *p; +} + +template static FORCEINLINE void __store(__vec16_i16 *p, __vec16_i16 v) { + *p = v; +} + + +static FORCEINLINE void __masked_store_i32(void *p, __vec16_i32 val, __vec16_i1 mask) { +#ifdef ISPC_FORCE_ALIGNED_MEMORY + _mm512_mask_store_epi32(p, mask, val.v); +#else + __vec16_i32 tmp; + tmp.v = _mm512_extloadunpacklo_epi32(tmp.v, p, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); + tmp.v = _mm512_extloadunpackhi_epi32(tmp.v, (uint8_t*)p+64, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); + tmp.v = _mm512_mask_mov_epi32(tmp.v, mask, val.v); + _mm512_extpackstorelo_epi32(p, tmp.v, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); + _mm512_extpackstorehi_epi32((uint8_t*)p+64, tmp.v, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); +#endif +} + +static FORCEINLINE void __masked_store_float(void *p, __vec16_f val, + __vec16_i1 mask) { +#ifdef ISPC_FORCE_ALIGNED_MEMORY + _mm512_mask_store_ps(p, mask, val.v); +#else + __vec16_f tmp; + tmp.v = _mm512_extloadunpacklo_ps(tmp.v, p, _MM_UPCONV_PS_NONE, _MM_HINT_NONE); + tmp.v = _mm512_extloadunpackhi_ps(tmp.v, (uint8_t*)p+64, _MM_UPCONV_PS_NONE, _MM_HINT_NONE); + tmp.v = _mm512_mask_mov_ps(tmp.v, mask, val.v); + _mm512_extpackstorelo_ps(p, tmp.v, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE); + _mm512_extpackstorehi_ps((uint8_t*)p+64, tmp.v, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE); +#endif +} + +static FORCEINLINE void __masked_store_double(void *p, __vec16_d val, + __vec16_i1 mask) { +#ifdef ISPC_FORCE_ALIGNED_MEMORY + __vec16_i1 tmp_m = mask; + tmp_m = _mm512_kswapb(tmp_m, tmp_m); + _mm512_mask_store_pd(p, mask, val.v1); + _mm512_mask_store_pd((uint8_t*)p+64, tmp_m, val.v2); +#else + __vec16_d tmp; + __vec16_i1 tmp_m = mask; + tmp_m = _mm512_kswapb(tmp_m, tmp_m); + tmp.v1 = _mm512_extloadunpacklo_pd(tmp.v1, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); + tmp.v1 = _mm512_extloadunpackhi_pd(tmp.v1, 
(uint8_t*)p+64, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); + tmp.v2 = _mm512_extloadunpacklo_pd(tmp.v2, (uint8_t*)p+64, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); + tmp.v2 = _mm512_extloadunpackhi_pd(tmp.v2, (uint8_t*)p+128, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); + tmp.v1 = _mm512_mask_mov_pd(tmp.v1, mask, val.v1); + tmp.v2 = _mm512_mask_mov_pd(tmp.v2, tmp_m, val.v2); + _mm512_extpackstorelo_pd(p, tmp.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); + _mm512_extpackstorehi_pd((uint8_t*)p+64, tmp.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); + _mm512_extpackstorelo_pd((uint8_t*)p+64, tmp.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); + _mm512_extpackstorehi_pd((uint8_t*)p+128, tmp.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); +#endif +} + +static FORCEINLINE void __masked_store_blend_i32(void *p, __vec16_i32 val, + __vec16_i1 mask) { + __masked_store_i32(p, val, mask); +} + +static FORCEINLINE void __masked_store_blend_float(void *p, __vec16_f val, + __vec16_i1 mask) { + __masked_store_float(p, val, mask); +} + +/////////////////////////////////////////////////////////////////////////// +// gather/scatter +/////////////////////////////////////////////////////////////////////////// + +// offsets * offsetScale is in bytes (for all of these) + +static FORCEINLINE __vec16_i8 +__gather_base_offsets32_i8(uint8_t *base, uint32_t scale, __vec16_i32 offsets, + __vec16_i1 mask) { + // (iw): need to temporarily store as int because gathers can only return ints. + __vec16_i32 tmp = _mm512_mask_i32extgather_epi32(_mm512_undefined_epi32(), mask, offsets, base, + _MM_UPCONV_EPI32_SINT8, scale, + _MM_HINT_NONE); + // now, downconverting to chars into temporary char vector + __vec16_i8 ret; + _mm512_extstore_epi32(ret.v,tmp,_MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i16 +__gather_base_offsets32_i16(uint8_t *base, uint32_t scale, __vec16_i32 offsets, + __vec16_i1 mask) { + // (iw): need to temporarily store as int because gathers can only return ints. + __vec16_i32 tmp = _mm512_mask_i32extgather_epi32(_mm512_undefined_epi32(), mask, offsets, base, + _MM_UPCONV_EPI32_SINT16, scale, + _MM_HINT_NONE); + // now, downconverting to chars into temporary char vector + __vec16_i16 ret; + _mm512_extstore_epi32(ret.v,tmp,_MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i32 +__gather_base_offsets32_i32(uint8_t *base, uint32_t scale, __vec16_i32 offsets, + __vec16_i1 mask) { + return _mm512_mask_i32extgather_epi32(_mm512_undefined_epi32(), mask, offsets, + base, _MM_UPCONV_EPI32_NONE, scale, + _MM_HINT_NONE); +} + +static FORCEINLINE __vec16_f +__gather_base_offsets32_float(uint8_t *base, uint32_t scale, __vec16_i32 offsets, + __vec16_i1 mask) { + return _mm512_mask_i32extgather_ps(_mm512_undefined_ps(), mask, offsets, + base, _MM_UPCONV_PS_NONE, scale, + _MM_HINT_NONE); +} + +static FORCEINLINE __vec16_d +__gather_base_offsets32_double(uint8_t *base, uint32_t scale, __vec16_i32 offsets, + __vec16_i1 mask) { + __vec16_d ret; + ret.v1 = _mm512_mask_i32loextgather_pd(_mm512_undefined_pd(), mask, offsets, + base, _MM_UPCONV_PD_NONE, scale, + _MM_HINT_NONE); + __m512i shuffled_offsets = _mm512_permute4f128_epi32(offsets.v, _MM_PERM_DCDC); + ret.v2 = _mm512_mask_i32loextgather_pd(_mm512_undefined_pd(), mask, shuffled_offsets, + base, _MM_UPCONV_PD_NONE, scale, + _MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i32 +__gather64_i32(__vec16_i64 addr, __vec16_i1 mask) +{ + __vec16_i32 ret; + + // There is no gather instruction with 64-bit offsets in KNC. 
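+    // (the i32 gather used below only accepts 32-bit indices; the low address halves
+    //  are rebiased by INT_MIN so they can serve as signed offsets, and each
+    //  per-bucket base pointer compensates by adding 2^31 back)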
+ // We have to manually iterate over the upper 32 bits ;-) + __vec16_i1 still_to_do = mask; + const __vec16_i32 signed_offsets = _mm512_add_epi32(addr.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); + while (still_to_do) { + int first_active_lane = _mm_tzcnt_32((int)still_to_do); + const uint &hi32 = ((uint*)&addr.v_hi)[first_active_lane]; + __vec16_i1 match = _mm512_mask_cmp_epi32_mask(still_to_do, addr.v_hi, + __smear_i32<__vec16_i32>((int32_t)hi32), + _MM_CMPINT_EQ); + + void * base = (void*)((((unsigned long)hi32) << 32) + (unsigned long)(-(long)INT_MIN)); + + ret.v = _mm512_mask_i32extgather_epi32(ret.v, match, signed_offsets, + base, _MM_UPCONV_EPI32_NONE, 1, + _MM_HINT_NONE); + still_to_do = _mm512_kxor(match, still_to_do); + } + + return ret; +} + +static FORCEINLINE __vec16_f +__gather64_float(__vec16_i64 addr, __vec16_i1 mask) +{ + __vec16_f ret; + + // There is no gather instruction with 64-bit offsets in KNC. + // We have to manually iterate over the upper 32 bits ;-) + __vec16_i1 still_to_do = mask; + const __vec16_i32 signed_offsets = _mm512_add_epi32(addr.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); + while (still_to_do) { + int first_active_lane = _mm_tzcnt_32((int)still_to_do); + const uint &hi32 = ((uint*)&addr.v_hi)[first_active_lane]; + __vec16_i1 match = _mm512_mask_cmp_epi32_mask(still_to_do,addr.v_hi, + __smear_i32<__vec16_i32>((int32_t)hi32), + _MM_CMPINT_EQ); + + void * base = (void*)((((unsigned long)hi32) << 32) + (unsigned long)(-(long)INT_MIN)); + + ret.v = _mm512_mask_i32extgather_ps(ret.v, match, signed_offsets, + base, _MM_UPCONV_PS_NONE, 1, + _MM_HINT_NONE); + still_to_do = _mm512_kxor(match, still_to_do); + } + + return ret; +} + +static FORCEINLINE __vec16_d +__gather64_double(__vec16_i64 addr, __vec16_i1 mask) +{ + __vec16_d ret; + + __vec16_i32 addr_lo, addr_hi; + hilo2zmm(addr, addr_lo.v, addr_hi.v); + +#if __INTEL_COMPILER < 1500 + ret.v1 = (__m512d)_mm512_i64extgather_pd ((__m512)addr_lo.v, 0, _MM_UPCONV_PD_NONE, 1, _MM_HINT_NONE); + ret.v2 = (__m512d)_mm512_i64extgather_pd ((__m512)addr_hi.v, 0, _MM_UPCONV_PD_NONE, 1, _MM_HINT_NONE); +#else + ret.v1 = _mm512_i64extgather_pd (addr_lo, 0, _MM_UPCONV_PD_NONE, 1, _MM_HINT_NONE); + ret.v2 = _mm512_i64extgather_pd (addr_hi, 0, _MM_UPCONV_PD_NONE, 1, _MM_HINT_NONE); +#endif + return ret; +} + + + +/*! gather with 64-bit offsets. + + \todo add optimization that falls back to 32-bit offset gather if + upper 32 bits are all 0es (in practice, offsets are usually array + indices, and _usually_ <4G even if the compiler cannot statically + figure out that this is the case */ + +static FORCEINLINE __vec16_f +__gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, + __vec16_i1 mask) { + + const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); + // There is no gather instruction with 64-bit offsets in KNC. 
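// (A possible way to implement the \todo above -- a sketch, not code from this
//  patch.  The extra sign-bit test is needed because i32 gather offsets are
//  signed, so only offsets below 2 GB can take the fast path:
//
//      __vec16_i1 hi_used = _mm512_mask_cmp_epi32_mask(mask, offsets.v_hi,
//                                                      _mm512_setzero_epi32(),
//                                                      _MM_CMPINT_NE);
//      __vec16_i1 lo_big  = _mm512_mask_cmp_epi32_mask(mask, offsets.v_lo,
//                                                      _mm512_setzero_epi32(),
//                                                      _MM_CMPINT_LT);
//      if (_mm512_kortestz(hi_used, lo_big))   // no lane needs 64-bit addressing
//          return __gather_base_offsets32_float(_base, scale, offsets.v_lo, mask);
//  )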
+ // We have to manually iterate over the upper 32 bits ;-) + __vec16_i1 still_to_do = mask; + __vec16_f ret; + while (still_to_do) { + int first_active_lane = _mm_tzcnt_32((int)still_to_do); + const uint &hi32 = ((uint*)&offsets.v_hi)[first_active_lane]; + __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi, + __smear_i32<__vec16_i32>((int32_t)hi32), + _MM_CMPINT_EQ); + void * base = (void*)((unsigned long)_base + + ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN)); + + ret = _mm512_mask_i32extgather_ps(ret, match, signed_offsets, base, + _MM_UPCONV_PS_NONE, scale, + _MM_HINT_NONE); + still_to_do = _mm512_kxor(match, still_to_do); + } + + return ret; +} + +static FORCEINLINE __vec16_i8 __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, + __vec16_i1 mask) +{ + const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); + __vec16_i1 still_to_do = mask; + __vec16_i32 tmp; + while (still_to_do) { + int first_active_lane = _mm_tzcnt_32((int)still_to_do); + const uint &hi32 = ((uint*)&offsets.v_hi)[first_active_lane]; + __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi, + __smear_i32<__vec16_i32>((int32_t)hi32), + _MM_CMPINT_EQ); + + void * base = (void*)((unsigned long)_base + + ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN)); + tmp = _mm512_mask_i32extgather_epi32(tmp, match, signed_offsets, base, + _MM_UPCONV_EPI32_SINT8, scale, + _MM_HINT_NONE); + still_to_do = _mm512_kxor(match,still_to_do); + } + __vec16_i8 ret; + _mm512_extstore_epi32(ret.v,tmp,_MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i8 +__gather64_i8(__vec16_i64 addr, __vec16_i1 mask) +{ + return __gather_base_offsets64_i8(0, 1, addr, mask); +} + +static FORCEINLINE __vec16_i16 __gather_base_offsets64_i16(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, + __vec16_i1 mask) +{ + const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); + __vec16_i1 still_to_do = mask; + __vec16_i32 tmp; + while (still_to_do) { + int first_active_lane = _mm_tzcnt_32((int)still_to_do); + const uint &hi32 = ((uint*)&offsets.v_hi)[first_active_lane]; + __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi, + __smear_i32<__vec16_i32>((int32_t)hi32), + _MM_CMPINT_EQ); + + void * base = (void*)((unsigned long)_base + + ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN)); + tmp = _mm512_mask_i32extgather_epi32(tmp, match, signed_offsets, base, + _MM_UPCONV_EPI32_SINT16, scale, + _MM_HINT_NONE); + still_to_do = _mm512_kxor(match,still_to_do); + } + __vec16_i16 ret; + _mm512_extstore_epi32(ret.v,tmp,_MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); + return ret; +} + +static FORCEINLINE __vec16_i16 +__gather64_i16(__vec16_i64 addr, __vec16_i1 mask) +{ + return __gather_base_offsets64_i16(0, 1, addr, mask); +} + +static FORCEINLINE void __scatter_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, + __vec16_f value, + __vec16_i1 mask) { + + const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); + __vec16_i1 still_to_do = mask; + while (still_to_do) { + int first_active_lane = _mm_tzcnt_32((int)still_to_do); + const uint &hi32 = ((uint*)&offsets.v_hi)[first_active_lane]; + __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi, + __smear_i32<__vec16_i32>((int32_t)hi32), + _MM_CMPINT_EQ); + + 
void * base = (void*)((unsigned long)_base + + ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN)); + _mm512_mask_i32extscatter_ps(base, match, signed_offsets, + value, + _MM_DOWNCONV_PS_NONE, scale, + _MM_HINT_NONE); + still_to_do = _mm512_kxor(match,still_to_do); + } +} + +static FORCEINLINE void __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, + __vec16_i32 value, + __vec16_i1 mask) { + + const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); + __vec16_i1 still_to_do = mask; + while (still_to_do) { + int first_active_lane = _mm_tzcnt_32((int)still_to_do); + const uint &hi32 = ((uint*)&offsets.v_hi)[first_active_lane]; + __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi, + __smear_i32<__vec16_i32>((int32_t)hi32), + _MM_CMPINT_EQ); + + void * base = (void*)((unsigned long)_base + + ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN)); + _mm512_mask_i32extscatter_epi32(base, match, signed_offsets, + value, + _MM_DOWNCONV_EPI32_NONE, scale, + _MM_HINT_NONE); + still_to_do = _mm512_kxor(match,still_to_do); + } +} + +static FORCEINLINE void __scatter_base_offsets64_i64(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, + __vec16_i64 value, + __vec16_i1 mask) { + + const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); + __vec16_i1 still_to_do = mask; + while (still_to_do) { + int first_active_lane = _mm_tzcnt_32((int)still_to_do); + const uint &hi32 = ((uint*)&offsets.v_hi)[first_active_lane]; + __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi, + __smear_i32<__vec16_i32>((int32_t)hi32), + _MM_CMPINT_EQ); + + void * base = (void*)((unsigned long)_base + ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN)); + + _mm512_mask_i32extscatter_epi32(base, match, signed_offsets, value.v_lo, _MM_DOWNCONV_EPI32_NONE, scale, _MM_HINT_NONE); + _mm512_mask_i32extscatter_epi32(base + sizeof(uint32_t), match, signed_offsets, value.v_hi, _MM_DOWNCONV_EPI32_NONE, scale, _MM_HINT_NONE); + + still_to_do = _mm512_kxor(match,still_to_do); + } +} + +static FORCEINLINE void // TODO +__scatter_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, + __vec16_i8 value, + __vec16_i1 mask) { + __vec16_i1 still_to_do = mask; + + __vec16_i32 tmp = _mm512_extload_epi32(&value, _MM_UPCONV_EPI32_SINT8, + _MM_BROADCAST32_NONE, _MM_HINT_NONE); + // _mm512_mask_extstore_epi32(p, mask, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); + + while (still_to_do) { + int first_active_lane = _mm_tzcnt_32((int)still_to_do); + const uint &hi32 = ((uint*)&offsets.v_hi)[first_active_lane]; + __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi, + __smear_i32<__vec16_i32>((int32_t)hi32), + _MM_CMPINT_EQ); + + void * base = (void*)((unsigned long)_base + + ((scale*(unsigned long)hi32) << 32)); + _mm512_mask_i32extscatter_epi32(base, match, offsets.v_lo, + tmp, + _MM_DOWNCONV_EPI32_SINT8, scale, + _MM_HINT_NONE); + still_to_do = _mm512_kxor(match,still_to_do); + } +} + + +static FORCEINLINE __vec16_i32 +__gather_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, + __vec16_i1 mask) +{ + __vec16_f r = __gather_base_offsets64_float(_base,scale,offsets,mask); + return (__vec16_i32&)r; +} + +// scatter + +static FORCEINLINE void __scatter_base_offsets32_i32(uint8_t *b, uint32_t scale, __vec16_i32 offsets, __vec16_i32 val, __vec16_i1 mask) +{ + 
_mm512_mask_i32extscatter_epi32(b, mask, offsets, val, _MM_DOWNCONV_EPI32_NONE, scale, _MM_HINT_NONE); +} + +static FORCEINLINE void __scatter_base_offsets32_i64(uint8_t *b, uint32_t scale, __vec16_i32 offsets, __vec16_i64 val, __vec16_i1 mask) +{ + _mm512_mask_i32extscatter_epi32(b, mask, offsets, val.v_lo, _MM_DOWNCONV_EPI32_NONE, scale, _MM_HINT_NONE); + _mm512_mask_i32extscatter_epi32(b + sizeof(uint32_t), mask, offsets, val.v_hi, _MM_DOWNCONV_EPI32_NONE, scale, _MM_HINT_NONE); +} + +static FORCEINLINE void __scatter_base_offsets32_float(void *base, uint32_t scale, __vec16_i32 offsets, __vec16_f val, __vec16_i1 mask) +{ + _mm512_mask_i32extscatter_ps(base, mask, offsets, val, _MM_DOWNCONV_PS_NONE, scale, _MM_HINT_NONE); +} + +static FORCEINLINE void __scatter_base_offsets32_double(void *base, uint32_t scale, __vec16_i32 offsets, __vec16_d val, __vec16_i1 mask) +{ + _mm512_mask_i32loextscatter_pd(base, mask, offsets, val.v1, _MM_DOWNCONV_PD_NONE, scale, _MM_HINT_NONE); + __m512i shuffled_offsets = _mm512_permute4f128_epi32(offsets.v, _MM_PERM_DCDC); + const __mmask8 mask8 = 0x00FF & (mask >> 8); + _mm512_mask_i32loextscatter_pd(base, mask8, shuffled_offsets, val.v2, _MM_DOWNCONV_PD_NONE, scale, _MM_HINT_NONE); +} + +static FORCEINLINE void __scatter64_float(__vec16_i64 ptrs, __vec16_f val, __vec16_i1 mask){ +#if __INTEL_COMPILER < 1500 + #warning "__scatter64_float is slow due to outdated compiler" + __scatter_base_offsets64_float(0, 1, ptrs, val, mask); +#else + __vec16_i32 first8ptrs, second8ptrs; + hilo2zmm(ptrs, first8ptrs.v, second8ptrs.v); + _mm512_mask_i64scatter_pslo (0, mask, first8ptrs, val, 1); + const __mmask8 mask_hi = 0x00FF & (mask >> 8); + _mm512_mask_i64scatter_pslo (0, mask_hi, second8ptrs, _mm512_permute4f128_ps(val.v, _MM_PERM_DCDC), 1); +#endif +} + +static FORCEINLINE void __scatter64_i32(__vec16_i64 ptrs, __vec16_i32 val, __vec16_i1 mask) { +#if __INTEL_COMPILER < 1500 + #warning "__scatter64_i32 is slow due to outdated compiler" + __scatter_base_offsets64_i32(0, 1, ptrs, val, mask); +#else + __vec16_i32 first8ptrs, second8ptrs; + hilo2zmm(ptrs, first8ptrs.v, second8ptrs.v); + _mm512_mask_i64scatter_epi32lo (0, mask, first8ptrs, val, 1); + const __mmask8 mask_hi = 0x00FF & (mask >> 8); + _mm512_mask_i64scatter_epi32lo (0, mask_hi, second8ptrs, _mm512_permute4f128_epi32(val.v, _MM_PERM_DCDC), 1); +#endif +} + +static FORCEINLINE void __scatter64_i64(__vec16_i64 ptrs, __vec16_i64 val, __vec16_i1 mask) { +#if __INTEL_COMPILER < 1500 + #warning "__scatter64_i64 is slow due to outdated compiler" + __scatter_base_offsets64_i64(0, 1, ptrs, val, mask); +#else + __vec16_i32 first8ptrs, second8ptrs; + hilo2zmm(ptrs, first8ptrs.v, second8ptrs.v); + __vec16_i32 first8vals, second8vals; + hilo2zmm(val, first8vals.v, second8vals.v); + _mm512_mask_i64extscatter_epi64 (0, mask, first8ptrs, first8vals, _MM_DOWNCONV_EPI64_NONE, 1, _MM_HINT_NONE); + const __mmask8 mask8 = 0x00FF & (mask >> 8); + _mm512_mask_i64extscatter_epi64 (0, mask8, second8ptrs, second8vals, _MM_DOWNCONV_EPI64_NONE, 1, _MM_HINT_NONE); +#endif +} + + +/////////////////////////////////////////////////////////////////////////// +// packed load/store +/////////////////////////////////////////////////////////////////////////// + +static FORCEINLINE int32_t __packed_load_active(uint32_t *p, __vec16_i32 *val, __vec16_i1 mask) { + __vec16_i32 v = __load<64>(val); + v = _mm512_mask_extloadunpacklo_epi32(v, mask, p, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); + v = _mm512_mask_extloadunpackhi_epi32(v, mask, (uint8_t*)p+64, 
_MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); + __store<64>(val, v); + return _mm_countbits_32(uint32_t(mask)); +} + +static FORCEINLINE int32_t __packed_store_active(uint32_t *p, __vec16_i32 val, __vec16_i1 mask) { + _mm512_mask_extpackstorelo_epi32(p, mask, val, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); + _mm512_mask_extpackstorehi_epi32((uint8_t*)p+64, mask, val, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); + return _mm_countbits_32(uint32_t(mask)); +} + +static FORCEINLINE int32_t __packed_store_active2(uint32_t *p, __vec16_i32 val, __vec16_i1 mask) +{ + return __packed_store_active(p, val, mask); +} + + +/////////////////////////////////////////////////////////////////////////// +// aos/soa +/////////////////////////////////////////////////////////////////////////// + + +static FORCEINLINE void __soa_to_aos3_float(__vec16_f v0, __vec16_f v1, __vec16_f v2, + float *ptr) { + _mm512_i32scatter_ps(ptr, __vec16_i32(0,12,24,36,48,60,72,84,96,108,120,132,144,156,168,180), v0, 1); + _mm512_i32scatter_ps(ptr+1, __vec16_i32(0,12,24,36,48,60,72,84,96,108,120,132,144,156,168,180), v1, 1); + _mm512_i32scatter_ps(ptr+2, __vec16_i32(0,12,24,36,48,60,72,84,96,108,120,132,144,156,168,180), v2, 1); +} + +static FORCEINLINE void __aos_to_soa3_float(float *ptr, __vec16_f *out0, __vec16_f *out1, + __vec16_f *out2) { + *out0 = _mm512_i32gather_ps(__vec16_i32(0,12,24,36,48,60,72,84,96,108,120,132,144,156,168,180), ptr, 1); + *out1 = _mm512_i32gather_ps(__vec16_i32(0,12,24,36,48,60,72,84,96,108,120,132,144,156,168,180), ptr+1, 1); + *out2 = _mm512_i32gather_ps(__vec16_i32(0,12,24,36,48,60,72,84,96,108,120,132,144,156,168,180), ptr+2, 1); +} + +static FORCEINLINE void __soa_to_aos4_float(__vec16_f v0, __vec16_f v1, __vec16_f v2, + __vec16_f v3, float *ptr) { + /* + __vec16_f v0 (1,5, 9,13,17,21,25,29,33,37,41,45,49,53,57,61); + __vec16_f v1 (2,6,10,14,18,22,26,30,34,38,42,46,50,54,58,62); + __vec16_f v2 (3,7,11,15,19,23,27,31,35,39,44,47,51,55,59,63); + __vec16_f v3 (4,8,12,16,20,24,28,32,36,40,45,48,52,56,60,64); + + + // v0 = A1 ... A16, v1 = B1 ..., v3 = D1 ... D16 + __vec16_f tmp00 = _mm512_mask_swizzle_ps (v0, 0xCCCC, v1, _MM_SWIZ_REG_BADC); // A1A2B1B2 A5A6B5B6 ... + __vec16_f tmp01 = _mm512_mask_swizzle_ps (v0, 0x3333, v1, _MM_SWIZ_REG_BADC); // B3B4A3A4 B7B8A7A8 ... + __vec16_f tmp02 = _mm512_mask_swizzle_ps (v2, 0xCCCC, v3, _MM_SWIZ_REG_BADC); // C1C2D1D2 ... + __vec16_f tmp03 = _mm512_mask_swizzle_ps (v2, 0x3333, v3, _MM_SWIZ_REG_BADC); // D3D4C3C4 ... + + __vec16_f tmp10 = _mm512_mask_swizzle_ps (tmp00, 0xAAAA, tmp02, _MM_SWIZ_REG_CDAB); // A1C1B1D1 A5C5B5D5 ... + __vec16_f tmp11 = _mm512_mask_swizzle_ps (tmp00, 0x5555, tmp02, _MM_SWIZ_REG_CDAB); // C2A2D2B2 C6A6D6B6 ... + __vec16_f tmp12 = _mm512_mask_swizzle_ps (tmp01, 0xAAAA, tmp03, _MM_SWIZ_REG_CDAB); // DBCA ... + __vec16_f tmp13 = _mm512_mask_swizzle_ps (tmp01, 0x5555, tmp03, _MM_SWIZ_REG_CDAB); // BDAC ... 
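  NB: the swizzle sequence above looks like an unfinished in-register 4x4
  transpose, kept for reference.  The active implementation below takes the
  simpler route of one strided scatter per component: with scale 1 the indices
  are byte offsets, i.e. one 16-byte (x,y,z,w) record per program instance,
  so v0[i] lands at byte ptr + 16*i, v1[i] at ptr + 4 + 16*i, and so on
  (__soa_to_aos3_float above does the same with 12-byte records).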
+ */ + + + _mm512_i32scatter_ps(ptr, __vec16_i32(0,16,32,48,64,80,96,112,128,144,160,176,192,208,224,240), v0, 1); + _mm512_i32scatter_ps(ptr+1, __vec16_i32(0,16,32,48,64,80,96,112,128,144,160,176,192,208,224,240), v1, 1); + _mm512_i32scatter_ps(ptr+2, __vec16_i32(0,16,32,48,64,80,96,112,128,144,160,176,192,208,224,240), v2, 1); + _mm512_i32scatter_ps(ptr+3, __vec16_i32(0,16,32,48,64,80,96,112,128,144,160,176,192,208,224,240), v3, 1); +} + + +static FORCEINLINE void __aos_to_soa4_float(float *ptr, __vec16_f *out0, __vec16_f *out1, + __vec16_f *out2, __vec16_f *out3) { + *out0 = _mm512_i32gather_ps(__vec16_i32(0,16,32,48,64,80,96,112,128,144,160,176,192,208,224,240), ptr, 1); + *out1 = _mm512_i32gather_ps(__vec16_i32(0,16,32,48,64,80,96,112,128,144,160,176,192,208,224,240), ptr+1, 1); + *out2 = _mm512_i32gather_ps(__vec16_i32(0,16,32,48,64,80,96,112,128,144,160,176,192,208,224,240), ptr+2, 1); + *out3 = _mm512_i32gather_ps(__vec16_i32(0,16,32,48,64,80,96,112,128,144,160,176,192,208,224,240), ptr+3, 1); +} + + +/////////////////////////////////////////////////////////////////////////// +// prefetch +/////////////////////////////////////////////////////////////////////////// + +static FORCEINLINE void __prefetch_read_uniform_1(uint8_t *p) { + _mm_prefetch((const char*) p, _MM_HINT_T0); // prefetch into L1$ +} + +static FORCEINLINE void __prefetch_read_uniform_2(uint8_t *p) { + _mm_prefetch((const char*) p, _MM_HINT_T1); // prefetch into L2$ +} + +static FORCEINLINE void __prefetch_read_uniform_3(uint8_t *p) { + // There is no L3$ on KNC, but we prefetch into L2$ instead. + _mm_prefetch((const char*) p, _MM_HINT_T1); // prefetch into L2$ +} + +static FORCEINLINE void __prefetch_read_uniform_nt(uint8_t *p) { + _mm_prefetch((const char*) p, _MM_HINT_T2); // prefetch into L2$ with non-temporal hint + // _mm_prefetch(p, _MM_HINT_NTA); // prefetch into L1$ with non-temporal hint +} + +#define PREFETCH_READ_VARYING(CACHE_NUM, HINT) \ +static FORCEINLINE void __prefetch_read_varying_##CACHE_NUM##_native(uint8_t *base, uint32_t scale, \ + __vec16_i32 offsets, __vec16_i1 mask) { \ + _mm512_mask_prefetch_i32gather_ps (offsets, mask, base, scale, HINT); \ +} \ +static FORCEINLINE void __prefetch_read_varying_##CACHE_NUM(__vec16_i64 addr, __vec16_i1 mask) {} \ + +PREFETCH_READ_VARYING(1, _MM_HINT_T0) +PREFETCH_READ_VARYING(2, _MM_HINT_T1) +// L3 prefetch is mapped to L2 cache +PREFETCH_READ_VARYING(3, _MM_HINT_T1) +PREFETCH_READ_VARYING(nt, _MM_HINT_T2) + +/////////////////////////////////////////////////////////////////////////// +// atomics +/////////////////////////////////////////////////////////////////////////// + +static FORCEINLINE uint32_t __atomic_add(uint32_t *p, uint32_t v) { +#ifdef _MSC_VER + return InterlockedAdd((LONG volatile *)p, v) - v; +#else + return __sync_fetch_and_add(p, v); +#endif +} + +static FORCEINLINE uint32_t __atomic_sub(uint32_t *p, uint32_t v) { +#ifdef _MSC_VER + return InterlockedAdd((LONG volatile *)p, -v) + v; +#else + return __sync_fetch_and_sub(p, v); +#endif +} + +static FORCEINLINE uint32_t __atomic_and(uint32_t *p, uint32_t v) { +#ifdef _MSC_VER + return _InterlockedAnd((LONG volatile *)p, v); +#else + return __sync_fetch_and_and(p, v); +#endif +} + +static FORCEINLINE uint32_t __atomic_or(uint32_t *p, uint32_t v) { +#ifdef _MSC_VER + return _InterlockedOr((LONG volatile *)p, v); +#else + return __sync_fetch_and_or(p, v); +#endif +} + +static FORCEINLINE uint32_t __atomic_xor(uint32_t *p, uint32_t v) { +#ifdef _MSC_VER + return _InterlockedXor((LONG volatile *)p, 
v); +#else + return __sync_fetch_and_xor(p, v); +#endif +} + +static FORCEINLINE uint32_t __atomic_min(uint32_t *p, uint32_t v) { + int32_t old, min; + do { + old = *((volatile int32_t *)p); + min = (old < (int32_t)v) ? old : (int32_t)v; +#ifdef _MSC_VER + } while (InterlockedCompareExchange((LONG volatile *)p, min, old) != old); +#else + } while (__sync_bool_compare_and_swap(p, old, min) == false); +#endif + return old; +} + +static FORCEINLINE uint32_t __atomic_max(uint32_t *p, uint32_t v) { + int32_t old, max; + do { + old = *((volatile int32_t *)p); + max = (old > (int32_t)v) ? old : (int32_t)v; +#ifdef _MSC_VER + } while (InterlockedCompareExchange((LONG volatile *)p, max, old) != old); +#else + } while (__sync_bool_compare_and_swap(p, old, max) == false); +#endif + return old; +} + +static FORCEINLINE uint32_t __atomic_umin(uint32_t *p, uint32_t v) { + uint32_t old, min; + do { + old = *((volatile uint32_t *)p); + min = (old < v) ? old : v; +#ifdef _MSC_VER + } while (InterlockedCompareExchange((LONG volatile *)p, min, old) != old); +#else + } while (__sync_bool_compare_and_swap(p, old, min) == false); +#endif + return old; +} + +static FORCEINLINE uint32_t __atomic_umax(uint32_t *p, uint32_t v) { + uint32_t old, max; + do { + old = *((volatile uint32_t *)p); + max = (old > v) ? old : v; +#ifdef _MSC_VER + } while (InterlockedCompareExchange((LONG volatile *)p, max, old) != old); +#else + } while (__sync_bool_compare_and_swap(p, old, max) == false); +#endif + return old; +} + +static FORCEINLINE uint32_t __atomic_xchg(uint32_t *p, uint32_t v) { +#ifdef _MSC_VER + return InterlockedExchange((LONG volatile *)p, v); +#else + return __sync_lock_test_and_set(p, v); +#endif +} + +static FORCEINLINE uint32_t __atomic_cmpxchg(uint32_t *p, uint32_t cmpval, + uint32_t newval) { +#ifdef _MSC_VER + return InterlockedCompareExchange((LONG volatile *)p, newval, cmpval); +#else + return __sync_val_compare_and_swap(p, cmpval, newval); +#endif +} + +static FORCEINLINE uint64_t __atomic_add(uint64_t *p, uint64_t v) { +#ifdef _MSC_VER + return InterlockedAdd64((LONGLONG volatile *)p, v) - v; +#else + return __sync_fetch_and_add(p, v); +#endif +} + +static FORCEINLINE uint64_t __atomic_sub(uint64_t *p, uint64_t v) { +#ifdef _MSC_VER + return InterlockedAdd64((LONGLONG volatile *)p, -v) + v; +#else + return __sync_fetch_and_sub(p, v); +#endif +} + +static FORCEINLINE uint64_t __atomic_and(uint64_t *p, uint64_t v) { +#ifdef _MSC_VER + return InterlockedAnd64((LONGLONG volatile *)p, v) - v; +#else + return __sync_fetch_and_and(p, v); +#endif +} + +static FORCEINLINE uint64_t __atomic_or(uint64_t *p, uint64_t v) { +#ifdef _MSC_VER + return InterlockedOr64((LONGLONG volatile *)p, v) - v; +#else + return __sync_fetch_and_or(p, v); +#endif +} + +static FORCEINLINE uint64_t __atomic_xor(uint64_t *p, uint64_t v) { +#ifdef _MSC_VER + return InterlockedXor64((LONGLONG volatile *)p, v) - v; +#else + return __sync_fetch_and_xor(p, v); +#endif +} + +static FORCEINLINE uint64_t __atomic_min(uint64_t *p, uint64_t v) { + int64_t old, min; + do { + old = *((volatile int64_t *)p); + min = (old < (int64_t)v) ? old : (int64_t)v; +#ifdef _MSC_VER + } while (InterlockedCompareExchange64((LONGLONG volatile *)p, min, old) != old); +#else + } while (__sync_bool_compare_and_swap(p, old, min) == false); +#endif + return old; +} + +static FORCEINLINE uint64_t __atomic_max(uint64_t *p, uint64_t v) { + int64_t old, max; + do { + old = *((volatile int64_t *)p); + max = (old > (int64_t)v) ? 
old : (int64_t)v; +#ifdef _MSC_VER + } while (InterlockedCompareExchange64((LONGLONG volatile *)p, max, old) != old); +#else + } while (__sync_bool_compare_and_swap(p, old, max) == false); +#endif + return old; +} + +static FORCEINLINE uint64_t __atomic_umin(uint64_t *p, uint64_t v) { + uint64_t old, min; + do { + old = *((volatile uint64_t *)p); + min = (old < v) ? old : v; +#ifdef _MSC_VER + } while (InterlockedCompareExchange64((LONGLONG volatile *)p, min, old) != old); +#else + } while (__sync_bool_compare_and_swap(p, old, min) == false); +#endif + return old; +} + +static FORCEINLINE uint64_t __atomic_umax(uint64_t *p, uint64_t v) { + uint64_t old, max; + do { + old = *((volatile uint64_t *)p); + max = (old > v) ? old : v; +#ifdef _MSC_VER + } while (InterlockedCompareExchange64((LONGLONG volatile *)p, max, old) != old); +#else + } while (__sync_bool_compare_and_swap(p, old, max) == false); +#endif + return old; +} + +static FORCEINLINE uint64_t __atomic_xchg(uint64_t *p, uint64_t v) { +#ifdef _MSC_VER + return InterlockedExchange64((LONGLONG volatile *)p, v); +#else + return __sync_lock_test_and_set(p, v); +#endif +} + +static FORCEINLINE uint64_t __atomic_cmpxchg(uint64_t *p, uint64_t cmpval, + uint64_t newval) { +#ifdef _MSC_VER + return InterlockedCompareExchange64((LONGLONG volatile *)p, newval, cmpval); +#else + return __sync_val_compare_and_swap(p, cmpval, newval); +#endif +} + +#ifdef WIN32 +#include +#define __clock __rdtsc +#else // WIN32 +static FORCEINLINE uint64_t __clock() { + uint32_t low, high; +#ifdef __x86_64 + __asm__ __volatile__ ("xorl %%eax,%%eax \n cpuid" + ::: "%rax", "%rbx", "%rcx", "%rdx" ); +#else + __asm__ __volatile__ ("xorl %%eax,%%eax \n cpuid" + ::: "%eax", "%ebx", "%ecx", "%edx" ); +#endif + __asm__ __volatile__ ("rdtsc" : "=a" (low), "=d" (high)); + return (uint64_t)high << 32 | low; +} +#endif // !WIN32 + +/////////////////////////////////////////////////////////////////////////// +// Transcendentals + + +#define TRANSCENDENTALS(op) \ +static FORCEINLINE __vec16_f __##op##_varying_float(__vec16_f v) { return _mm512_##op##_ps(v); } \ +static FORCEINLINE float __##op##_uniform_float(float v) { return op##f(v); } \ +static FORCEINLINE __vec16_d __##op##_varying_double(__vec16_d v) { return __vec16_d(_mm512_##op##_pd(v.v1),_mm512_##op##_pd(v.v2)); } \ +static FORCEINLINE double __##op##_uniform_double(double a) { return op(a); } + +TRANSCENDENTALS(log) +TRANSCENDENTALS(exp) + +static FORCEINLINE float __pow_uniform_float(float a, float b) { return powf(a, b);} +static FORCEINLINE __vec16_f __pow_varying_float(__vec16_f a, __vec16_f b) { return _mm512_pow_ps(a,b); } +static FORCEINLINE double __pow_uniform_double(double a, double b) { return pow(a,b);} +static FORCEINLINE __vec16_d __pow_varying_double(__vec16_d a, __vec16_d b) { return __vec16_d(_mm512_pow_pd(a.v1,b.v1),_mm512_pow_pd(a.v2,b.v2)); } + +/////////////////////////////////////////////////////////////////////////// +// Trigonometry + +TRANSCENDENTALS(sin) +TRANSCENDENTALS(asin) +TRANSCENDENTALS(cos) +TRANSCENDENTALS(acos) +TRANSCENDENTALS(tan) +TRANSCENDENTALS(atan) + +static FORCEINLINE float __atan2_uniform_float(float a, float b) { return atan2f(a, b);} +static FORCEINLINE __vec16_f __atan2_varying_float(__vec16_f a, __vec16_f b) { return _mm512_atan2_ps(a,b); } +static FORCEINLINE double __atan2_uniform_double(double a, double b) { return atan2(a,b);} +static FORCEINLINE __vec16_d __atan2_varying_double(__vec16_d a, __vec16_d b) { return 
__vec16_d(_mm512_atan2_pd(a.v1,b.v1),_mm512_atan2_pd(a.v2,b.v2)); } + +#undef FORCEINLINE +#undef PRE_ALIGN +#undef POST_ALIGN From 49b9297166766e07da1c3e27c476b6cc2eba6eec Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Wed, 25 Mar 2015 18:46:47 +0300 Subject: [PATCH 04/33] cast s(z)ext -> add avx and sse, + started ext..load rwriting --- examples/intrinsics/knl.h | 147 ++++++++++++++++---------------------- 1 file changed, 61 insertions(+), 86 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index 385cc08c..9ea4bbdc 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -43,6 +43,7 @@ #endif #include +#include #include #include // for operator<<(m512[i]) @@ -775,10 +776,7 @@ template static FORCEINLINE __vec16_i32 __load(const __vec16_i32 *p) #ifdef ISPC_FORCE_ALIGNED_MEMORY return _mm512_load_epi32(p); #else - __vec16_i32 v; - v = _mm512_extloadunpacklo_epi32(v, p, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); - v = _mm512_extloadunpackhi_epi32(v, (uint8_t*)p+64, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); - return v; + return _mm512_loadu_si512(p); #endif } @@ -1149,10 +1147,9 @@ template static FORCEINLINE __vec16_i64 __load(const __vec16_i64 *p) __vec16_i32 v1; __vec16_i32 v2; const uint8_t*ptr = (const uint8_t*)p; - v2 = _mm512_extloadunpacklo_epi32(v2, ptr, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); - v2 = _mm512_extloadunpackhi_epi32(v2, ptr+64, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); - v1 = _mm512_extloadunpacklo_epi32(v1, ptr+64, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); - v1 = _mm512_extloadunpackhi_epi32(v1, ptr+128, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); + + v1 = _mm512_mask_loadu_epi64(v1, 0xFFFF, ptr+64); + v2 = _mm512_mask_loadu_epi64(v2, 0xFFFF, ptr); __vec16_i64 ret; ret.v_hi = _mm512_mask_permutevar_epi32(ret.v_hi, 0xFF00, @@ -1446,10 +1443,7 @@ template static FORCEINLINE __vec16_f __load(const __vec16_f *p) { #ifdef ISPC_FORCE_ALIGNED_MEMORY return _mm512_load_ps(p); #else - __vec16_f v; - v = _mm512_extloadunpacklo_ps(v, p, _MM_UPCONV_PS_NONE, _MM_HINT_NONE); - v = _mm512_extloadunpackhi_ps(v, (uint8_t*)p+64, _MM_UPCONV_PS_NONE, _MM_HINT_NONE); - return v; + return _mm512_loadu_ps(p); #endif } #if 0 @@ -1805,10 +1799,8 @@ static FORCEINLINE __vec16_d __shuffle2_double(__vec16_d v0, __vec16_d v1, __vec template static FORCEINLINE __vec16_d __load(const __vec16_d *p) { __vec16_d ret; - ret.v1 = _mm512_extloadunpacklo_pd(ret.v1, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); - ret.v1 = _mm512_extloadunpackhi_pd(ret.v1, (uint8_t*)p+64, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); - ret.v2 = _mm512_extloadunpacklo_pd(ret.v2, (uint8_t*)p+64, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); - ret.v2 = _mm512_extloadunpackhi_pd(ret.v2, (uint8_t*)p+128, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); + ret.v1 = _mm512_loadu_pd(p); + ret.v2 = _mm512_loadu_pd((uint8_t*)p+64); return ret; } #if 0 @@ -1871,18 +1863,16 @@ static FORCEINLINE __vec16_i32 __cast_sext(const __vec16_i32 &, const __vec16_i1 static FORCEINLINE __vec16_i32 __cast_sext(const __vec16_i32 &, const __vec16_i8 &val) { - //return _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); - __vec16_i32 a = _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); - return a; + __m128i val_t = _mm_loadu_si128((__m128i *)val.v); + return _mm512_cvtepi8_epi32(val_t); } static FORCEINLINE __vec16_i32 __cast_sext(const __vec16_i32 &, const __vec16_i16 &val) { - return _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + 
__m256i val_t = _mm256_loadu_si256((__m256i *)val.v); + return _mm512_cvtepi16_epi32(val_t); } - - static FORCEINLINE __vec16_i64 __cast_sext(const __vec16_i64 &, const __vec16_i1 &val) { __vec16_i32 ret = _mm512_mask_mov_epi32(_mm512_setzero_epi32(), val, _mm512_set1_epi32(-1)); @@ -1891,7 +1881,7 @@ static FORCEINLINE __vec16_i64 __cast_sext(const __vec16_i64 &, const __vec16_i1 static FORCEINLINE __vec16_i64 __cast_sext(const __vec16_i64 &, const __vec16_i8 &val) { - __vec16_i32 a = _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 a = __cast_sext(__vec16_i32(), val); return __vec16_i64(a.v, _mm512_srai_epi32(a.v, 31)); } @@ -1931,12 +1921,14 @@ static FORCEINLINE __vec16_i32 __cast_zext(const __vec16_i32 &, const __vec16_i1 static FORCEINLINE __vec16_i32 __cast_zext(const __vec16_i32 &, const __vec16_i8 &val) { - return _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_UINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __m128i val_t = _mm_loadu_si128((__m128i *)val.v); + return _mm512_cvtepu8_epi32(val_t); } static FORCEINLINE __vec16_i32 __cast_zext(const __vec16_i32 &, const __vec16_i16 &val) { - return _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_UINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __m256i val_t = _mm256_loadu_si256((__m256i *)val.v); + return _mm512_cvtepu16_epi32(val_t); } static FORCEINLINE __vec16_i64 __cast_zext(const __vec16_i64 &, const __vec16_i1 &val) @@ -1963,17 +1955,18 @@ static FORCEINLINE __vec16_i64 __cast_zext(const __vec16_i64 &, const __vec16_i3 return __vec16_i64(val.v, _mm512_setzero_epi32()); } +static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i32 val) { + return _mm512_cvtepi32_ps(val); +} + static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i8 val) { - return _mm512_extload_ps(&val, _MM_UPCONV_PS_SINT8, _MM_BROADCAST_16X16, _MM_HINT_NONE); + return __cast_sitofp(__vec16_f(), __cast_sext(__vec16_i32(), val)); } static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i16 val) { - return _mm512_extload_ps(&val, _MM_UPCONV_PS_SINT16, _MM_BROADCAST_16X16, _MM_HINT_NONE); + return __cast_sitofp(__vec16_f(), __cast_sext(__vec16_i32(), val)); } -static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i32 val) { - return _mm512_cvtfxpnt_round_adjustepi32_ps(val, _MM_ROUND_MODE_NEAREST, _MM_EXPADJ_NONE); -} static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i64 val) { __m512i tmp1; @@ -1982,7 +1975,7 @@ static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i64 val) { __vec16_f ret; /* - // Cycles don't work. It seems that it is icc bug. + // Loops don't work. It seems that it is icc bug. 
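//    (Context: plain AVX-512F has no 64-bit-integer -> float conversion --
//     _mm512_cvtepi64_ps needs AVX512DQ, which KNL does not have -- and, per the
//     note above, the element-wise loop kept below for reference was miscompiled
//     by the icc build in use; the function therefore works on the two 8-lane
//     halves tmp1/tmp2 extracted above instead.)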
for (int i = 0; i < 8; i++) { ret[i] = (float)(((int64_t*)&tmp1)[i]); } @@ -2013,7 +2006,7 @@ static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i64 val) { } static FORCEINLINE __vec16_d __cast_sitofp(__vec16_d, __vec16_i8 val) { - __vec16_i32 vi = _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST_16X16, _MM_HINT_NONE); + __vec16_i32 vi = __cast_sext(__vec16_i32(), val); __vec16_d ret; ret.v1 = _mm512_cvtepi32lo_pd(vi); __vec16_i32 other8 = _mm512_permute4f128_epi32(vi, _MM_PERM_DCDC); @@ -2022,7 +2015,7 @@ static FORCEINLINE __vec16_d __cast_sitofp(__vec16_d, __vec16_i8 val) { } static FORCEINLINE __vec16_d __cast_sitofp(__vec16_d, __vec16_i16 val) { - __vec16_i32 vi = _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST_16X16, _MM_HINT_NONE); + __vec16_i32 vi = __cast_sext(__vec16_i32(), val); __vec16_d ret; ret.v1 = _mm512_cvtepi32lo_pd(vi); __vec16_i32 other8 = _mm512_permute4f128_epi32(vi, _MM_PERM_DCDC); @@ -2052,7 +2045,6 @@ static FORCEINLINE __vec16_d __cast_sitofp(__vec16_d, __vec16_i64 val) { return ret; } - static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i1 v) { const __m512 ret = _mm512_setzero_ps(); @@ -2060,16 +2052,16 @@ static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i1 v) return _mm512_mask_mov_ps(ret, v, one); } +static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i32 v) { + return _mm512_cvtepi32_ps(v); +} + static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, const __vec16_i8 &v) { - return _mm512_extload_ps(v.v, _MM_UPCONV_PS_UINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + return __cast_uitofp(__vec16_f(), __cast_zext(__vec16_i32(), v)); } static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i16 val) { - return _mm512_extload_ps(&val, _MM_UPCONV_PS_UINT16, _MM_BROADCAST_16X16, _MM_HINT_NONE); -} - -static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i32 v) { - return _mm512_cvtfxpnt_round_adjustepu32_ps(v, _MM_FROUND_NO_EXC, _MM_EXPADJ_NONE); + return __cast_uitofp(__vec16_f(), __cast_zext(__vec16_i32(), val)); } static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i64 val) { @@ -2077,7 +2069,7 @@ static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i64 val) { __m512i tmp2; hilo2zmm(val, tmp1, tmp2); __vec16_f ret; - // Cycles don't work. It seems that it is icc bug. + // Loops don't work. It seems that it is icc bug. 
/* for (int i = 0; i < 8; i++) { ((float*)&ret)[i] = ((float)(((uint64_t*)&tmp1)[i])); @@ -2107,7 +2099,7 @@ static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i64 val) { static FORCEINLINE __vec16_d __cast_uitofp(__vec16_d, __vec16_i8 val) { - __vec16_i32 vi = _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_UINT8, _MM_BROADCAST_16X16, _MM_HINT_NONE); + __vec16_i32 vi = __cast_zext(__vec16_i32(), val); __vec16_d ret; ret.v1 = _mm512_cvtepu32lo_pd(vi); __vec16_i32 other8 = _mm512_permute4f128_epi32(vi, _MM_PERM_DCDC); @@ -2117,7 +2109,7 @@ static FORCEINLINE __vec16_d __cast_uitofp(__vec16_d, __vec16_i8 val) static FORCEINLINE __vec16_d __cast_uitofp(__vec16_d, __vec16_i16 val) { - __vec16_i32 vi = _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_UINT16, _MM_BROADCAST_16X16, _MM_HINT_NONE); + __vec16_i32 vi = __cast_zext(__vec16_i32(), val); __vec16_d ret; ret.v1 = _mm512_cvtepu32lo_pd(vi); __vec16_i32 other8 = _mm512_permute4f128_epi32(vi, _MM_PERM_DCDC); @@ -2466,25 +2458,25 @@ static FORCEINLINE __vec16_i8 __broadcast_i8(__vec16_i8 v, int index) { } static FORCEINLINE __vec16_i1 __not_equal_i8(__vec16_i8 a, __vec16_i8 b) { - __vec16_i32 tmp_a = _mm512_extload_epi32(&a, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); - __vec16_i32 tmp_b = _mm512_extload_epi32(&b, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_a = __cast_sext(__vec16_i32(), a); + __vec16_i32 tmp_b = __cast_sext(__vec16_i32(), b); return __not_equal_i32(tmp_a, tmp_b); } static FORCEINLINE __vec16_i1 __equal_i8_and_mask(const __vec16_i8 &a, const __vec16_i8 &b, __vec16_i1 m) { - __vec16_i32 tmp_a = _mm512_extload_epi32(&a, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); - __vec16_i32 tmp_b = _mm512_extload_epi32(&b, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_a = __cast_sext(__vec16_i32(), a); + __vec16_i32 tmp_b = __cast_sext(__vec16_i32(), b); return __equal_i32_and_mask(tmp_a, tmp_b, m); } static FORCEINLINE __vec16_i1 __not_equal_i8_and_mask(__vec16_i8 a, __vec16_i8 b, __vec16_i1 m) { - __vec16_i32 tmp_a = _mm512_extload_epi32(&a, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); - __vec16_i32 tmp_b = _mm512_extload_epi32(&b, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_a = __cast_sext(__vec16_i32(), a); + __vec16_i32 tmp_b = __cast_sext(__vec16_i32(), b); return __not_equal_i32_and_mask(tmp_a, tmp_b, m); } static FORCEINLINE __vec16_i8 __rotate_i8(__vec16_i8 v, int index) { - __vec16_i32 tmp_v = _mm512_extload_epi32(&v, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_v = __cast_sext(__vec16_i32(), v); __vec16_i32 tmp = __rotate_i32(tmp_v, index); __vec16_i8 ret; _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); @@ -2492,7 +2484,7 @@ static FORCEINLINE __vec16_i8 __rotate_i8(__vec16_i8 v, int index) { } static FORCEINLINE __vec16_i8 __shuffle_i8(__vec16_i8 v, __vec16_i32 index) { - __vec16_i32 tmp_v = _mm512_extload_epi32(&v, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_v = __cast_sext(__vec16_i32(), v); __vec16_i32 tmp = __shuffle_i32(tmp_v, index); __vec16_i8 ret; _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); @@ -2509,8 +2501,8 @@ template <> FORCEINLINE __vec16_i8 __smear_i8<__vec16_i8>(int8_t i) { } static FORCEINLINE __vec16_i8 __shuffle2_i8(__vec16_i8 v0, __vec16_i8 v1, __vec16_i32 index) { - __vec16_i32 tmp_v0 = _mm512_extload_epi32(&v0, 
_MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); - __vec16_i32 tmp_v1 = _mm512_extload_epi32(&v1, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_v0 = __cast_sext(__vec16_i32(), v0); + __vec16_i32 tmp_v1 = __cast_sext(__vec16_i32(), v1); __vec16_i32 tmp = __shuffle2_i32(tmp_v0, tmp_v1, index); __vec16_i8 ret; _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); @@ -2576,25 +2568,25 @@ static FORCEINLINE __vec16_i16 __broadcast_i16(__vec16_i16 v, int index) { } static FORCEINLINE __vec16_i1 __not_equal_i16(__vec16_i16 a, __vec16_i16 b) { - __vec16_i32 tmp_a = _mm512_extload_epi32(&a, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); - __vec16_i32 tmp_b = _mm512_extload_epi32(&b, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_a = __cast_sext(__vec16_i32(), a); + __vec16_i32 tmp_b = __cast_sext(__vec16_i32(), b); return __not_equal_i32(tmp_a, tmp_b); } static FORCEINLINE __vec16_i1 __equal_i16_and_mask(const __vec16_i16 &a, const __vec16_i16 &b, __vec16_i1 m) { - __vec16_i32 tmp_a = _mm512_extload_epi32(&a, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); - __vec16_i32 tmp_b = _mm512_extload_epi32(&b, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_a = __cast_sext(__vec16_i32(), a); + __vec16_i32 tmp_b = __cast_sext(__vec16_i32(), b); return __equal_i32_and_mask(tmp_a, tmp_b, m); } static FORCEINLINE __vec16_i1 __not_equal_i16_and_mask(__vec16_i16 a, __vec16_i16 b, __vec16_i1 m) { - __vec16_i32 tmp_a = _mm512_extload_epi32(&a, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); - __vec16_i32 tmp_b = _mm512_extload_epi32(&b, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_a = __cast_sext(__vec16_i32(), a); + __vec16_i32 tmp_b = __cast_sext(__vec16_i32(), b); return __not_equal_i32_and_mask(tmp_a, tmp_b, m); } static FORCEINLINE __vec16_i16 __rotate_i16(__vec16_i16 v, int index) { - __vec16_i32 tmp_v = _mm512_extload_epi32(&v, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_v = __cast_sext(__vec16_i32(), v); __vec16_i32 tmp = __rotate_i32(tmp_v, index); __vec16_i16 ret; _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); @@ -2602,7 +2594,7 @@ static FORCEINLINE __vec16_i16 __rotate_i16(__vec16_i16 v, int index) { } static FORCEINLINE __vec16_i16 __shuffle_i16(__vec16_i16 v, __vec16_i32 index) { - __vec16_i32 tmp_v = _mm512_extload_epi32(&v, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_v = __cast_sext(__vec16_i32(), v); __vec16_i32 tmp = __shuffle_i32(tmp_v, index); __vec16_i16 ret; _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); @@ -2618,8 +2610,8 @@ template <> FORCEINLINE __vec16_i16 __smear_i16<__vec16_i16>(int16_t i) { } static FORCEINLINE __vec16_i16 __shuffle2_i16(__vec16_i16 v0, __vec16_i16 v1, __vec16_i32 index) { - __vec16_i32 tmp_v0 = _mm512_extload_epi32(&v0, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); - __vec16_i32 tmp_v1 = _mm512_extload_epi32(&v1, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __vec16_i32 tmp_v0 = __cast_sext(__vec16_i32(), v0); + __vec16_i32 tmp_v1 = __cast_sext(__vec16_i32(), v1); __vec16_i32 tmp = __shuffle2_i32(tmp_v0, tmp_v1, index); __vec16_i16 ret; _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); @@ -2966,11 +2958,7 @@ static FORCEINLINE __vec16_i32 __masked_load_i32(void *p, 
__vec16_i1 mask) { #ifdef ISPC_FORCE_ALIGNED_MEMORY return _mm512_mask_load_epi32(__vec16_i32(), mask, p); #else - __vec16_i32 tmp; - tmp.v = _mm512_mask_extloadunpacklo_epi32(tmp.v, 0xFFFF, p, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); - tmp.v = _mm512_mask_extloadunpackhi_epi32(tmp.v, 0xFFFF, (uint8_t*)p+64, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); - __vec16_i32 ret; - return _mm512_mask_mov_epi32(ret.v, mask, tmp.v); + return _mm512_mask_loadu_epi32(__vec16_i32(), mask, p); #endif } @@ -2978,11 +2966,7 @@ static FORCEINLINE __vec16_f __masked_load_float(void *p, __vec16_i1 mask) { #ifdef ISPC_FORCE_ALIGNED_MEMORY return _mm512_mask_load_ps(_mm512_undefined_ps(), mask,p); #else - __vec16_f tmp; - tmp.v = _mm512_mask_extloadunpacklo_ps(tmp.v, 0xFFFF, p, _MM_UPCONV_PS_NONE, _MM_HINT_NONE); - tmp.v = _mm512_mask_extloadunpackhi_ps(tmp.v, 0xFFFF, (uint8_t*)p+64, _MM_UPCONV_PS_NONE, _MM_HINT_NONE); - __vec16_f ret; - return _mm512_mask_mov_ps(ret.v, mask, tmp.v); + return _mm512_mask_loadu_ps(_mm512_undefined_ps(), mask,p); #endif } @@ -2993,26 +2977,17 @@ static FORCEINLINE __vec16_i64 __masked_load_i64(void *p, __vec16_i1 mask) { } static FORCEINLINE __vec16_d __masked_load_double(void *p, __vec16_i1 mask) { -#ifdef ISPC_FORCE_ALIGNED_MEMORY __vec16_d ret; __vec16_i1 tmp_m = mask; tmp_m = _mm512_kswapb(tmp_m, tmp_m); +#ifdef ISPC_FORCE_ALIGNED_MEMORY ret.v1 = _mm512_mask_load_pd(ret.v1, mask, p); ret.v2 = _mm512_mask_load_pd(ret.v2, tmp_m, (uint8_t*)p+64); - return ret; #else - __vec16_d tmp; - tmp.v1 = _mm512_mask_extloadunpacklo_pd(tmp.v1, 0xFF, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); - tmp.v1 = _mm512_mask_extloadunpackhi_pd(tmp.v1, 0xFF, (uint8_t*)p+64, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); - tmp.v2 = _mm512_mask_extloadunpacklo_pd(tmp.v2, 0xFF, (uint8_t*)p+64, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); - tmp.v2 = _mm512_mask_extloadunpackhi_pd(tmp.v2, 0xFF, (uint8_t*)p+128, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); - __vec16_d ret; - __vec16_i1 tmp_m = mask; - tmp_m = _mm512_kswapb(tmp_m, tmp_m); - ret.v1 = _mm512_mask_mov_pd(ret.v1, mask, tmp.v1); - ret.v2 = _mm512_mask_mov_pd(ret.v2, tmp_m, tmp.v2); - return ret; + ret.v1 = _mm512_mask_loadu_pd(ret.v1, mask, p); + ret.v2 = _mm512_mask_loadu_pd(ret.v2, tmp_m, (uint8_t*)p+64); #endif + return ret; } static FORCEINLINE void __masked_store_i8(void *p, const __vec16_i8 &val, __vec16_i1 mask) { From fc8a32425cce8af0ae5a773042898f917ea53748 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Wed, 25 Mar 2015 18:58:36 +0300 Subject: [PATCH 05/33] cast_fptosi/ui --- examples/intrinsics/knl.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index 9ea4bbdc..ee048c0b 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -2144,7 +2144,7 @@ static FORCEINLINE __vec16_d __cast_uitofp(__vec16_d, __vec16_i64 val) { // float/double to signed int static FORCEINLINE __vec16_i32 __cast_fptosi(__vec16_i32, __vec16_f val) { - return _mm512_cvtfxpnt_round_adjustps_epi32(val, _MM_ROUND_MODE_TOWARD_ZERO, _MM_EXPADJ_NONE); + return _mm512_cvt_roundps_epi32(val, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC); } static FORCEINLINE __vec16_i8 __cast_fptosi(__vec16_i8, __vec16_f val) { @@ -2208,7 +2208,7 @@ static FORCEINLINE __vec16_i64 __cast_fptosi(__vec16_i64, __vec16_d val) { static FORCEINLINE __vec16_i32 __cast_fptoui(__vec16_i32, __vec16_f val) { - return _mm512_cvtfxpnt_round_adjustps_epu32(val, _MM_ROUND_MODE_TOWARD_ZERO, _MM_EXPADJ_NONE); + return _mm512_cvt_roundps_epu32(val, 
_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC); } static FORCEINLINE __vec16_i8 __cast_fptoui(__vec16_i8, __vec16_f val) { From 1aa8309a9ee1039f593c246888627b6a4b7ae960 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Wed, 25 Mar 2015 19:12:28 +0300 Subject: [PATCH 06/33] store_ps fixed --- examples/intrinsics/knl.h | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index ee048c0b..170d8c98 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -1455,8 +1455,7 @@ template static FORCEINLINE void __store(__vec16_f *p, __vec16_f v) #ifdef ISPC_FORCE_ALIGNED_MEMORY _mm512_store_ps(p, v); #else - _mm512_extpackstorelo_ps(p, v, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE); - _mm512_extpackstorehi_ps((uint8_t*)p+64, v, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE); + _mm512_storeu_ps(p, v); #endif } #if 0 @@ -3106,17 +3105,11 @@ static FORCEINLINE void __masked_store_i32(void *p, __vec16_i32 val, __vec16_i1 #endif } -static FORCEINLINE void __masked_store_float(void *p, __vec16_f val, - __vec16_i1 mask) { +static FORCEINLINE void __masked_store_float(void *p, __vec16_f val, __vec16_i1 mask) { #ifdef ISPC_FORCE_ALIGNED_MEMORY _mm512_mask_store_ps(p, mask, val.v); #else - __vec16_f tmp; - tmp.v = _mm512_extloadunpacklo_ps(tmp.v, p, _MM_UPCONV_PS_NONE, _MM_HINT_NONE); - tmp.v = _mm512_extloadunpackhi_ps(tmp.v, (uint8_t*)p+64, _MM_UPCONV_PS_NONE, _MM_HINT_NONE); - tmp.v = _mm512_mask_mov_ps(tmp.v, mask, val.v); - _mm512_extpackstorelo_ps(p, tmp.v, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE); - _mm512_extpackstorehi_ps((uint8_t*)p+64, tmp.v, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE); + _mm512_mask_storeu_ps(p, mask, val.v); #endif } From aefcea95cc1d5fa7782c043df4e2f600b793e4d4 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Wed, 25 Mar 2015 19:48:20 +0300 Subject: [PATCH 07/33] shift mask left like (mask << 8). 
may cause errors --- examples/intrinsics/knl.h | 51 +++++++++++++++------------------------ 1 file changed, 19 insertions(+), 32 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index 170d8c98..32337eea 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -1148,8 +1148,8 @@ template static FORCEINLINE __vec16_i64 __load(const __vec16_i64 *p) __vec16_i32 v2; const uint8_t*ptr = (const uint8_t*)p; - v1 = _mm512_mask_loadu_epi64(v1, 0xFFFF, ptr+64); - v2 = _mm512_mask_loadu_epi64(v2, 0xFFFF, ptr); + v1 = _mm512_mask_loadu_epi64(v1, 0xFF, ptr+64); + v2 = _mm512_mask_loadu_epi64(v2, 0xFF, ptr); __vec16_i64 ret; ret.v_hi = _mm512_mask_permutevar_epi32(ret.v_hi, 0xFF00, @@ -1615,8 +1615,7 @@ static FORCEINLINE __vec16_i1 __equal_double_and_mask(__vec16_d a, __vec16_d b, __vec16_i1 ret1; __vec16_i1 ret2; ret1 = _mm512_mask_cmpeq_pd_mask(m, a.v1, b.v1); - __vec16_i1 tmp_m = m; - ret2 = _mm512_mask_cmpeq_pd_mask(_mm512_kswapb(tmp_m,tmp_m), a.v2, b.v2); + ret2 = _mm512_mask_cmpeq_pd_mask((m << 8), a.v2, b.v2); return _mm512_kmovlhb(ret1, ret2); } @@ -1632,9 +1631,8 @@ static FORCEINLINE __vec16_i1 __not_equal_double_and_mask(__vec16_d a, __vec16_d __vec16_i1 m) { __vec16_i1 ret1; __vec16_i1 ret2; - __vec16_i1 tmp_m = m; ret1 = _mm512_mask_cmpneq_pd_mask(m, a.v1, b.v1); - ret2 = _mm512_mask_cmpneq_pd_mask(_mm512_kswapb(tmp_m, tmp_m), a.v2, b.v2); + ret2 = _mm512_mask_cmpneq_pd_mask((m << 8), a.v2, b.v2); return _mm512_kmovlhb(ret1, ret2); } @@ -1652,7 +1650,7 @@ static FORCEINLINE __vec16_i1 __less_than_double_and_mask(__vec16_d a, __vec16_d __vec16_i1 ret2; __vec16_i1 tmp_m = m; ret1 = _mm512_mask_cmplt_pd_mask(m, a.v1, b.v1); - ret2 = _mm512_mask_cmplt_pd_mask(_mm512_kswapb(tmp_m, tmp_m), a.v2, b.v2); + ret2 = _mm512_mask_cmplt_pd_mask((m << 8), a.v2, b.v2); return _mm512_kmovlhb(ret1, ret2); } @@ -1670,7 +1668,7 @@ static FORCEINLINE __vec16_i1 __less_equal_double_and_mask(__vec16_d a, __vec16_ __vec16_i1 ret2; __vec16_i1 tmp_m = m; ret1 = _mm512_mask_cmple_pd_mask(m, a.v1, b.v1); - ret2 = _mm512_mask_cmple_pd_mask(_mm512_kswapb(tmp_m, tmp_m), a.v2, b.v2); + ret2 = _mm512_mask_cmple_pd_mask((m << 8), a.v2, b.v2); return _mm512_kmovlhb(ret1, ret2); } @@ -1688,7 +1686,7 @@ static FORCEINLINE __vec16_i1 __greater_than_double_and_mask(__vec16_d a, __vec1 __vec16_i1 ret2; __vec16_i1 tmp_m = m; ret1 = _mm512_mask_cmpnle_pd_mask(m, a.v1, b.v1); - ret2 = _mm512_mask_cmpnle_pd_mask(_mm512_kswapb(tmp_m, tmp_m), a.v2, b.v2); + ret2 = _mm512_mask_cmpnle_pd_mask((m << 8), a.v2, b.v2); return _mm512_kmovlhb(ret1, ret2); } @@ -1700,13 +1698,11 @@ static FORCEINLINE __vec16_i1 __greater_equal_double(__vec16_d a, __vec16_d b) { return _mm512_kmovlhb(ret1, ret2); } -static FORCEINLINE __vec16_i1 __greater_equal_double_and_mask(__vec16_d a, __vec16_d b, - __vec16_i1 m) { +static FORCEINLINE __vec16_i1 __greater_equal_double_and_mask(__vec16_d a, __vec16_d b, __vec16_i1 m) { __vec16_i1 ret1; __vec16_i1 ret2; - __vec16_i1 tmp_m = m; ret1 = _mm512_mask_cmpnlt_pd_mask(m, a.v1, b.v1); - ret2 = _mm512_mask_cmpnlt_pd_mask(_mm512_kswapb(tmp_m, tmp_m), a.v2, b.v2); + ret2 = _mm512_mask_cmpnlt_pd_mask((m << 8), a.v2, b.v2); return _mm512_kmovlhb(ret1, ret2); } @@ -1729,21 +1725,18 @@ static FORCEINLINE __vec16_i1 __unordered_double(__vec16_d a, __vec16_d b) { static FORCEINLINE __vec16_i1 __unordered_double_and_mask(__vec16_d a, __vec16_d b, __vec16_i1 mask) { __vec16_i1 ret1; __vec16_i1 ret2; - __vec16_i1 tmp_m = mask; ret1 = _mm512_mask_cmpunord_pd_mask(mask, a.v1, b.v1); - 
ret2 = _mm512_mask_cmpunord_pd_mask(_mm512_kswapb(tmp_m, tmp_m), a.v2, b.v2); + ret2 = _mm512_mask_cmpunord_pd_mask((mask << 8), a.v2, b.v2); return _mm512_kmovlhb(ret1, ret2); } static FORCEINLINE __vec16_d __select(__vec16_i1 mask, __vec16_d a, __vec16_d b) { __vec16_d ret; - __vec16_i1 tmp_m = mask; ret.v1 = _mm512_mask_mov_pd(b.v1, mask, a.v1); - ret.v2 = _mm512_mask_mov_pd(b.v2, _mm512_kswapb(tmp_m, tmp_m), a.v2); + ret.v2 = _mm512_mask_mov_pd(b.v2, (mask << 8), a.v2); return ret; } - static FORCEINLINE __vec16_d __select(bool cond, __vec16_d a, __vec16_d b) { return cond ? a : b; } @@ -2674,10 +2667,10 @@ static FORCEINLINE int64_t __max_uniform_int64(int64_t a, int64_t b) { return (a static FORCEINLINE int64_t __min_uniform_uint64(uint64_t a, uint64_t b) { return (ab) ? a : b; } -static FORCEINLINE __vec16_f __max_varying_float (__vec16_f v1, __vec16_f v2) { return _mm512_gmax_ps(v1, v2); } -static FORCEINLINE __vec16_f __min_varying_float (__vec16_f v1, __vec16_f v2) { return _mm512_gmin_ps(v1, v2); } -static FORCEINLINE __vec16_d __max_varying_double(__vec16_d v1, __vec16_d v2) { return __vec16_d(_mm512_gmax_pd(v1.v1, v2.v1), _mm512_gmax_pd(v1.v2,v2.v2)); } -static FORCEINLINE __vec16_d __min_varying_double(__vec16_d v1, __vec16_d v2) { return __vec16_d(_mm512_gmin_pd(v1.v1, v2.v1), _mm512_gmin_pd(v1.v2,v2.v2)); } +static FORCEINLINE __vec16_f __max_varying_float (__vec16_f v1, __vec16_f v2) { return _mm512_max_ps(v1, v2); } +static FORCEINLINE __vec16_f __min_varying_float (__vec16_f v1, __vec16_f v2) { return _mm512_min_ps(v1, v2); } +static FORCEINLINE __vec16_d __max_varying_double(__vec16_d v1, __vec16_d v2) { return __vec16_d(_mm512_max_pd(v1.v1, v2.v1), _mm512_max_pd(v1.v2,v2.v2)); } +static FORCEINLINE __vec16_d __min_varying_double(__vec16_d v1, __vec16_d v2) { return __vec16_d(_mm512_min_pd(v1.v1, v2.v1), _mm512_min_pd(v1.v2,v2.v2)); } static FORCEINLINE __vec16_i32 __max_varying_int32 (__vec16_i32 v1, __vec16_i32 v2) { return _mm512_max_epi32(v1, v2); } static FORCEINLINE __vec16_i32 __min_varying_int32 (__vec16_i32 v1, __vec16_i32 v2) { return _mm512_min_epi32(v1, v2); } @@ -2977,14 +2970,12 @@ static FORCEINLINE __vec16_i64 __masked_load_i64(void *p, __vec16_i1 mask) { static FORCEINLINE __vec16_d __masked_load_double(void *p, __vec16_i1 mask) { __vec16_d ret; - __vec16_i1 tmp_m = mask; - tmp_m = _mm512_kswapb(tmp_m, tmp_m); #ifdef ISPC_FORCE_ALIGNED_MEMORY ret.v1 = _mm512_mask_load_pd(ret.v1, mask, p); - ret.v2 = _mm512_mask_load_pd(ret.v2, tmp_m, (uint8_t*)p+64); + ret.v2 = _mm512_mask_load_pd(ret.v2, (mask << 8), (uint8_t*)p+64); #else ret.v1 = _mm512_mask_loadu_pd(ret.v1, mask, p); - ret.v2 = _mm512_mask_loadu_pd(ret.v2, tmp_m, (uint8_t*)p+64); + ret.v2 = _mm512_mask_loadu_pd(ret.v2, (mask << 8), (uint8_t*)p+64); #endif return ret; } @@ -3116,20 +3107,16 @@ static FORCEINLINE void __masked_store_float(void *p, __vec16_f val, __vec16_i1 static FORCEINLINE void __masked_store_double(void *p, __vec16_d val, __vec16_i1 mask) { #ifdef ISPC_FORCE_ALIGNED_MEMORY - __vec16_i1 tmp_m = mask; - tmp_m = _mm512_kswapb(tmp_m, tmp_m); _mm512_mask_store_pd(p, mask, val.v1); - _mm512_mask_store_pd((uint8_t*)p+64, tmp_m, val.v2); + _mm512_mask_store_pd((uint8_t*)p+64, (mask << 8), val.v2); #else __vec16_d tmp; - __vec16_i1 tmp_m = mask; - tmp_m = _mm512_kswapb(tmp_m, tmp_m); tmp.v1 = _mm512_extloadunpacklo_pd(tmp.v1, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); tmp.v1 = _mm512_extloadunpackhi_pd(tmp.v1, (uint8_t*)p+64, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); tmp.v2 = 
_mm512_extloadunpacklo_pd(tmp.v2, (uint8_t*)p+64, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); tmp.v2 = _mm512_extloadunpackhi_pd(tmp.v2, (uint8_t*)p+128, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); tmp.v1 = _mm512_mask_mov_pd(tmp.v1, mask, val.v1); - tmp.v2 = _mm512_mask_mov_pd(tmp.v2, tmp_m, val.v2); + tmp.v2 = _mm512_mask_mov_pd(tmp.v2, (mask << 8), val.v2); _mm512_extpackstorelo_pd(p, tmp.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); _mm512_extpackstorehi_pd((uint8_t*)p+64, tmp.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); _mm512_extpackstorelo_pd((uint8_t*)p+64, tmp.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); From de0d69ab26bfc8264c4e75c60f9c0f0214d0ff38 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Wed, 25 Mar 2015 19:54:19 +0300 Subject: [PATCH 08/33] extpackstorehi/lo_pd --- examples/intrinsics/knl.h | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index 32337eea..0ec55807 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -1808,10 +1808,8 @@ template <> FORCEINLINE __vec16_d __load<128>(const __vec16_d *p) { } #endif template static FORCEINLINE void __store(__vec16_d *p, __vec16_d v) { - _mm512_extpackstorelo_pd(p, v.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); - _mm512_extpackstorehi_pd((uint8_t*)p+64, v.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); - _mm512_extpackstorelo_pd((uint8_t*)p+64, v.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); - _mm512_extpackstorehi_pd((uint8_t*)p+128, v.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); + _mm512_storeu_pd(p, v.v1); + _mm512_storeu_pd((uint8_t*)p+64, v.v2); } #if 0 template <> FORCEINLINE void __store<64>(__vec16_d *p, __vec16_d v) { @@ -3110,17 +3108,8 @@ static FORCEINLINE void __masked_store_double(void *p, __vec16_d val, _mm512_mask_store_pd(p, mask, val.v1); _mm512_mask_store_pd((uint8_t*)p+64, (mask << 8), val.v2); #else - __vec16_d tmp; - tmp.v1 = _mm512_extloadunpacklo_pd(tmp.v1, p, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); - tmp.v1 = _mm512_extloadunpackhi_pd(tmp.v1, (uint8_t*)p+64, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); - tmp.v2 = _mm512_extloadunpacklo_pd(tmp.v2, (uint8_t*)p+64, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); - tmp.v2 = _mm512_extloadunpackhi_pd(tmp.v2, (uint8_t*)p+128, _MM_UPCONV_PD_NONE, _MM_HINT_NONE); - tmp.v1 = _mm512_mask_mov_pd(tmp.v1, mask, val.v1); - tmp.v2 = _mm512_mask_mov_pd(tmp.v2, (mask << 8), val.v2); - _mm512_extpackstorelo_pd(p, tmp.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); - _mm512_extpackstorehi_pd((uint8_t*)p+64, tmp.v1, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); - _mm512_extpackstorelo_pd((uint8_t*)p+64, tmp.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); - _mm512_extpackstorehi_pd((uint8_t*)p+128, tmp.v2, _MM_DOWNCONV_PD_NONE, _MM_HINT_NONE); + _mm512_mask_storeu_pd(p, mask, val.v1); + _mm512_mask_storeu_pd((uint8_t*)p+64, (mask << 8), val.v2); #endif } From 5c523332afa32ecdf34310e0ed2569ce885c8188 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Wed, 25 Mar 2015 20:09:45 +0300 Subject: [PATCH 09/33] extpackstorehi/lo_epi32 (without i8 and i16) --- examples/intrinsics/knl.h | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index 0ec55807..de4698a9 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -790,8 +790,7 @@ template static FORCEINLINE void __store(__vec16_i32 *p, __vec16_i32 #ifdef ISPC_FORCE_ALIGNED_MEMORY _mm512_store_epi32(p, v); #else - _mm512_extpackstorelo_epi32(p, v, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); - 
_mm512_extpackstorehi_epi32((uint8_t*)p+64, v, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); + _mm512_mask_storeu_epi32(p, 0xFFFF, v); #endif } @@ -1207,10 +1206,8 @@ template static FORCEINLINE void __store(__vec16_i64 *p, __vec16_i64 v2 = _mm512_mask_permutevar_epi32(v2, 0x5555, _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), v.v_lo); - _mm512_extpackstorelo_epi32(p, v2, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); - _mm512_extpackstorehi_epi32((uint8_t*)p+64, v2, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); - _mm512_extpackstorelo_epi32((uint8_t*)p+64, v1, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); - _mm512_extpackstorehi_epi32((uint8_t*)p+128, v1, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); + _mm512_mask_storeu_epi64(p, 0xFF, v2); + _mm512_mask_storeu_epi64((uint8_t*)p+64, 0xFF, v1); } #if 0 template <> FORCEINLINE void __store<64>(__vec16_i64 *p, __vec16_i64 v) { @@ -3085,12 +3082,7 @@ static FORCEINLINE void __masked_store_i32(void *p, __vec16_i32 val, __vec16_i1 #ifdef ISPC_FORCE_ALIGNED_MEMORY _mm512_mask_store_epi32(p, mask, val.v); #else - __vec16_i32 tmp; - tmp.v = _mm512_extloadunpacklo_epi32(tmp.v, p, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); - tmp.v = _mm512_extloadunpackhi_epi32(tmp.v, (uint8_t*)p+64, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); - tmp.v = _mm512_mask_mov_epi32(tmp.v, mask, val.v); - _mm512_extpackstorelo_epi32(p, tmp.v, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); - _mm512_extpackstorehi_epi32((uint8_t*)p+64, tmp.v, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); + _mm512_mask_storeu_epi32(p, mask, val.v); #endif } @@ -3533,15 +3525,13 @@ static FORCEINLINE void __scatter64_i64(__vec16_i64 ptrs, __vec16_i64 val, __vec static FORCEINLINE int32_t __packed_load_active(uint32_t *p, __vec16_i32 *val, __vec16_i1 mask) { __vec16_i32 v = __load<64>(val); - v = _mm512_mask_extloadunpacklo_epi32(v, mask, p, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); - v = _mm512_mask_extloadunpackhi_epi32(v, mask, (uint8_t*)p+64, _MM_UPCONV_EPI32_NONE, _MM_HINT_NONE); + v = _mm512_mask_loadu_epi32(v, mask, p); __store<64>(val, v); return _mm_countbits_32(uint32_t(mask)); } static FORCEINLINE int32_t __packed_store_active(uint32_t *p, __vec16_i32 val, __vec16_i1 mask) { - _mm512_mask_extpackstorelo_epi32(p, mask, val, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); - _mm512_mask_extpackstorehi_epi32((uint8_t*)p+64, mask, val, _MM_DOWNCONV_EPI32_NONE, _MM_HINT_NONE); + _mm512_mask_storeu_epi32(p, mask, val); return _mm_countbits_32(uint32_t(mask)); } From 0a166f245ccc0749aca18d7ffb01e49db1d7d4a8 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 26 Mar 2015 12:22:03 +0300 Subject: [PATCH 10/33] merged in brodman's version of knl.h --- examples/intrinsics/knl.h | 1141 +++++++++++++------------------------ 1 file changed, 396 insertions(+), 745 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index de4698a9..e1692d84 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -75,7 +75,6 @@ #define POST_ALIGN(x) __attribute__ ((aligned(x))) #endif -#define KNC 1 // Required by cbackend #define KNL 1 extern "C" { int printf(const unsigned char *, ...); @@ -101,6 +100,11 @@ typedef struct PRE_ALIGN(2) __vec16_i1 FORCEINLINE operator __mmask16() const { return v; } FORCEINLINE __vec16_i1() { } FORCEINLINE __vec16_i1(const __mmask16 &vv) : v(vv) { } + FORCEINLINE __vec16_i1(const __mmask8 &lo, const __mmask8 &hi) { + __mmask16 xlo = lo; + __mmask16 xhi = hi; + v = xlo | (xhi << 8); + } FORCEINLINE __vec16_i1(bool v0, bool v1, bool v2, bool v3, bool v4, bool v5, bool v6, bool v7, 
bool v8, bool v9, bool v10, bool v11, @@ -124,6 +128,8 @@ typedef struct PRE_ALIGN(2) __vec16_i1 } FORCEINLINE uint8_t operator[](const int i) const { return ((v >> i) & 1); } FORCEINLINE uint8_t operator[](const int i) { return ((v >> i) & 1); } + FORCEINLINE __mmask8 lo() const { return (v & 0x00FF); } + FORCEINLINE __mmask8 hi() const { return (v >> 8); } __mmask16 v; } POST_ALIGN(2) __vec16_i1; @@ -137,7 +143,7 @@ typedef struct PRE_ALIGN(64) __vec16_f { float v04, float v05, float v06, float v07, float v08, float v09, float v10, float v11, float v12, float v13, float v14, float v15) { - v = _mm512_set_16to16_ps(v15, v14, v13, v12, v11, v10, v09, v08, v07, v06, v05, v04, v03, v02, v01, v00); + v = _mm512_set_ps(v15, v14, v13, v12, v11, v10, v09, v08, v07, v06, v05, v04, v03, v02, v01, v00); } FORCEINLINE const float& operator[](const int i) const { return ((float*)this)[i]; } FORCEINLINE float& operator[](const int i) { return ((float*)this)[i]; } @@ -145,21 +151,21 @@ typedef struct PRE_ALIGN(64) __vec16_f { } POST_ALIGN(64) __vec16_f; typedef struct PRE_ALIGN(64) __vec16_d { - FORCEINLINE __vec16_d() : v1(_mm512_undefined_pd()), v2(_mm512_undefined_pd()) {} - FORCEINLINE __vec16_d(const __vec16_d &o) : v1(o.v1), v2(o.v2) {} - FORCEINLINE __vec16_d(const __m512d _v1, const __m512d _v2) : v1(_v1), v2(_v2) {} - FORCEINLINE __vec16_d& operator =(const __vec16_d &o) { v1=o.v1; v2=o.v2; return *this; } + FORCEINLINE __vec16_d() : v_lo(_mm512_undefined_pd()), v_hi(_mm512_undefined_pd()) {} + FORCEINLINE __vec16_d(const __vec16_d &o) : v_lo(o.v_lo), v_hi(o.v_hi) {} + FORCEINLINE __vec16_d(const __m512d _v_lo, const __m512d _v_hi) : v_lo(_v_lo), v_hi(_v_hi) {} + FORCEINLINE __vec16_d& operator =(const __vec16_d &o) { v_lo=o.v_lo; v_hi=o.v_hi; return *this; } FORCEINLINE __vec16_d(double v00, double v01, double v02, double v03, double v04, double v05, double v06, double v07, double v08, double v09, double v10, double v11, double v12, double v13, double v14, double v15) { - v1 = _mm512_set_8to8_pd(v15, v14, v13, v12, v11, v10, v09, v08); - v2 = _mm512_set_8to8_pd(v07, v06, v05, v04, v03, v02, v01, v00); + v_hi = _mm512_set_pd(v15, v14, v13, v12, v11, v10, v09, v08); + v_lo = _mm512_set_pd(v07, v06, v05, v04, v03, v02, v01, v00); } FORCEINLINE const double& operator[](const int i) const { return ((double*)this)[i]; } FORCEINLINE double& operator[](const int i) { return ((double*)this)[i]; } - __m512d v1; - __m512d v2; + __m512d v_lo; + __m512d v_hi; } POST_ALIGN(64) __vec16_d; typedef struct PRE_ALIGN(64) __vec16_i32 { @@ -173,7 +179,7 @@ typedef struct PRE_ALIGN(64) __vec16_i32 { int32_t v04, int32_t v05, int32_t v06, int32_t v07, int32_t v08, int32_t v09, int32_t v10, int32_t v11, int32_t v12, int32_t v13, int32_t v14, int32_t v15) { - v = _mm512_set_16to16_pi(v15, v14, v13, v12, v11, v10, v09, v08, v07, v06, v05, v04, v03, v02, v01, v00); + v = _mm512_set_epi32(v15, v14, v13, v12, v11, v10, v09, v08, v07, v06, v05, v04, v03, v02, v01, v00); } FORCEINLINE const int32_t& operator[](const int i) const { return ((int32_t*)this)[i]; } FORCEINLINE int32_t& operator[](const int i) { return ((int32_t*)this)[i]; } @@ -182,6 +188,7 @@ typedef struct PRE_ALIGN(64) __vec16_i32 { typedef struct PRE_ALIGN(64) __vec16_i64 { FORCEINLINE __vec16_i64() : v_lo(_mm512_undefined_epi32()), v_hi(_mm512_undefined_epi32()) {} + FORCEINLINE __vec16_i64(const int64_t &in) : v_lo(_mm512_set1_epi64(in)), v_hi(_mm512_set1_epi64(in)) {} FORCEINLINE __vec16_i64(const __vec16_i64 &o) : v_lo(o.v_lo), v_hi(o.v_hi) {} 
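// Note (not from the original patch): in the KNL layout the 16 logical 64-bit lanes are
// split across two native 512-bit registers, lanes 0..7 in v_lo and lanes 8..15 in v_hi;
// declaring v_lo before v_hi is what lets the flat ((int64_t*)this)[i] accessors below
// follow the logical lane order. A minimal sketch of the mapping, assuming only AVX-512F
// (the helper name is illustrative, not part of the header):
//
//   static FORCEINLINE int64_t __lane_sketch(const __vec16_i64 &v, int i) {
//       const __m512i &half = (i < 8) ? v.v_lo : v.v_hi;  // low half holds lanes 0..7
//       return ((const int64_t *)&half)[i & 7];           // high half holds lanes 8..15
//   }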
FORCEINLINE __vec16_i64(__m512i l, __m512i h) : v_lo(l), v_hi(h) {} FORCEINLINE __vec16_i64& operator =(const __vec16_i64 &o) { v_lo=o.v_lo; v_hi=o.v_hi; return *this; } @@ -189,68 +196,15 @@ typedef struct PRE_ALIGN(64) __vec16_i64 { int64_t v04, int64_t v05, int64_t v06, int64_t v07, int64_t v08, int64_t v09, int64_t v10, int64_t v11, int64_t v12, int64_t v13, int64_t v14, int64_t v15) { - __m512i v1 = _mm512_set_8to8_epi64(v15, v14, v13, v12, v11, v10, v09, v08); - __m512i v2 = _mm512_set_8to8_epi64(v07, v06, v05, v04, v03, v02, v01, v00); - v_hi = _mm512_mask_permutevar_epi32(v_hi, 0xFF00, - _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), - v1); - v_hi = _mm512_mask_permutevar_epi32(v_hi, 0x00FF, - _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), - v2); - v_lo = _mm512_mask_permutevar_epi32(v_lo, 0xFF00, - _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), - v1); - v_lo = _mm512_mask_permutevar_epi32(v_lo, 0x00FF, - _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), - v2); + v_hi = _mm512_set_epi64(v15, v14, v13, v12, v11, v10, v09, v08); + v_lo = _mm512_set_epi64(v07, v06, v05, v04, v03, v02, v01, v00); } - FORCEINLINE int64_t operator[](const int i) const { - return ((uint64_t(((int32_t*)this)[i])<<32)+((int32_t*)this)[i+16]); } - FORCEINLINE int64_t operator[](const int i) { - return ((uint64_t(((int32_t*)this)[i])<<32)+((int32_t*)this)[i+16]); } - __m512i v_hi; - __m512i v_lo; + FORCEINLINE const int64_t& operator[](const int i) const { return ((int64_t*)this)[i]; } + FORCEINLINE int64_t& operator[](const int i) { return ((int64_t*)this)[i]; } + __m512i v_lo; + __m512i v_hi; } POST_ALIGN(64) __vec16_i64; -FORCEINLINE __vec16_i64 zmm2hilo(const __m512i v1, const __m512i v2){ - __vec16_i64 v; - v.v_hi = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xFF00, - _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), - v2); - v.v_hi = _mm512_mask_permutevar_epi32(v.v_hi, 0x00FF, - _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), - v1); - v.v_lo = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xFF00, - _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), - v2); - v.v_lo = _mm512_mask_permutevar_epi32(v.v_lo, 0x00FF, - _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), - v1); - return v; -} - -FORCEINLINE void hilo2zmm(const __vec16_i64 &v, __m512i &_v1, __m512i &_v2) { - _v2 = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, - _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), - v.v_hi); - _v2 = _mm512_mask_permutevar_epi32(_v2, 0x5555, - _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), - v.v_lo); - _v1 = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, - _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), - v.v_hi); - _v1 = _mm512_mask_permutevar_epi32(_v1, 0x5555, - _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), - v.v_lo); -} - -FORCEINLINE __vec16_i64 hilo2zmm(const __vec16_i64 &v) { - __vec16_i64 ret; - hilo2zmm(v, ret.v_hi, ret.v_lo); - return ret; -} - - template struct vec16 { FORCEINLINE vec16() { } @@ -411,7 +365,7 @@ static FORCEINLINE bool __none(__vec16_i1 mask) { } static FORCEINLINE __vec16_i1 __equal_i1(__vec16_i1 a, __vec16_i1 b) { - return _mm512_knot( _mm512_kxor(a, b)); + return _mm512_knot(_mm512_kxor(a, b)); } static FORCEINLINE __vec16_i1 __and(__vec16_i1 a, __vec16_i1 b) { @@ -427,7 +381,7 @@ static FORCEINLINE __vec16_i1 __and_not1(__vec16_i1 a, __vec16_i1 b) { } static FORCEINLINE __vec16_i1 
__and_not2(__vec16_i1 a, __vec16_i1 b) { - return _mm512_kandnr(a, b); + return _mm512_kandn(b, a); } static FORCEINLINE __vec16_i1 __xor(__vec16_i1 a, __vec16_i1 b) { @@ -460,17 +414,9 @@ static FORCEINLINE bool __extract_element(__vec16_i1 mask, uint32_t index) { static FORCEINLINE int64_t __extract_element(const __vec16_i64 &v, uint32_t index) { - //uint *src = (uint *)&v; - const uint *src = (const uint *)&v; - return src[index+16] | (uint64_t(src[index]) << 32); + return ((int64_t*)&v)[index]; } - - - - - - /* static FORCEINLINE void __insert_element(__vec16_i1 *vec, int index, bool val) { @@ -744,9 +690,8 @@ static FORCEINLINE __vec16_i8 __cast_trunc(__vec16_i8, const __vec16_i64 i64) { } static FORCEINLINE __vec16_i32 __rotate_i32(__vec16_i32 v, int index) { - __vec16_i32 idx = __smear_i32<__vec16_i32>(index); - __vec16_i32 shuffle = _mm512_and_epi32(_mm512_add_epi32(__ispc_stride1, idx), __smear_i32<__vec16_i32>(0xf)); - return _mm512_mask_permutevar_epi32(v, 0xffff, shuffle, v); + index &= 0xFF; + return _mm512_alignr_epi32(v, v, index); } static FORCEINLINE __vec16_i32 __shuffle_i32(__vec16_i32 v, __vec16_i32 index) { @@ -763,13 +708,8 @@ static FORCEINLINE __vec16_i32 __shuffle2_i32(__vec16_i32 v0, __vec16_i32 v1, __ } static FORCEINLINE __vec16_i32 __shift_i32(__vec16_i32 v, int index) { - __vec16_i32 mod_index = _mm512_add_epi32(__ispc_stride1, __smear_i32<__vec16_i32>(index)); - __vec16_i1 mask_ge = _mm512_cmpge_epi32_mask (mod_index, __smear_i32<__vec16_i32>(0)); - __vec16_i1 mask_le = _mm512_cmple_epi32_mask (mod_index, __smear_i32<__vec16_i32>(0xF)); - __vec16_i1 mask = mask_ge & mask_le; - __vec16_i32 ret = __smear_i32<__vec16_i32>(0); - ret = _mm512_mask_permutevar_epi32(ret, mask, mod_index, v); - return ret; + index &= 0xFF; + return _mm512_alignr_epi32(_mm512_setzero_epi32(), v, index); } template static FORCEINLINE __vec16_i32 __load(const __vec16_i32 *p) { @@ -790,7 +730,7 @@ template static FORCEINLINE void __store(__vec16_i32 *p, __vec16_i32 #ifdef ISPC_FORCE_ALIGNED_MEMORY _mm512_store_epi32(p, v); #else - _mm512_mask_storeu_epi32(p, 0xFFFF, v); + _mm512_storeu_si512(p, v); #endif } @@ -807,35 +747,24 @@ template <> FORCEINLINE void __store<64>(__vec16_i32 *p, __vec16_i32 v) { static FORCEINLINE __vec16_i64 __select(__vec16_i1 mask, __vec16_i64 a, __vec16_i64 b) { __vec16_i64 ret; - ret.v_hi = _mm512_mask_mov_epi32(b.v_hi, mask, a.v_hi); + ret.v_lo = _mm512_mask_blend_epi64(mask.lo(), b.v_lo, a.v_lo); + ret.v_hi = _mm512_mask_blend_epi64(mask.hi(), b.v_hi, a.v_hi); + // TODO: Check if this works better: ret.v_lo = _mm512_mask_mov_epi32(b.v_lo, mask, a.v_lo); + ret.v_hi = _mm512_mask_mov_epi32(b.v_hi, mask, a.v_hi); return ret; } static FORCEINLINE void __masked_store_i64(void *p, const __vec16_i64 &v, __vec16_i1 mask) { - __m512i v1; - __m512i v2; - v1 = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, - _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), - v.v_hi); - v1 = _mm512_mask_permutevar_epi32(v1, 0x5555, - _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), - v.v_lo); - v2 = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, - _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), - v.v_hi); - v2 = _mm512_mask_permutevar_epi32(v2, 0x5555, - _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), - v.v_lo); - _mm512_mask_store_epi64(p, mask, v2); - _mm512_mask_store_epi64(((uint8_t*)p)+64, mask>>8, v1); + // Does this need alignment checking? 
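// Note (not from the original patch): _mm512_mask_store_epi64 does assume a 64-byte-aligned
// destination, so the calls below are only safe when the caller guarantees alignment. If p
// may be unaligned, the unaligned masked store is the safe variant, matching what the other
// masked stores in this header do outside of ISPC_FORCE_ALIGNED_MEMORY. A minimal sketch,
// assuming only AVX-512F (the function name is illustrative):
//
//   static FORCEINLINE void __masked_store_i64_unaligned(void *p, const __vec16_i64 &v,
//                                                        __vec16_i1 mask) {
//       _mm512_mask_storeu_epi64(p, mask.lo(), v.v_lo);                 // lanes 0..7
//       _mm512_mask_storeu_epi64((uint8_t*)p + 64, mask.hi(), v.v_hi);  // lanes 8..15
//   }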
+ _mm512_mask_store_epi64(p, mask.lo(), v.v_lo); + _mm512_mask_store_epi64(((uint8_t*)p)+64, mask.hi(), v.v_hi); } static FORCEINLINE void __insert_element(__vec16_i64 *v, uint32_t index, int64_t val) { - ((int32_t *)&v->v_hi)[index] = val>>32; - ((int32_t *)&v->v_lo)[index] = val; + ((int64_t *)v)[index] = val; } @@ -854,17 +783,15 @@ template <> FORCEINLINE __vec16_i64 __undef_i64<__vec16_i64>() { static FORCEINLINE __vec16_i64 __add(const __vec16_i64 &a, const __vec16_i64 &b) { - __mmask16 carry = 0; - __m512i lo = _mm512_addsetc_epi32(a.v_lo, b.v_lo, &carry); - __m512i hi = _mm512_adc_epi32(a.v_hi, carry, b.v_hi, &carry); + __m512i lo = _mm512_add_epi64(a.v_lo, b.v_lo); + __m512i hi = _mm512_add_epi64(a.v_hi, b.v_hi); return __vec16_i64(lo, hi); } static FORCEINLINE __vec16_i64 __sub(const __vec16_i64 &a, const __vec16_i64 &b) { - __mmask16 borrow = 0; - __m512i lo = _mm512_subsetb_epi32(a.v_lo, b.v_lo, &borrow); - __m512i hi = _mm512_sbb_epi32(a.v_hi, borrow, b.v_hi, &borrow); + __m512i lo = _mm512_sub_epi64(a.v_lo, b.v_lo); + __m512i hi = _mm512_sub_epi64(a.v_hi, b.v_hi); return __vec16_i64(lo, hi); } @@ -872,6 +799,7 @@ static FORCEINLINE __vec16_i64 __sub(const __vec16_i64 &a, const __vec16_i64 &b) know is 32 bits; and 32x64 is faster than 64x64 */ static FORCEINLINE __vec16_i64 __mul(const __vec16_i32 &a, const __vec16_i64 &b) { + // TODO return __vec16_i64(_mm512_mullo_epi32(a.v,b.v_lo), _mm512_add_epi32(_mm512_mullo_epi32(a.v, b.v_hi), _mm512_mulhi_epi32(a.v, b.v_lo))); @@ -893,20 +821,9 @@ static FORCEINLINE void __abs_i32i64(__m512i &_hi, __m512i &_lo) static FORCEINLINE __vec16_i64 __mul(__vec16_i64 a, __vec16_i64 b) { - const __vec16_i1 sign = __not_equal_i32(__ashr(__xor(a.v_hi, b.v_hi), __ispc_thirty_two), __ispc_zero); - __abs_i32i64(a.v_hi, a.v_lo); /* abs(a) */ - __abs_i32i64(b.v_hi, b.v_lo); /* abs(b) */ - __vec16_i32 lo = _mm512_mullo_epi32(a.v_lo, b.v_lo); - __vec16_i32 hi_m1 = _mm512_mulhi_epu32(a.v_lo, b.v_lo); - __vec16_i32 hi_m2 = _mm512_mullo_epi32(a.v_hi, b.v_lo); - __vec16_i32 hi_m3 = _mm512_mullo_epi32(a.v_lo, b.v_hi); - - __mmask16 carry = 0; - __vec16_i32 hi_p23 = _mm512_addsetc_epi32(hi_m2, hi_m3, &carry); - __vec16_i32 hi = _mm512_adc_epi32(hi_p23, carry, hi_m1, &carry); - - __vec16_i64 ret_abs(lo, hi); - return __select(sign, __sub(__vec16_i64(__ispc_zero, __ispc_zero), ret_abs), ret_abs); + __m512i lo = _mm512_mullox_epi64(a.v_lo, b.v_lo); + __m512i hi = _mm512_mullox_epi64(a.v_hi, b.v_hi); + return __vec16_i64(lo, hi); } static FORCEINLINE __vec16_i64 __sdiv(const __vec16_i64 &a, const __vec16_i64 &b) @@ -934,91 +851,69 @@ static FORCEINLINE __vec16_i64 __udiv(const __vec16_i64 &a, const __vec16_i64 &b } static FORCEINLINE __vec16_i64 __or(__vec16_i64 a, __vec16_i64 b) { - return __vec16_i64(_mm512_or_epi32(a.v_lo, b.v_lo), _mm512_or_epi32(a.v_hi, b.v_hi)); + return __vec16_i64(_mm512_or_epi64(a.v_lo, b.v_lo), _mm512_or_epi64(a.v_hi, b.v_hi)); } static FORCEINLINE __vec16_i64 __and(__vec16_i64 a, __vec16_i64 b) { - return __vec16_i64(_mm512_and_epi32(a.v_lo, b.v_lo), _mm512_and_epi32(a.v_hi, b.v_hi)); + return __vec16_i64(_mm512_and_epi64(a.v_lo, b.v_lo), _mm512_and_epi64(a.v_hi, b.v_hi)); } static FORCEINLINE __vec16_i64 __xor(__vec16_i64 a, __vec16_i64 b) { - return __vec16_i64(_mm512_xor_epi32(a.v_lo, b.v_lo), _mm512_xor_epi32(a.v_hi, b.v_hi)); + return __vec16_i64(_mm512_xor_epi64(a.v_lo, b.v_lo), _mm512_xor_epi64(a.v_hi, b.v_hi)); } static FORCEINLINE __vec16_i64 __shl(__vec16_i64 a, __vec16_i64 b) { - /* this is a safety gate in case b-shift >= 
32 */ - const __vec16_i32 xfer = __select( - __signed_less_than_i32(b.v_lo, __ispc_thirty_two), - __lshr(a.v_lo, __sub(__ispc_thirty_two, b.v_lo)), - __shl (a.v_lo, __sub(b.v_lo, __ispc_thirty_two)) - ); - const __vec16_i32 hi = __or(__shl(a.v_hi, b.v_lo), xfer); - const __vec16_i32 lo = __shl(a.v_lo, b.v_lo); + const __vec16_i32 lo = _mm512_sllv_epi64(a.v_lo, b.v_lo); + const __vec16_i32 hi = _mm512_sllv_epi64(a.v_hi, b.v_hi); + return __vec16_i64(lo, hi); } static FORCEINLINE __vec16_i64 __shl(__vec16_i64 a, unsigned long long b) { - __vec16_i32 hi; - if (b <= 32) hi = _mm512_or_epi32(_mm512_slli_epi32(a.v_hi, b), _mm512_srli_epi32(a.v_lo, 32-b)); - else hi = _mm512_slli_epi32(a.v_lo, b - 32); - __vec16_i32 lo = _mm512_slli_epi32(a.v_lo, b); + const __vec16_i32 lo = _mm512_slli_epi64(a.v_lo, b); + const __vec16_i32 hi = _mm512_slli_epi64(a.v_hi, b); return __vec16_i64(lo, hi); } static FORCEINLINE __vec16_i64 __lshr(__vec16_i64 a, __vec16_i64 b) { - /* this is a safety gate in case b-shift >= 32 */ - const __vec16_i32 xfer = __select( - __signed_less_than_i32(b.v_lo, __ispc_thirty_two), - __shl (a.v_hi, __sub(__ispc_thirty_two, b.v_lo)), - __lshr(a.v_hi, __sub(b.v_lo, __ispc_thirty_two)) - ); - const __vec16_i32 lo = __or(__lshr(a.v_lo, b.v_lo), xfer); - const __vec16_i32 hi = __lshr(a.v_hi, b.v_lo); + const __vec16_i32 lo = _mm512_srlv_epi64(a.v_lo, b.v_lo); + const __vec16_i32 hi = _mm512_srlv_epi64(a.v_hi, b.v_hi); return __vec16_i64(lo, hi); } static FORCEINLINE __vec16_i64 __lshr(__vec16_i64 a, unsigned long long b) { - /* this is a safety gate in case b-shift >= 32 */ - __vec16_i32 xfer; - if (32 <= b) xfer = __lshr(a.v_hi, b-32); - else xfer = _mm512_and_epi32(_mm512_slli_epi32(__ispc_ffffffff, 32-b), _mm512_slli_epi32(a.v_hi, 32-b)); - __vec16_i32 hi = _mm512_srli_epi32(a.v_hi, b); - __vec16_i32 lo = _mm512_or_epi32(xfer, _mm512_srli_epi32(a.v_lo, b)); + const __vec16_i32 lo = _mm512_srli_epi64(a.v_lo, b); + const __vec16_i32 hi = _mm512_srli_epi64(a.v_hi, b); + return __vec16_i64(lo, hi); } static FORCEINLINE __vec16_i64 __ashr(__vec16_i64 a, __vec16_i64 b) { - /* this is a safety gate in case b-shift >= 32 */ - const __vec16_i32 xfer = __select( - __signed_less_than_i32(b.v_lo, __ispc_thirty_two), - __shl (a.v_hi, __sub(__ispc_thirty_two, b.v_lo)), - __ashr(a.v_hi, __sub(b.v_lo, __ispc_thirty_two)) - ); - const __vec16_i32 lo = __or(__lshr(a.v_lo, b.v_lo), xfer); - const __vec16_i32 hi = __ashr(a.v_hi, b.v_lo); + const __vec16_i32 lo = _mm512_srav_epi64(a.v_lo, b.v_lo); + const __vec16_i32 hi = _mm512_srav_epi64(a.v_hi, b.v_hi); + return __vec16_i64(lo, hi); } static FORCEINLINE __vec16_i64 __ashr(__vec16_i64 a, unsigned long long b) { - __vec16_i32 xfer; - if (b < 32) xfer = _mm512_slli_epi32(_mm512_and_epi32(a.v_hi, _mm512_set1_epi32((1< static RetVecType __smear_i64(const int64_t &l); template <> FORCEINLINE __vec16_i64 __smear_i64<__vec16_i64>(const int64_t &l) { - const int *i = (const int*)&l; - return __vec16_i64(_mm512_set1_epi32(i[0]), _mm512_set1_epi32(i[1])); + return __vec16_i64(l); } static FORCEINLINE __vec16_i64 __rotate_i64(__vec16_i64 v, int index) { - __vec16_i32 idx = __smear_i32<__vec16_i32>(index); - __vec16_i32 shuffle = _mm512_and_epi32(_mm512_add_epi32(__ispc_stride1, idx), __smear_i32<__vec16_i32>(0xf)); - return __vec16_i64(_mm512_mask_permutevar_epi32(v.v_lo, 0xffff, shuffle, v.v_lo), - _mm512_mask_permutevar_epi32(v.v_hi, 0xffff, shuffle, v.v_hi)); + if (index == 0) return v; + else { + // "normalize" to get rid of wraparound + index &= 0xF; + if 
(index == 8) return __vec16_i64(v.v_hi, v.v_lo); + else { + bool swap = true; + if (index > 8) { + swap = false; + index -= 8; + } + __m512i v1 = _mm512_alignr_epi64(v.v_hi, v.v_lo, index); + __m512i v2 = _mm512_alignr_epi64(v.v_lo, v.v_hi, index); + return (swap) ? __vec16_i64(v1, v2) : __vec16_i64(v2, v1); + } + } } static FORCEINLINE __vec16_i64 __shuffle2_i64(__vec16_i64 v0, __vec16_i64 v1, __vec16_i32 index) { + // look up return __vec16_i64(__shuffle2_i32(v0.v_lo, v1.v_lo, index), __shuffle2_i32(v0.v_hi, v1.v_hi, index)); } template static FORCEINLINE __vec16_i64 __load(const __vec16_i64 *p) { - __vec16_i32 v1; - __vec16_i32 v2; + __vec16_i64 v; const uint8_t*ptr = (const uint8_t*)p; - v1 = _mm512_mask_loadu_epi64(v1, 0xFF, ptr+64); - v2 = _mm512_mask_loadu_epi64(v2, 0xFF, ptr); + v.v_lo = _mm512_loadu_si512(ptr); + v.v_hi = _mm512_loadu_si512(ptr+64); - __vec16_i64 ret; - ret.v_hi = _mm512_mask_permutevar_epi32(ret.v_hi, 0xFF00, - _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), - v1); - ret.v_hi = _mm512_mask_permutevar_epi32(ret.v_hi, 0x00FF, - _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), - v2); - ret.v_lo = _mm512_mask_permutevar_epi32(ret.v_lo, 0xFF00, - _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), - v1); - ret.v_lo = _mm512_mask_permutevar_epi32(ret.v_lo, 0x00FF, - _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), - v2); - return ret; + return v; } #if 0 @@ -1171,18 +1060,6 @@ template <> FORCEINLINE __vec16_i64 __load<64>(const __vec16_i64 *p) { __m512i v2 = _mm512_load_epi32(p); __m512i v1 = _mm512_load_epi32(((uint8_t*)p)+64); __vec16_i64 ret; - ret.v_hi = _mm512_mask_permutevar_epi32(ret.v_hi, 0xFF00, - _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), - v1); - ret.v_hi = _mm512_mask_permutevar_epi32(ret.v_hi, 0x00FF, - _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), - v2); - ret.v_lo = _mm512_mask_permutevar_epi32(ret.v_lo, 0xFF00, - _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), - v1); - ret.v_lo = _mm512_mask_permutevar_epi32(ret.v_lo, 0x00FF, - _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), - v2); return ret; } @@ -1192,39 +1069,11 @@ template <> FORCEINLINE __vec16_i64 __load<128>(const __vec16_i64 *p) { #endif template static FORCEINLINE void __store(__vec16_i64 *p, __vec16_i64 v) { - __m512i v1; - __m512i v2; - v1 = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, - _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), - v.v_hi); - v1 = _mm512_mask_permutevar_epi32(v1, 0x5555, - _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), - v.v_lo); - v2 = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, - _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), - v.v_hi); - v2 = _mm512_mask_permutevar_epi32(v2, 0x5555, - _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), - v.v_lo); - _mm512_mask_storeu_epi64(p, 0xFF, v2); - _mm512_mask_storeu_epi64((uint8_t*)p+64, 0xFF, v1); + _mm512_storeu_si512(p, v.v_lo); + _mm512_storeu_si512(p+64, v.v_hi); } #if 0 template <> FORCEINLINE void __store<64>(__vec16_i64 *p, __vec16_i64 v) { - __m512i v1; - __m512i v2; - v1 = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, - _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), - v.v_hi); - v1 = _mm512_mask_permutevar_epi32(v1, 0x5555, - _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), - v.v_lo); - v2 = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, - 
_mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), - v.v_hi); - v2 = _mm512_mask_permutevar_epi32(v2, 0x5555, - _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), - v.v_lo); _mm512_store_epi64(p, v2); _mm512_store_epi64(((uint8_t*)p)+64, v1); } @@ -1246,12 +1095,13 @@ static FORCEINLINE __vec16_i64 __gather_base_offsets32_i64(uint8_t *base, uint32_t scale, __vec16_i32 offsets, __vec16_i1 mask) { __vec16_i64 ret; - ret.v_lo = _mm512_mask_i32extgather_epi32(_mm512_undefined_epi32(), mask, offsets, - base, _MM_UPCONV_EPI32_NONE, scale, - _MM_HINT_NONE); - ret.v_hi = _mm512_mask_i32extgather_epi32(_mm512_undefined_epi32(), mask, offsets, - base+4, _MM_UPCONV_EPI32_NONE, scale, - _MM_HINT_NONE); + ret.v_lo = _mm512_mask_i32logather_epi64(_mm512_undefined_epi32(), mask.lo(), + offsets, base, scale); + ret.v_hi = _mm512_mask_i32logather_epi64(_mm512_undefined_epi32(), mask.hi(), + _mm512_shuffle_i32x4(offsets, + _mm512_undefined_epi32(), + 0xE), + base, scale); return ret; } @@ -1267,29 +1117,11 @@ __gather_base_offsets32_i64(uint8_t *base, uint32_t scale, __vec16_i32 offsets, __gather64_i64(__vec16_i64 addr, __vec16_i1 mask) { __vec16_i64 ret; - - // There is no gather instruction with 64-bit offsets in KNC. - // We have to manually iterate over the upper 32 bits ;-) - __vec16_i1 still_to_do = mask; - const __vec16_i32 signed_offsets = _mm512_add_epi32(addr.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); - while (still_to_do) { - int first_active_lane = _mm_tzcnt_32((int)still_to_do); - const uint32_t &hi32 = ((uint*)&addr.v_hi)[first_active_lane]; - __vec16_i1 match = _mm512_mask_cmp_epi32_mask(still_to_do,addr.v_hi, - __smear_i32<__vec16_i32>((int32_t)hi32), - _MM_CMPINT_EQ); - - void * base = (void*)((((unsigned long)hi32) << 32) + (unsigned long)(-(long)INT_MIN)); - ret.v_lo = _mm512_mask_i32extgather_epi32(ret.v_lo, match, signed_offsets, - base, _MM_UPCONV_EPI32_NONE, 1, - _MM_HINT_NONE); - ret.v_hi = _mm512_mask_i32extgather_epi32(ret.v_hi, match, signed_offsets, - base+4, _MM_UPCONV_EPI32_NONE, 1, - _MM_HINT_NONE); - - still_to_do = _mm512_kxor(match, still_to_do); - } - + + ret.v_lo = _mm512_mask_i64gather_epi64(_mm512_undefined_epi32(), mask.lo(), + addr.v_lo, 0, 1); + ret.v_hi = _mm512_mask_i64gather_epi64(_mm512_undefined_epi32(), mask.hi(), + addr.v_hi, 0, 1); return ret; } @@ -1569,41 +1401,41 @@ static FORCEINLINE __vec16_i16 __float_to_half_varying(__vec16_f v) static FORCEINLINE __vec16_d __add(__vec16_d a, __vec16_d b) { __vec16_d ret; - ret.v1 = _mm512_add_pd(a.v1, b.v1); - ret.v2 = _mm512_add_pd(a.v2, b.v2); + ret.v_lo = _mm512_add_pd(a.v_lo, b.v_lo); + ret.v_hi = _mm512_add_pd(a.v_hi, b.v_hi); return ret; } static FORCEINLINE __vec16_d __sub(__vec16_d a, __vec16_d b) { __vec16_d ret; - ret.v1 = _mm512_sub_pd(a.v1, b.v1); - ret.v2 = _mm512_sub_pd(a.v2, b.v2); + ret.v_lo = _mm512_sub_pd(a.v_lo, b.v_lo); + ret.v_hi = _mm512_sub_pd(a.v_hi, b.v_hi); return ret; } static FORCEINLINE __vec16_d __mul(__vec16_d a, __vec16_d b) { __vec16_d ret; - ret.v1 = _mm512_mul_pd(a.v1, b.v1); - ret.v2 = _mm512_mul_pd(a.v2, b.v2); + ret.v_lo = _mm512_mul_pd(a.v_lo, b.v_lo); + ret.v_hi = _mm512_mul_pd(a.v_hi, b.v_hi); return ret; } static FORCEINLINE __vec16_d __div(__vec16_d a, __vec16_d b) { __vec16_d ret; - ret.v1 = _mm512_div_pd(a.v1, b.v1); - ret.v2 = _mm512_div_pd(a.v2, b.v2); + ret.v_lo = _mm512_div_pd(a.v_lo, b.v_lo); + ret.v_hi = _mm512_div_pd(a.v_hi, b.v_hi); return ret; } static FORCEINLINE __vec16_d __sqrt_varying_double(__vec16_d v) { - return 
__vec16_d(_mm512_sqrt_pd(v.v1),_mm512_sqrt_pd(v.v2)); + return __vec16_d(_mm512_sqrt_pd(v.v_lo),_mm512_sqrt_pd(v.v_hi)); } static FORCEINLINE __vec16_i1 __equal_double(__vec16_d a, __vec16_d b) { __vec16_i1 ret1; __vec16_i1 ret2; - ret1 = _mm512_cmpeq_pd_mask(a.v1, b.v1); - ret2 = _mm512_cmpeq_pd_mask(a.v2, b.v2); + ret1 = _mm512_cmpeq_pd_mask(a.v_lo, b.v_lo); + ret2 = _mm512_cmpeq_pd_mask(a.v_hi, b.v_hi); return _mm512_kmovlhb(ret1, ret2); } @@ -1611,16 +1443,16 @@ static FORCEINLINE __vec16_i1 __equal_double_and_mask(__vec16_d a, __vec16_d b, __vec16_i1 m) { __vec16_i1 ret1; __vec16_i1 ret2; - ret1 = _mm512_mask_cmpeq_pd_mask(m, a.v1, b.v1); - ret2 = _mm512_mask_cmpeq_pd_mask((m << 8), a.v2, b.v2); + ret1 = _mm512_mask_cmpeq_pd_mask(m.lo(), a.v_lo, b.v_lo); + ret2 = _mm512_mask_cmpeq_pd_mask(m.hi(), a.v_hi, b.v_hi); return _mm512_kmovlhb(ret1, ret2); } static FORCEINLINE __vec16_i1 __not_equal_double(__vec16_d a, __vec16_d b) { __vec16_i1 ret1; __vec16_i1 ret2; - ret1 = _mm512_cmpneq_pd_mask(a.v1, b.v1); - ret2 = _mm512_cmpneq_pd_mask(a.v2, b.v2); + ret1 = _mm512_cmpneq_pd_mask(a.v_lo, b.v_lo); + ret2 = _mm512_cmpneq_pd_mask(a.v_hi, b.v_hi); return _mm512_kmovlhb(ret1, ret2); } @@ -1628,16 +1460,17 @@ static FORCEINLINE __vec16_i1 __not_equal_double_and_mask(__vec16_d a, __vec16_d __vec16_i1 m) { __vec16_i1 ret1; __vec16_i1 ret2; - ret1 = _mm512_mask_cmpneq_pd_mask(m, a.v1, b.v1); - ret2 = _mm512_mask_cmpneq_pd_mask((m << 8), a.v2, b.v2); + __vec16_i1 tmp_m = m; + ret1 = _mm512_mask_cmpneq_pd_mask(m.lo(), a.v_lo, b.v_lo); + ret2 = _mm512_mask_cmpneq_pd_mask(m.hi(), a.v_hi, b.v_hi); return _mm512_kmovlhb(ret1, ret2); } static FORCEINLINE __vec16_i1 __less_than_double(__vec16_d a, __vec16_d b) { __vec16_i1 ret1; __vec16_i1 ret2; - ret1 = _mm512_cmplt_pd_mask(a.v1, b.v1); - ret2 = _mm512_cmplt_pd_mask(a.v2, b.v2); + ret1 = _mm512_cmplt_pd_mask(a.v_lo, b.v_lo); + ret2 = _mm512_cmplt_pd_mask(a.v_hi, b.v_hi); return _mm512_kmovlhb(ret1, ret2); } @@ -1646,16 +1479,16 @@ static FORCEINLINE __vec16_i1 __less_than_double_and_mask(__vec16_d a, __vec16_d __vec16_i1 ret1; __vec16_i1 ret2; __vec16_i1 tmp_m = m; - ret1 = _mm512_mask_cmplt_pd_mask(m, a.v1, b.v1); - ret2 = _mm512_mask_cmplt_pd_mask((m << 8), a.v2, b.v2); + ret1 = _mm512_mask_cmplt_pd_mask(m.lo(), a.v_lo, b.v_lo); + ret2 = _mm512_mask_cmplt_pd_mask(m.hi(), a.v_hi, b.v_hi); return _mm512_kmovlhb(ret1, ret2); } static FORCEINLINE __vec16_i1 __less_equal_double(__vec16_d a, __vec16_d b) { __vec16_i1 ret1; __vec16_i1 ret2; - ret1 = _mm512_cmple_pd_mask(a.v1, b.v1); - ret2 = _mm512_cmple_pd_mask(a.v2, b.v2); + ret1 = _mm512_cmple_pd_mask(a.v_lo, b.v_lo); + ret2 = _mm512_cmple_pd_mask(a.v_hi, b.v_hi); return _mm512_kmovlhb(ret1, ret2); } @@ -1664,16 +1497,16 @@ static FORCEINLINE __vec16_i1 __less_equal_double_and_mask(__vec16_d a, __vec16_ __vec16_i1 ret1; __vec16_i1 ret2; __vec16_i1 tmp_m = m; - ret1 = _mm512_mask_cmple_pd_mask(m, a.v1, b.v1); - ret2 = _mm512_mask_cmple_pd_mask((m << 8), a.v2, b.v2); + ret1 = _mm512_mask_cmple_pd_mask(m.lo(), a.v_lo, b.v_lo); + ret2 = _mm512_mask_cmple_pd_mask(m.hi(), a.v_hi, b.v_hi); return _mm512_kmovlhb(ret1, ret2); } static FORCEINLINE __vec16_i1 __greater_than_double(__vec16_d a, __vec16_d b) { __vec16_i1 ret1; __vec16_i1 ret2; - ret1 = _mm512_cmpnle_pd_mask(a.v1, b.v1); - ret2 = _mm512_cmpnle_pd_mask(a.v2, b.v2); + ret1 = _mm512_cmpnle_pd_mask(a.v_lo, b.v_lo); + ret2 = _mm512_cmpnle_pd_mask(a.v_hi, b.v_hi); return _mm512_kmovlhb(ret1, ret2); } @@ -1682,58 +1515,63 @@ static FORCEINLINE __vec16_i1 
__greater_than_double_and_mask(__vec16_d a, __vec1 __vec16_i1 ret1; __vec16_i1 ret2; __vec16_i1 tmp_m = m; - ret1 = _mm512_mask_cmpnle_pd_mask(m, a.v1, b.v1); - ret2 = _mm512_mask_cmpnle_pd_mask((m << 8), a.v2, b.v2); + ret1 = _mm512_mask_cmpnle_pd_mask(m.lo(), a.v_lo, b.v_lo); + ret2 = _mm512_mask_cmpnle_pd_mask(m.hi(), a.v_hi, b.v_hi); return _mm512_kmovlhb(ret1, ret2); } static FORCEINLINE __vec16_i1 __greater_equal_double(__vec16_d a, __vec16_d b) { __vec16_i1 ret1; __vec16_i1 ret2; - ret1 = _mm512_cmpnlt_pd_mask(a.v1, b.v1); - ret2 = _mm512_cmpnlt_pd_mask(a.v2, b.v2); + ret1 = _mm512_cmpnlt_pd_mask(a.v_lo, b.v_lo); + ret2 = _mm512_cmpnlt_pd_mask(a.v_hi, b.v_hi); return _mm512_kmovlhb(ret1, ret2); } -static FORCEINLINE __vec16_i1 __greater_equal_double_and_mask(__vec16_d a, __vec16_d b, __vec16_i1 m) { +static FORCEINLINE __vec16_i1 __greater_equal_double_and_mask(__vec16_d a, __vec16_d b, + __vec16_i1 m) { __vec16_i1 ret1; __vec16_i1 ret2; - ret1 = _mm512_mask_cmpnlt_pd_mask(m, a.v1, b.v1); - ret2 = _mm512_mask_cmpnlt_pd_mask((m << 8), a.v2, b.v2); + __vec16_i1 tmp_m = m; + ret1 = _mm512_mask_cmpnlt_pd_mask(m.lo(), a.v_lo, b.v_lo); + ret2 = _mm512_mask_cmpnlt_pd_mask(m.hi(), a.v_hi, b.v_hi); return _mm512_kmovlhb(ret1, ret2); } static FORCEINLINE __vec16_i1 __ordered_double(__vec16_d a, __vec16_d b) { __vec16_i1 ret1; __vec16_i1 ret2; - ret1 = _mm512_cmpord_pd_mask(a.v1, b.v1); - ret2 = _mm512_cmpord_pd_mask(a.v2, b.v2); + ret1 = _mm512_cmpord_pd_mask(a.v_lo, b.v_lo); + ret2 = _mm512_cmpord_pd_mask(a.v_hi, b.v_hi); return _mm512_kmovlhb(ret1, ret2); } static FORCEINLINE __vec16_i1 __unordered_double(__vec16_d a, __vec16_d b) { __vec16_i1 ret1; __vec16_i1 ret2; - ret1 = _mm512_cmpunord_pd_mask(a.v1, b.v1); - ret2 = _mm512_cmpunord_pd_mask(a.v2, b.v2); + ret1 = _mm512_cmpunord_pd_mask(a.v_lo, b.v_lo); + ret2 = _mm512_cmpunord_pd_mask(a.v_hi, b.v_hi); return _mm512_kmovlhb(ret1, ret2); } static FORCEINLINE __vec16_i1 __unordered_double_and_mask(__vec16_d a, __vec16_d b, __vec16_i1 mask) { __vec16_i1 ret1; __vec16_i1 ret2; - ret1 = _mm512_mask_cmpunord_pd_mask(mask, a.v1, b.v1); - ret2 = _mm512_mask_cmpunord_pd_mask((mask << 8), a.v2, b.v2); + __vec16_i1 tmp_m = mask; + ret1 = _mm512_mask_cmpunord_pd_mask(mask.lo(), a.v_lo, b.v_lo); + ret2 = _mm512_mask_cmpunord_pd_mask(mask.hi(), a.v_hi, b.v_hi); return _mm512_kmovlhb(ret1, ret2); } static FORCEINLINE __vec16_d __select(__vec16_i1 mask, __vec16_d a, __vec16_d b) { __vec16_d ret; - ret.v1 = _mm512_mask_mov_pd(b.v1, mask, a.v1); - ret.v2 = _mm512_mask_mov_pd(b.v2, (mask << 8), a.v2); + __vec16_i1 tmp_m = mask; + ret.v_lo = _mm512_mask_mov_pd(b.v_lo, mask.lo(), a.v_lo); + ret.v_hi = _mm512_mask_mov_pd(b.v_hi, mask.hi(), a.v_hi); return ret; } + static FORCEINLINE __vec16_d __select(bool cond, __vec16_d a, __vec16_d b) { return cond ? 
a : b; } @@ -1749,16 +1587,16 @@ static FORCEINLINE void __insert_element(__vec16_d *v, uint32_t index, double v template static RetVecType __smear_double(double d); template <> FORCEINLINE __vec16_d __smear_double<__vec16_d>(double d) { __vec16_d ret; - ret.v1 = _mm512_set1_pd(d); - ret.v2 = _mm512_set1_pd(d); + ret.v_lo = _mm512_set1_pd(d); + ret.v_hi = _mm512_set1_pd(d); return ret; } template static RetVecType __setzero_double(); template <> FORCEINLINE __vec16_d __setzero_double<__vec16_d>() { __vec16_d ret; - ret.v1 = _mm512_setzero_pd(); - ret.v2 = _mm512_setzero_pd(); + ret.v_lo = _mm512_setzero_pd(); + ret.v_hi = _mm512_setzero_pd(); return ret; } @@ -1770,8 +1608,8 @@ template <> FORCEINLINE __vec16_d __undef_double<__vec16_d>() { static FORCEINLINE __vec16_d __broadcast_double(__vec16_d v, int index) { __vec16_d ret; double val = __extract_element(v, index & 0xf); - ret.v1 = _mm512_set1_pd(val); - ret.v2 = _mm512_set1_pd(val); + ret.v_lo = _mm512_set1_pd(val); + ret.v_hi = _mm512_set1_pd(val); return ret; } @@ -1788,8 +1626,8 @@ static FORCEINLINE __vec16_d __shuffle2_double(__vec16_d v0, __vec16_d v1, __vec template static FORCEINLINE __vec16_d __load(const __vec16_d *p) { __vec16_d ret; - ret.v1 = _mm512_loadu_pd(p); - ret.v2 = _mm512_loadu_pd((uint8_t*)p+64); + ret.v_lo = _mm512_loadu_pd(p); + ret.v_hi = _mm512_loadu_pd((uint8_t*)p+64); return ret; } #if 0 @@ -1805,8 +1643,8 @@ template <> FORCEINLINE __vec16_d __load<128>(const __vec16_d *p) { } #endif template static FORCEINLINE void __store(__vec16_d *p, __vec16_d v) { - _mm512_storeu_pd(p, v.v1); - _mm512_storeu_pd((uint8_t*)p+64, v.v2); + _mm512_storeu_pd(p, v.v_lo); + _mm512_storeu_pd((uint8_t*)p+64, v.v_hi); } #if 0 template <> FORCEINLINE void __store<64>(__vec16_d *p, __vec16_d v) { @@ -1942,10 +1780,6 @@ static FORCEINLINE __vec16_i64 __cast_zext(const __vec16_i64 &, const __vec16_i3 return __vec16_i64(val.v, _mm512_setzero_epi32()); } -static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i32 val) { - return _mm512_cvtepi32_ps(val); -} - static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i8 val) { return __cast_sitofp(__vec16_f(), __cast_sext(__vec16_i32(), val)); } @@ -1954,11 +1788,11 @@ static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i16 val) { return __cast_sitofp(__vec16_f(), __cast_sext(__vec16_i32(), val)); } +static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i32 val) { + return _mm512_cvtepi32_ps(val); +} static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i64 val) { - __m512i tmp1; - __m512i tmp2; - hilo2zmm(val, tmp1, tmp2); __vec16_f ret; /* @@ -1971,23 +1805,23 @@ static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i64 val) { } */ - ret[0] = (float)(((int64_t*)&tmp1)[0]); - ret[1] = (float)(((int64_t*)&tmp1)[1]); - ret[2] = (float)(((int64_t*)&tmp1)[2]); - ret[3] = (float)(((int64_t*)&tmp1)[3]); - ret[4] = (float)(((int64_t*)&tmp1)[4]); - ret[5] = (float)(((int64_t*)&tmp1)[5]); - ret[6] = (float)(((int64_t*)&tmp1)[6]); - ret[7] = (float)(((int64_t*)&tmp1)[7]); + ret[0] = (float)(((int64_t*)&val.v_lo)[0]); + ret[1] = (float)(((int64_t*)&val.v_lo)[1]); + ret[2] = (float)(((int64_t*)&val.v_lo)[2]); + ret[3] = (float)(((int64_t*)&val.v_lo)[3]); + ret[4] = (float)(((int64_t*)&val.v_lo)[4]); + ret[5] = (float)(((int64_t*)&val.v_lo)[5]); + ret[6] = (float)(((int64_t*)&val.v_lo)[6]); + ret[7] = (float)(((int64_t*)&val.v_lo)[7]); - ret[8] = (float)(((int64_t*)&tmp2)[0]); - ret[9] = (float)(((int64_t*)&tmp2)[1]); - ret[10] = 
(float)(((int64_t*)&tmp2)[2]); - ret[11] = (float)(((int64_t*)&tmp2)[3]); - ret[12] = (float)(((int64_t*)&tmp2)[4]); - ret[13] = (float)(((int64_t*)&tmp2)[5]); - ret[14] = (float)(((int64_t*)&tmp2)[6]); - ret[15] = (float)(((int64_t*)&tmp2)[7]); + ret[8] = (float)(((int64_t*)&val.v_hi)[0]); + ret[9] = (float)(((int64_t*)&val.v_hi)[1]); + ret[10] = (float)(((int64_t*)&val.v_hi)[2]); + ret[11] = (float)(((int64_t*)&val.v_hi)[3]); + ret[12] = (float)(((int64_t*)&val.v_hi)[4]); + ret[13] = (float)(((int64_t*)&val.v_hi)[5]); + ret[14] = (float)(((int64_t*)&val.v_hi)[6]); + ret[15] = (float)(((int64_t*)&val.v_hi)[7]); return ret; } @@ -1995,43 +1829,41 @@ static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i64 val) { static FORCEINLINE __vec16_d __cast_sitofp(__vec16_d, __vec16_i8 val) { __vec16_i32 vi = __cast_sext(__vec16_i32(), val); __vec16_d ret; - ret.v1 = _mm512_cvtepi32lo_pd(vi); + ret.v_lo = _mm512_cvtepi32lo_pd(vi); __vec16_i32 other8 = _mm512_permute4f128_epi32(vi, _MM_PERM_DCDC); - ret.v2 = _mm512_cvtepi32lo_pd(other8); + ret.v_hi = _mm512_cvtepi32lo_pd(other8); return ret; } static FORCEINLINE __vec16_d __cast_sitofp(__vec16_d, __vec16_i16 val) { __vec16_i32 vi = __cast_sext(__vec16_i32(), val); __vec16_d ret; - ret.v1 = _mm512_cvtepi32lo_pd(vi); + ret.v_lo = _mm512_cvtepi32lo_pd(vi); __vec16_i32 other8 = _mm512_permute4f128_epi32(vi, _MM_PERM_DCDC); - ret.v2 = _mm512_cvtepi32lo_pd(other8); + ret.v_hi = _mm512_cvtepi32lo_pd(other8); return ret; } static FORCEINLINE __vec16_d __cast_sitofp(__vec16_d, __vec16_i32 val) { __vec16_d ret; - ret.v1 = _mm512_cvtepi32lo_pd(val); + ret.v_lo = _mm512_cvtepi32lo_pd(val); __vec16_i32 other8 = _mm512_permute4f128_epi32(val, _MM_PERM_DCDC); - ret.v2 = _mm512_cvtepi32lo_pd(other8); + ret.v_hi = _mm512_cvtepi32lo_pd(other8); return ret; } static FORCEINLINE __vec16_d __cast_sitofp(__vec16_d, __vec16_i64 val) { - __m512i tmp1; - __m512i tmp2; - hilo2zmm(val, tmp1, tmp2); __vec16_d ret; for (int i = 0; i < 8; i++) { - ((double*)&ret.v1)[i] = (double)(((int64_t*)&tmp1)[i]); + ((double*)&ret.v_lo)[i] = (double)(((int64_t*)&val.v_lo)[i]); } for (int i = 0; i < 8; i++) { - ((double*)&ret.v2)[i] = (double)(((int64_t*)&tmp2)[i]); + ((double*)&ret.v_hi)[i] = (double)(((int64_t*)&val.v_hi)[i]); } return ret; } + static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i1 v) { const __m512 ret = _mm512_setzero_ps(); @@ -2039,10 +1871,6 @@ static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i1 v) return _mm512_mask_mov_ps(ret, v, one); } -static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i32 v) { - return _mm512_cvtepi32_ps(v); -} - static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, const __vec16_i8 &v) { return __cast_uitofp(__vec16_f(), __cast_zext(__vec16_i32(), v)); } @@ -2051,12 +1879,13 @@ static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i16 val) { return __cast_uitofp(__vec16_f(), __cast_zext(__vec16_i32(), val)); } +static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i32 v) { + return _mm512_cvtepu32_ps(v); +} + static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i64 val) { - __m512i tmp1; - __m512i tmp2; - hilo2zmm(val, tmp1, tmp2); __vec16_f ret; - // Loops don't work. It seems that it is icc bug. + // Cycles don't work. It seems that it is icc bug. 
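// Note (not from the original patch): the lane-by-lane conversion below is needed because
// AVX-512F (the subset KNL supports) has no packed 64-bit integer <-> float conversions,
// so scalar code is the portable fallback. On parts that also have AVX-512DQ one could
// collapse it to something like this hypothetical variant:
//
//   #if defined(__AVX512DQ__)
//   static FORCEINLINE __vec16_f __cast_uitofp_dq(__vec16_f, __vec16_i64 val) {
//       __m256 lo = _mm512_cvtepu64_ps(val.v_lo);   // lanes 0..7
//       __m256 hi = _mm512_cvtepu64_ps(val.v_hi);   // lanes 8..15
//       return _mm512_insertf32x8(_mm512_castps256_ps512(lo), hi, 1);
//   }
//   #endif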
/* for (int i = 0; i < 8; i++) { ((float*)&ret)[i] = ((float)(((uint64_t*)&tmp1)[i])); @@ -2065,22 +1894,22 @@ static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i64 val) { ((float*)&ret)[i + 8] = ((float)(((uint64_t*)&tmp2)[i])); } */ - ret[0] = ((float)(((uint64_t*)&tmp1)[0])); - ret[1] = ((float)(((uint64_t*)&tmp1)[1])); - ret[2] = ((float)(((uint64_t*)&tmp1)[2])); - ret[3] = ((float)(((uint64_t*)&tmp1)[3])); - ret[4] = ((float)(((uint64_t*)&tmp1)[4])); - ret[5] = ((float)(((uint64_t*)&tmp1)[5])); - ret[6] = ((float)(((uint64_t*)&tmp1)[6])); - ret[7] = ((float)(((uint64_t*)&tmp1)[7])); - ret[8] = ((float)(((uint64_t*)&tmp2)[0])); - ret[9] = ((float)(((uint64_t*)&tmp2)[1])); - ret[10] = ((float)(((uint64_t*)&tmp2)[2])); - ret[11] = ((float)(((uint64_t*)&tmp2)[3])); - ret[12] = ((float)(((uint64_t*)&tmp2)[4])); - ret[13] = ((float)(((uint64_t*)&tmp2)[5])); - ret[14] = ((float)(((uint64_t*)&tmp2)[6])); - ret[15] = ((float)(((uint64_t*)&tmp2)[7])); + ret[0] = ((float)(((uint64_t*)&val.v_lo)[0])); + ret[1] = ((float)(((uint64_t*)&val.v_lo)[1])); + ret[2] = ((float)(((uint64_t*)&val.v_lo)[2])); + ret[3] = ((float)(((uint64_t*)&val.v_lo)[3])); + ret[4] = ((float)(((uint64_t*)&val.v_lo)[4])); + ret[5] = ((float)(((uint64_t*)&val.v_lo)[5])); + ret[6] = ((float)(((uint64_t*)&val.v_lo)[6])); + ret[7] = ((float)(((uint64_t*)&val.v_hi)[7])); + ret[8] = ((float)(((uint64_t*)&val.v_hi)[0])); + ret[9] = ((float)(((uint64_t*)&val.v_hi)[1])); + ret[10] = ((float)(((uint64_t*)&val.v_hi)[2])); + ret[11] = ((float)(((uint64_t*)&val.v_hi)[3])); + ret[12] = ((float)(((uint64_t*)&val.v_hi)[4])); + ret[13] = ((float)(((uint64_t*)&val.v_hi)[5])); + ret[14] = ((float)(((uint64_t*)&val.v_hi)[6])); + ret[15] = ((float)(((uint64_t*)&val.v_hi)[7])); return ret; } @@ -2088,9 +1917,9 @@ static FORCEINLINE __vec16_d __cast_uitofp(__vec16_d, __vec16_i8 val) { __vec16_i32 vi = __cast_zext(__vec16_i32(), val); __vec16_d ret; - ret.v1 = _mm512_cvtepu32lo_pd(vi); + ret.v_lo = _mm512_cvtepu32lo_pd(vi); __vec16_i32 other8 = _mm512_permute4f128_epi32(vi, _MM_PERM_DCDC); - ret.v2 = _mm512_cvtepu32lo_pd(other8); + ret.v_hi = _mm512_cvtepu32lo_pd(other8); return ret; } @@ -2098,32 +1927,29 @@ static FORCEINLINE __vec16_d __cast_uitofp(__vec16_d, __vec16_i16 val) { __vec16_i32 vi = __cast_zext(__vec16_i32(), val); __vec16_d ret; - ret.v1 = _mm512_cvtepu32lo_pd(vi); + ret.v_lo = _mm512_cvtepu32lo_pd(vi); __vec16_i32 other8 = _mm512_permute4f128_epi32(vi, _MM_PERM_DCDC); - ret.v2 = _mm512_cvtepu32lo_pd(other8); + ret.v_hi = _mm512_cvtepu32lo_pd(other8); return ret; } static FORCEINLINE __vec16_d __cast_uitofp(__vec16_d, __vec16_i32 val) { __vec16_d ret; - ret.v1 = _mm512_cvtepu32lo_pd(val); + ret.v_lo = _mm512_cvtepu32lo_pd(val); __vec16_i32 other8 = _mm512_permute4f128_epi32(val, _MM_PERM_DCDC); - ret.v2 = _mm512_cvtepu32lo_pd(other8); + ret.v_hi = _mm512_cvtepu32lo_pd(other8); return ret; } static FORCEINLINE __vec16_d __cast_uitofp(__vec16_d, __vec16_i64 val) { - __m512i tmp1; - __m512i tmp2; - hilo2zmm(val, tmp1, tmp2); __vec16_d ret; for (int i = 0; i < 8; i++) { - ((double*)&ret.v1)[i] = (double)(((uint64_t*)&tmp1)[i]); + ((double*)&ret.v_lo)[i] = (double)(((uint64_t*)&val.v_lo)[i]); } for (int i = 0; i < 8; i++) { - ((double*)&ret.v2)[i] = (double)(((uint64_t*)&tmp2)[i]); + ((double*)&ret.v_hi)[i] = (double)(((uint64_t*)&val.v_hi)[i]); } return ret; } @@ -2131,7 +1957,7 @@ static FORCEINLINE __vec16_d __cast_uitofp(__vec16_d, __vec16_i64 val) { // float/double to signed int static FORCEINLINE __vec16_i32 
__cast_fptosi(__vec16_i32, __vec16_f val) { - return _mm512_cvt_roundps_epi32(val, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC); + return _mm512_cvtps_epi32(val); } static FORCEINLINE __vec16_i8 __cast_fptosi(__vec16_i8, __vec16_f val) { @@ -2149,21 +1975,20 @@ static FORCEINLINE __vec16_i16 __cast_fptosi(__vec16_i16, __vec16_f val) { } static FORCEINLINE __vec16_i64 __cast_fptosi(__vec16_i64, __vec16_f val) { - __m512i tmp1; + __vec16_i64 ret; for (int i = 0; i < 8; i++) { - ((int64_t*)&tmp1)[i] = (int64_t)(((float*)&val)[i]); + ((int64_t*)&ret.v_lo)[i] = (int64_t)(((float*)&val)[i]); } - __m512i tmp2; for (int i = 0; i < 8; i++) { - ((int64_t*)&tmp2)[i] = (int64_t)(((float*)&val)[i + 8]); + ((int64_t*)&ret.v_hi)[i] = (int64_t)(((float*)&val)[i + 8]); } - return zmm2hilo(tmp1, tmp2); + return ret; } static FORCEINLINE __vec16_i32 __cast_fptosi(__vec16_i32, __vec16_d val) { - __vec16_i32 tmp = _mm512_cvtfxpnt_roundpd_epi32lo(val.v2, _MM_ROUND_MODE_TOWARD_ZERO); + __vec16_i32 tmp = _mm512_cvtfxpnt_roundpd_epi32lo(val.v_hi, _MM_ROUND_MODE_TOWARD_ZERO); __vec16_i32 ret_hi8 = _mm512_permute4f128_epi32(tmp, _MM_PERM_BADC); - __vec16_i32 ret_lo8 = _mm512_cvtfxpnt_roundpd_epi32lo(val.v1, _MM_ROUND_MODE_TOWARD_ZERO); + __vec16_i32 ret_lo8 = _mm512_cvtfxpnt_roundpd_epi32lo(val.v_lo, _MM_ROUND_MODE_TOWARD_ZERO); return _mm512_xor_epi32(ret_lo8, ret_hi8); } @@ -2182,15 +2007,15 @@ static FORCEINLINE __vec16_i16 __cast_fptosi(__vec16_i16, __vec16_d val) { } static FORCEINLINE __vec16_i64 __cast_fptosi(__vec16_i64, __vec16_d val) { - __m512i tmp1; + __vec16_i64 ret; for (int i = 0; i < 8; i++) { - ((int64_t*)&tmp1)[i] = (int64_t)(((double*)&val.v1)[i]); + ((int64_t*)&ret.v_lo)[i] = (int64_t)(((double*)&val.v_lo)[i]); } __m512i tmp2; for (int i = 0; i < 8; i++) { - ((int64_t*)&tmp2)[i] = (int64_t)(((double*)&val.v2)[i]); + ((int64_t*)&ret.v_hi)[i] = (int64_t)(((double*)&val.v_hi)[i]); } - return zmm2hilo(tmp1, tmp2); + return ret; } @@ -2213,21 +2038,21 @@ static FORCEINLINE __vec16_i16 __cast_fptoui(__vec16_i16, __vec16_f val) { } static FORCEINLINE __vec16_i64 __cast_fptoui(__vec16_i64, __vec16_f val) { - __m512i tmp1; + __vec16_i64 ret; for (int i = 0; i < 8; i++) { - ((uint64_t*)&tmp1)[i] = (uint64_t)(((float*)&val)[i]); + ((uint64_t*)&ret.v_lo)[i] = (uint64_t)(((float*)&val)[i]); } __m512i tmp2; for (int i = 0; i < 8; i++) { - ((uint64_t*)&tmp2)[i] = (uint64_t)(((float*)&val)[i + 8]); + ((uint64_t*)&ret.v_hi)[i] = (uint64_t)(((float*)&val)[i + 8]); } - return zmm2hilo(tmp1, tmp2); + return ret; } static FORCEINLINE __vec16_i32 __cast_fptoui(__vec16_i32, __vec16_d val) { - __vec16_i32 tmp = _mm512_cvtfxpnt_roundpd_epu32lo(val.v2, _MM_ROUND_MODE_TOWARD_ZERO); + __vec16_i32 tmp = _mm512_cvtfxpnt_roundpd_epu32lo(val.v_hi, _MM_ROUND_MODE_TOWARD_ZERO); __vec16_i32 ret_hi8 = _mm512_permute4f128_epi32(tmp, _MM_PERM_BADC); - __vec16_i32 ret_lo8 = _mm512_cvtfxpnt_roundpd_epu32lo(val.v1, _MM_ROUND_MODE_TOWARD_ZERO); + __vec16_i32 ret_lo8 = _mm512_cvtfxpnt_roundpd_epu32lo(val.v_lo, _MM_ROUND_MODE_TOWARD_ZERO); return _mm512_xor_epi32(ret_lo8, ret_hi8); } @@ -2246,15 +2071,15 @@ static FORCEINLINE __vec16_i16 __cast_fptoui(__vec16_i16, __vec16_d val) { } static FORCEINLINE __vec16_i64 __cast_fptoui(__vec16_i64, __vec16_d val) { - __m512i tmp1; + __vec16_i64 ret; for (int i = 0; i < 8; i++) { - ((uint64_t*)&tmp1)[i] = (uint64_t)(((double*)&val.v1)[i]); + ((uint64_t*)&ret.v_lo)[i] = (uint64_t)(((double*)&val.v_lo)[i]); } __m512i tmp2; for (int i = 0; i < 8; i++) { - ((uint64_t*)&tmp2)[i] = 
(uint64_t)(((double*)&val.v2)[i]); + ((uint64_t*)&ret.v_hi)[i] = (uint64_t)(((double*)&val.v_hi)[i]); } - return zmm2hilo(tmp1, tmp2); + return ret; } @@ -2265,15 +2090,15 @@ static FORCEINLINE __vec16_i64 __cast_fptoui(__vec16_i64, __vec16_d val) { static FORCEINLINE __vec16_d __cast_fpext(__vec16_d, __vec16_f val) { __vec16_d ret; - ret.v1 = _mm512_cvtpslo_pd(val.v); + ret.v_lo = _mm512_cvtpslo_pd(val.v); __vec16_f other8 = _mm512_permute4f128_epi32(_mm512_castps_si512(val.v), _MM_PERM_DCDC); - ret.v2 = _mm512_cvtpslo_pd(other8); + ret.v_hi = _mm512_cvtpslo_pd(other8); return ret; } static FORCEINLINE __vec16_f __cast_fptrunc(__vec16_f, __vec16_d val) { - __m512i r0i = _mm512_castps_si512(_mm512_cvtpd_pslo(val.v2)); - __m512i r1i = _mm512_castps_si512(_mm512_cvtpd_pslo(val.v1)); + __m512i r0i = _mm512_castps_si512(_mm512_cvtpd_pslo(val.v_hi)); + __m512i r1i = _mm512_castps_si512(_mm512_cvtpd_pslo(val.v_lo)); return _mm512_mask_permute4f128_epi32(r1i, 0xFF00, r0i, _MM_PERM_BABA); } @@ -2291,39 +2116,15 @@ static FORCEINLINE __vec16_i32 __cast_bits(__vec16_i32, __vec16_i32 val) { static FORCEINLINE __vec16_i64 __cast_bits(__vec16_i64, __vec16_d val) { __vec16_i64 ret; - ret.v_hi = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xFF00, - _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), - _mm512_castpd_si512(val.v2)); - ret.v_hi = _mm512_mask_permutevar_epi32(ret.v_hi, 0x00FF, - _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), - _mm512_castpd_si512(val.v1)); - ret.v_lo = _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xFF00, - _mm512_set_16to16_pi(14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1), - _mm512_castpd_si512(val.v2)); - ret.v_lo = _mm512_mask_permutevar_epi32(ret.v_lo, 0x00FF, - _mm512_set_16to16_pi(15,13,11,9,7,5,3,1,14,12,10,8,6,4,2,0), - _mm512_castpd_si512(val.v1)); + ret.v_lo = _mm512_castpd_si512(val.v_lo); + ret.v_hi = _mm512_castpd_si512(val.v_hi); return ret; } static FORCEINLINE __vec16_d __cast_bits(__vec16_d, __vec16_i64 val) { __vec16_d ret; - ret.v2 = _mm512_castsi512_pd( - _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, - _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), - val.v_hi)); - ret.v2 = _mm512_castsi512_pd( - _mm512_mask_permutevar_epi32(_mm512_castpd_si512(ret.v2), 0x5555, - _mm512_set_16to16_pi(15,15,14,14,13,13,12,12,11,11,10,10,9,9,8,8), - val.v_lo)); - ret.v1 = _mm512_castsi512_pd( - _mm512_mask_permutevar_epi32(_mm512_undefined_epi32(), 0xAAAA, - _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), - val.v_hi)); - ret.v1 = _mm512_castsi512_pd( - _mm512_mask_permutevar_epi32(_mm512_castpd_si512(ret.v1), 0x5555, - _mm512_set_16to16_pi(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0), - val.v_lo)); + ret.v_lo = _mm512_castsi512_pd(val.v_lo); + ret.v_hi = _mm512_castsi512_pd(val.v_hi); return ret; } @@ -2664,8 +2465,8 @@ static FORCEINLINE int64_t __max_uniform_uint64(uint64_t a, uint64_t b) { return static FORCEINLINE __vec16_f __max_varying_float (__vec16_f v1, __vec16_f v2) { return _mm512_max_ps(v1, v2); } static FORCEINLINE __vec16_f __min_varying_float (__vec16_f v1, __vec16_f v2) { return _mm512_min_ps(v1, v2); } -static FORCEINLINE __vec16_d __max_varying_double(__vec16_d v1, __vec16_d v2) { return __vec16_d(_mm512_max_pd(v1.v1, v2.v1), _mm512_max_pd(v1.v2,v2.v2)); } -static FORCEINLINE __vec16_d __min_varying_double(__vec16_d v1, __vec16_d v2) { return __vec16_d(_mm512_min_pd(v1.v1, v2.v1), _mm512_min_pd(v1.v2,v2.v2)); } +static FORCEINLINE __vec16_d __max_varying_double(__vec16_d v1, __vec16_d v2) { 
return __vec16_d(_mm512_max_pd(v1.v_lo, v2.v_lo), _mm512_max_pd(v1.v_hi,v2.v_hi)); } +static FORCEINLINE __vec16_d __min_varying_double(__vec16_d v1, __vec16_d v2) { return __vec16_d(_mm512_min_pd(v1.v_lo, v2.v_lo), _mm512_min_pd(v1.v_hi,v2.v_hi)); } static FORCEINLINE __vec16_i32 __max_varying_int32 (__vec16_i32 v1, __vec16_i32 v2) { return _mm512_max_epi32(v1, v2); } static FORCEINLINE __vec16_i32 __min_varying_int32 (__vec16_i32 v1, __vec16_i32 v2) { return _mm512_min_epi32(v1, v2); } @@ -2843,72 +2644,32 @@ static FORCEINLINE uint32_t __reduce_max_uint32(__vec16_i32 v) { } static FORCEINLINE int64_t __reduce_add_int64(__vec16_i64 v) { - __m512i tmp1; - __m512i tmp2; - hilo2zmm(v, tmp1, tmp2); -#if __INTEL_COMPILER < 1500 - int64_t res1 = _mm512_reduce_add_epi64((__m512)tmp1); - int64_t res2 = _mm512_reduce_add_epi64((__m512)tmp2); -#else - int64_t res1 = _mm512_reduce_add_epi64(tmp1); - int64_t res2 = _mm512_reduce_add_epi64(tmp2); -#endif + int64_t res1 = _mm512_reduce_add_epi64(v.v_lo); + int64_t res2 = _mm512_reduce_add_epi64(v.v_hi); return res1 + res2; } static FORCEINLINE int64_t __reduce_min_int64(__vec16_i64 v) { - __m512i tmp1; - __m512i tmp2; - hilo2zmm(v, tmp1, tmp2); -#if __INTEL_COMPILER < 1500 - int64_t res1 = _mm512_reduce_min_epi64((__m512)tmp1); - int64_t res2 = _mm512_reduce_min_epi64((__m512)tmp2); -#else - int64_t res1 = _mm512_reduce_min_epi64(tmp1); - int64_t res2 = _mm512_reduce_min_epi64(tmp2); -#endif + int64_t res1 = _mm512_reduce_min_epi64(v.v_lo); + int64_t res2 = _mm512_reduce_min_epi64(v.v_hi); return (res1 < res2) ? res1 : res2; } static FORCEINLINE int64_t __reduce_max_int64(__vec16_i64 v) { - __m512i tmp1; - __m512i tmp2; - hilo2zmm(v, tmp1, tmp2); -#if __INTEL_COMPILER < 1500 - int64_t res1 = _mm512_reduce_max_epi64((__m512)tmp1); - int64_t res2 = _mm512_reduce_max_epi64((__m512)tmp2); -#else - int64_t res1 = _mm512_reduce_max_epi64(tmp1); - int64_t res2 = _mm512_reduce_max_epi64(tmp2); -#endif + int64_t res1 = _mm512_reduce_max_epi64(v.v_lo); + int64_t res2 = _mm512_reduce_max_epi64(v.v_hi); return (res1 > res2) ? res1 : res2; } static FORCEINLINE uint64_t __reduce_min_uint64(__vec16_i64 v) { - __m512i tmp1; - __m512i tmp2; - hilo2zmm(v, tmp1, tmp2); -#if __INTEL_COMPILER < 1500 - uint64_t res1 = _mm512_reduce_min_epu64((__m512)tmp1); - uint64_t res2 = _mm512_reduce_min_epu64((__m512)tmp2); -#else - uint64_t res1 = _mm512_reduce_min_epu64(tmp1); - uint64_t res2 = _mm512_reduce_min_epu64(tmp2); -#endif + uint64_t res1 = _mm512_reduce_min_epu64(v.v_lo); + uint64_t res2 = _mm512_reduce_min_epu64(v.v_hi); return (res1 < res2) ? res1 : res2; } static FORCEINLINE uint64_t __reduce_max_uint64(__vec16_i64 v) { - __m512i tmp1; - __m512i tmp2; - hilo2zmm(v, tmp1, tmp2); -#if __INTEL_COMPILER < 1500 - uint64_t res1 = _mm512_reduce_max_epu64((__m512)tmp1); - uint64_t res2 = _mm512_reduce_max_epu64((__m512)tmp2); -#else - uint64_t res1 = _mm512_reduce_max_epu64(tmp1); - uint64_t res2 = _mm512_reduce_max_epu64(tmp2); -#endif + uint64_t res1 = _mm512_reduce_max_epu64(v.v_lo); + uint64_t res2 = _mm512_reduce_max_epu64(v.v_hi); return (res1 > res2) ? 
res1 : res2; } @@ -2925,15 +2686,15 @@ static FORCEINLINE float __reduce_max_float(__vec16_f v) { } static FORCEINLINE float __reduce_add_double(__vec16_d v) { - return _mm512_reduce_add_pd(v.v1) + _mm512_reduce_add_pd(v.v2); + return _mm512_reduce_add_pd(v.v_lo) + _mm512_reduce_add_pd(v.v_hi); } static FORCEINLINE float __reduce_min_double(__vec16_d v) { - return std::min(_mm512_reduce_min_pd(v.v1), _mm512_reduce_min_pd(v.v2)); + return std::min(_mm512_reduce_min_pd(v.v_lo), _mm512_reduce_min_pd(v.v_hi)); } static FORCEINLINE float __reduce_max_double(__vec16_d v) { - return std::max(_mm512_reduce_max_pd(v.v1), _mm512_reduce_max_pd(v.v2)); + return std::max(_mm512_reduce_max_pd(v.v_lo), _mm512_reduce_max_pd(v.v_hi)); } /////////////////////////////////////////////////////////////////////////// @@ -2943,9 +2704,9 @@ static FORCEINLINE float __reduce_max_double(__vec16_d v) { // Currently, when a pseudo_gather is converted into a masked load, it has to be unaligned static FORCEINLINE __vec16_i32 __masked_load_i32(void *p, __vec16_i1 mask) { #ifdef ISPC_FORCE_ALIGNED_MEMORY - return _mm512_mask_load_epi32(__vec16_i32(), mask, p); + return _mm512_mask_load_epi32(_mm512_undefined_epi32(), mask, p); #else - return _mm512_mask_loadu_epi32(__vec16_i32(), mask, p); + return _mm512_mask_loadu_epi32(_mm512_undefined_epi32(), mask, p); #endif } @@ -2958,21 +2719,31 @@ static FORCEINLINE __vec16_f __masked_load_float(void *p, __vec16_i1 mask) { } static FORCEINLINE __vec16_i64 __masked_load_i64(void *p, __vec16_i1 mask) { - __vec16_i32 first8 = __masked_load_i32(p, mask); - __vec16_i32 second8 = __masked_load_i32(p + 64, mask); - return zmm2hilo(first8, second8); +#ifdef ISPC_FORCE_ALIGNED_MEMORY + __vec16_i64 ret; + ret.v_lo = _mm512_mask_load_epi64(_mm512_undefined_epi32(), mask.lo(),p); + ret.v_hi = _mm512_mask_load_epi64(_mm512_undefined_epi32(), mask.hi(),p+64); + return ret; +#else + __vec16_i64 ret; + ret.v_lo = _mm512_mask_loadu_epi64(_mm512_undefined_epi32(), mask.lo(),p); + ret.v_hi = _mm512_mask_loadu_epi64(_mm512_undefined_epi32(), mask.hi(),p+64); + return ret; +#endif } static FORCEINLINE __vec16_d __masked_load_double(void *p, __vec16_i1 mask) { - __vec16_d ret; #ifdef ISPC_FORCE_ALIGNED_MEMORY - ret.v1 = _mm512_mask_load_pd(ret.v1, mask, p); - ret.v2 = _mm512_mask_load_pd(ret.v2, (mask << 8), (uint8_t*)p+64); -#else - ret.v1 = _mm512_mask_loadu_pd(ret.v1, mask, p); - ret.v2 = _mm512_mask_loadu_pd(ret.v2, (mask << 8), (uint8_t*)p+64); -#endif + __vec16_d ret; + ret.v_lo = _mm512_mask_load_pd(_mm512_undefined_pd(), mask.lo(), p); + ret.v_hi = _mm512_mask_load_pd(_mm512_undefined_pd(), mask.hi(), (uint8_t*)p+64); return ret; +#else + __vec16_d ret; + ret.v_lo = _mm512_mask_loadu_pd(_mm512_undefined_pd(), mask.lo(), p); + ret.v_hi = _mm512_mask_loadu_pd(_mm512_undefined_pd(), mask.hi(), (uint8_t*)p+64); + return ret; +#endif } static FORCEINLINE void __masked_store_i8(void *p, const __vec16_i8 &val, __vec16_i1 mask) { @@ -3080,28 +2851,29 @@ template static FORCEINLINE void __store(__vec16_i16 *p, __vec16_i16 static FORCEINLINE void __masked_store_i32(void *p, __vec16_i32 val, __vec16_i1 mask) { #ifdef ISPC_FORCE_ALIGNED_MEMORY - _mm512_mask_store_epi32(p, mask, val.v); + _mm512_mask_store_epi32(p, mask, val); #else - _mm512_mask_storeu_epi32(p, mask, val.v); + _mm512_mask_storeu_epi32(p, mask, val); #endif } -static FORCEINLINE void __masked_store_float(void *p, __vec16_f val, __vec16_i1 mask) { +static FORCEINLINE void __masked_store_float(void *p, __vec16_f val, + __vec16_i1 mask) { 
#ifdef ISPC_FORCE_ALIGNED_MEMORY - _mm512_mask_store_ps(p, mask, val.v); + _mm512_mask_store_ps(p, mask, val); #else - _mm512_mask_storeu_ps(p, mask, val.v); + _mm512_mask_storeu_ps(p, mask, val); #endif } static FORCEINLINE void __masked_store_double(void *p, __vec16_d val, __vec16_i1 mask) { #ifdef ISPC_FORCE_ALIGNED_MEMORY - _mm512_mask_store_pd(p, mask, val.v1); - _mm512_mask_store_pd((uint8_t*)p+64, (mask << 8), val.v2); + _mm512_mask_store_pd(p, mask.lo(), val.v_lo); + _mm512_mask_store_pd((uint8_t*)p+64, mask.hi(), val.v_hi); #else - _mm512_mask_storeu_pd(p, mask, val.v1); - _mm512_mask_storeu_pd((uint8_t*)p+64, (mask << 8), val.v2); + _mm512_mask_storeu_pd(p, mask.lo(), val.v_lo); + _mm512_mask_storeu_pd((uint8_t*)p+64, mask.hi(), val.v_hi); #endif } @@ -3150,107 +2922,63 @@ __gather_base_offsets32_i16(uint8_t *base, uint32_t scale, __vec16_i32 offsets, static FORCEINLINE __vec16_i32 __gather_base_offsets32_i32(uint8_t *base, uint32_t scale, __vec16_i32 offsets, __vec16_i1 mask) { - return _mm512_mask_i32extgather_epi32(_mm512_undefined_epi32(), mask, offsets, - base, _MM_UPCONV_EPI32_NONE, scale, - _MM_HINT_NONE); + return _mm512_mask_i32gather_epi32(_mm512_undefined_epi32(), mask, offsets, base, scale); } static FORCEINLINE __vec16_f __gather_base_offsets32_float(uint8_t *base, uint32_t scale, __vec16_i32 offsets, - __vec16_i1 mask) { - return _mm512_mask_i32extgather_ps(_mm512_undefined_ps(), mask, offsets, - base, _MM_UPCONV_PS_NONE, scale, - _MM_HINT_NONE); + __vec16_i1 mask) { + return _mm512_mask_i32gather_ps(_mm512_undefined_ps(), mask, offsets, base, scale); } static FORCEINLINE __vec16_d __gather_base_offsets32_double(uint8_t *base, uint32_t scale, __vec16_i32 offsets, __vec16_i1 mask) { __vec16_d ret; - ret.v1 = _mm512_mask_i32loextgather_pd(_mm512_undefined_pd(), mask, offsets, - base, _MM_UPCONV_PD_NONE, scale, - _MM_HINT_NONE); - __m512i shuffled_offsets = _mm512_permute4f128_epi32(offsets.v, _MM_PERM_DCDC); - ret.v2 = _mm512_mask_i32loextgather_pd(_mm512_undefined_pd(), mask, shuffled_offsets, - base, _MM_UPCONV_PD_NONE, scale, - _MM_HINT_NONE); + __m256i offsets_lo = _mm512_extracti64x4_epi64(offsets, 0); + __m256i offsets_hi = _mm512_extracti64x4_epi64(offsets, 1); + + ret.v_lo = _mm512_mask_i32gather_pd(_mm512_undefined_pd(), mask.lo(), + offsets_lo, base, scale); + ret.v_hi = _mm512_mask_i32gather_pd(_mm512_undefined_pd(), mask.hi(), + offsets_hi, base, scale); return ret; } static FORCEINLINE __vec16_i32 __gather64_i32(__vec16_i64 addr, __vec16_i1 mask) { - __vec16_i32 ret; - - // There is no gather instruction with 64-bit offsets in KNC. 
- // We have to manually iterate over the upper 32 bits ;-) - __vec16_i1 still_to_do = mask; - const __vec16_i32 signed_offsets = _mm512_add_epi32(addr.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); - while (still_to_do) { - int first_active_lane = _mm_tzcnt_32((int)still_to_do); - const uint &hi32 = ((uint*)&addr.v_hi)[first_active_lane]; - __vec16_i1 match = _mm512_mask_cmp_epi32_mask(still_to_do, addr.v_hi, - __smear_i32<__vec16_i32>((int32_t)hi32), - _MM_CMPINT_EQ); - - void * base = (void*)((((unsigned long)hi32) << 32) + (unsigned long)(-(long)INT_MIN)); - - ret.v = _mm512_mask_i32extgather_epi32(ret.v, match, signed_offsets, - base, _MM_UPCONV_EPI32_NONE, 1, - _MM_HINT_NONE); - still_to_do = _mm512_kxor(match, still_to_do); - } - - return ret; + __m256i val_lo = _mm512_mask_i64gather_epi32(_mm256_undefined_si256(), mask.lo(), + addr.v_lo, 0, 1); + __m256i val_hi = _mm512_mask_i64gather_epi32(_mm256_undefined_si256(), mask.hi(), + addr.v_hi, 0, 1); + + return _mm512_inserti64x4(_mm512_castsi256_si512(val_lo), val_hi, 1); } static FORCEINLINE __vec16_f __gather64_float(__vec16_i64 addr, __vec16_i1 mask) { - __vec16_f ret; + __m256 val_lo = _mm512_mask_i64gather_ps(_mm256_undefined_ps(), mask.lo(), + addr.v_lo, 0, 1); + __m256 val_hi = _mm512_mask_i64gather_ps(_mm256_undefined_ps(), mask.hi(), + addr.v_hi, 0, 1); - // There is no gather instruction with 64-bit offsets in KNC. - // We have to manually iterate over the upper 32 bits ;-) - __vec16_i1 still_to_do = mask; - const __vec16_i32 signed_offsets = _mm512_add_epi32(addr.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); - while (still_to_do) { - int first_active_lane = _mm_tzcnt_32((int)still_to_do); - const uint &hi32 = ((uint*)&addr.v_hi)[first_active_lane]; - __vec16_i1 match = _mm512_mask_cmp_epi32_mask(still_to_do,addr.v_hi, - __smear_i32<__vec16_i32>((int32_t)hi32), - _MM_CMPINT_EQ); - - void * base = (void*)((((unsigned long)hi32) << 32) + (unsigned long)(-(long)INT_MIN)); - - ret.v = _mm512_mask_i32extgather_ps(ret.v, match, signed_offsets, - base, _MM_UPCONV_PS_NONE, 1, - _MM_HINT_NONE); - still_to_do = _mm512_kxor(match, still_to_do); - } - - return ret; + return _mm512_insertf64x4(_mm512_castps_pd(_mm512_castps256_ps512(val_lo)), + _mm256_castps_pd(val_hi), 1); } static FORCEINLINE __vec16_d __gather64_double(__vec16_i64 addr, __vec16_i1 mask) { __vec16_d ret; - - __vec16_i32 addr_lo, addr_hi; - hilo2zmm(addr, addr_lo.v, addr_hi.v); -#if __INTEL_COMPILER < 1500 - ret.v1 = (__m512d)_mm512_i64extgather_pd ((__m512)addr_lo.v, 0, _MM_UPCONV_PD_NONE, 1, _MM_HINT_NONE); - ret.v2 = (__m512d)_mm512_i64extgather_pd ((__m512)addr_hi.v, 0, _MM_UPCONV_PD_NONE, 1, _MM_HINT_NONE); -#else - ret.v1 = _mm512_i64extgather_pd (addr_lo, 0, _MM_UPCONV_PD_NONE, 1, _MM_HINT_NONE); - ret.v2 = _mm512_i64extgather_pd (addr_hi, 0, _MM_UPCONV_PD_NONE, 1, _MM_HINT_NONE); -#endif + ret.v_lo = _mm512_mask_i64gather_pd(_mm512_undefined_pd(), mask.lo(), addr.v_lo, 0, 1); + ret.v_hi = _mm512_mask_i64gather_pd(_mm512_undefined_pd(), mask.hi(), addr.v_hi, 0, 1); + return ret; } - - /*! gather with 64-bit offsets. 
\todo add optimization that falls back to 32-bit offset gather if @@ -3261,28 +2989,14 @@ __gather64_double(__vec16_i64 addr, __vec16_i1 mask) static FORCEINLINE __vec16_f __gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, __vec16_i1 mask) { + __m256 val_lo = _mm512_mask_i64gather_ps(_mm256_undefined_ps(), mask.lo(), + offsets.v_lo, _base, 1); + __m256 val_hi = _mm512_mask_i64gather_ps(_mm256_undefined_ps(), mask.hi(), + offsets.v_hi, _base, 1); - const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); - // There is no gather instruction with 64-bit offsets in KNC. - // We have to manually iterate over the upper 32 bits ;-) - __vec16_i1 still_to_do = mask; - __vec16_f ret; - while (still_to_do) { - int first_active_lane = _mm_tzcnt_32((int)still_to_do); - const uint &hi32 = ((uint*)&offsets.v_hi)[first_active_lane]; - __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi, - __smear_i32<__vec16_i32>((int32_t)hi32), - _MM_CMPINT_EQ); - void * base = (void*)((unsigned long)_base + - ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN)); + return _mm512_insertf64x4(_mm512_castps_pd(_mm512_castps256_ps512(val_lo)), + _mm256_castps_pd(val_hi), 1); - ret = _mm512_mask_i32extgather_ps(ret, match, signed_offsets, base, - _MM_UPCONV_PS_NONE, scale, - _MM_HINT_NONE); - still_to_do = _mm512_kxor(match, still_to_do); - } - - return ret; } static FORCEINLINE __vec16_i8 __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, @@ -3348,71 +3062,21 @@ __gather64_i16(__vec16_i64 addr, __vec16_i1 mask) } static FORCEINLINE void __scatter_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, - __vec16_f value, - __vec16_i1 mask) { - - const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); - __vec16_i1 still_to_do = mask; - while (still_to_do) { - int first_active_lane = _mm_tzcnt_32((int)still_to_do); - const uint &hi32 = ((uint*)&offsets.v_hi)[first_active_lane]; - __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi, - __smear_i32<__vec16_i32>((int32_t)hi32), - _MM_CMPINT_EQ); - - void * base = (void*)((unsigned long)_base + - ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN)); - _mm512_mask_i32extscatter_ps(base, match, signed_offsets, - value, - _MM_DOWNCONV_PS_NONE, scale, - _MM_HINT_NONE); - still_to_do = _mm512_kxor(match,still_to_do); - } + __vec16_f value, __vec16_i1 mask) { + _mm512_mask_i64scatter_ps(_base, mask.lo(), offsets.v_lo, _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(value), 0)), scale); + _mm512_mask_i64scatter_ps(_base, mask.hi(), offsets.v_hi, _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(value), 1)), scale); } static FORCEINLINE void __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, - __vec16_i32 value, - __vec16_i1 mask) { - - const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); - __vec16_i1 still_to_do = mask; - while (still_to_do) { - int first_active_lane = _mm_tzcnt_32((int)still_to_do); - const uint &hi32 = ((uint*)&offsets.v_hi)[first_active_lane]; - __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi, - __smear_i32<__vec16_i32>((int32_t)hi32), - _MM_CMPINT_EQ); - - void * base = (void*)((unsigned long)_base + - ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN)); - 
_mm512_mask_i32extscatter_epi32(base, match, signed_offsets, - value, - _MM_DOWNCONV_EPI32_NONE, scale, - _MM_HINT_NONE); - still_to_do = _mm512_kxor(match,still_to_do); - } + __vec16_i32 value, __vec16_i1 mask) { + _mm512_mask_i64scatter_epi32(_base, mask.lo(), offsets.v_lo, _mm512_extracti64x4_epi64(value, 0), scale); + _mm512_mask_i64scatter_epi32(_base, mask.hi(), offsets.v_hi, _mm512_extracti64x4_epi64(value, 1), scale); } static FORCEINLINE void __scatter_base_offsets64_i64(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, - __vec16_i64 value, - __vec16_i1 mask) { - - const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); - __vec16_i1 still_to_do = mask; - while (still_to_do) { - int first_active_lane = _mm_tzcnt_32((int)still_to_do); - const uint &hi32 = ((uint*)&offsets.v_hi)[first_active_lane]; - __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi, - __smear_i32<__vec16_i32>((int32_t)hi32), - _MM_CMPINT_EQ); - - void * base = (void*)((unsigned long)_base + ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN)); - - _mm512_mask_i32extscatter_epi32(base, match, signed_offsets, value.v_lo, _MM_DOWNCONV_EPI32_NONE, scale, _MM_HINT_NONE); - _mm512_mask_i32extscatter_epi32(base + sizeof(uint32_t), match, signed_offsets, value.v_hi, _MM_DOWNCONV_EPI32_NONE, scale, _MM_HINT_NONE); - - still_to_do = _mm512_kxor(match,still_to_do); - } + __vec16_i64 value, __vec16_i1 mask) { + _mm512_mask_i64scatter_epi64(_base, mask.lo(), offsets.v_lo, value.v_lo, scale); + _mm512_mask_i64scatter_epi64(_base, mask.hi(), offsets.v_hi, value.v_hi, scale); } static FORCEINLINE void // TODO @@ -3447,75 +3111,62 @@ static FORCEINLINE __vec16_i32 __gather_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, __vec16_i1 mask) { - __vec16_f r = __gather_base_offsets64_float(_base,scale,offsets,mask); - return (__vec16_i32&)r; + __m256i lo = _mm512_mask_i64gather_epi32(_mm256_undefined_si256(), mask.lo(), + offsets.v_lo, _base, scale); + __m256i hi = _mm512_mask_i64gather_epi32(_mm256_undefined_si256(), mask.hi(), + offsets.v_hi, _base, scale); + return _mm512_inserti64x4(_mm512_castsi256_si512(lo), hi, 1); } // scatter static FORCEINLINE void __scatter_base_offsets32_i32(uint8_t *b, uint32_t scale, __vec16_i32 offsets, __vec16_i32 val, __vec16_i1 mask) { - _mm512_mask_i32extscatter_epi32(b, mask, offsets, val, _MM_DOWNCONV_EPI32_NONE, scale, _MM_HINT_NONE); + _mm512_mask_i32scatter_epi32(b, mask, offsets, val, scale); } static FORCEINLINE void __scatter_base_offsets32_i64(uint8_t *b, uint32_t scale, __vec16_i32 offsets, __vec16_i64 val, __vec16_i1 mask) { - _mm512_mask_i32extscatter_epi32(b, mask, offsets, val.v_lo, _MM_DOWNCONV_EPI32_NONE, scale, _MM_HINT_NONE); - _mm512_mask_i32extscatter_epi32(b + sizeof(uint32_t), mask, offsets, val.v_hi, _MM_DOWNCONV_EPI32_NONE, scale, _MM_HINT_NONE); + __m256i offsets_lo = _mm512_extracti64x4_epi64(offsets, 0); + __m256i offsets_hi = _mm512_extracti64x4_epi64(offsets, 1); + + _mm512_mask_i32scatter_epi64(b, mask.lo(), offsets_lo, val.v_lo, scale); + _mm512_mask_i32scatter_epi64(b, mask.hi(), offsets_hi, val.v_hi, scale); } static FORCEINLINE void __scatter_base_offsets32_float(void *base, uint32_t scale, __vec16_i32 offsets, __vec16_f val, __vec16_i1 mask) { - _mm512_mask_i32extscatter_ps(base, mask, offsets, val, _MM_DOWNCONV_PS_NONE, scale, _MM_HINT_NONE); + _mm512_mask_i32scatter_ps(base, mask, offsets, val, scale); } static FORCEINLINE void 
__scatter_base_offsets32_double(void *base, uint32_t scale, __vec16_i32 offsets, __vec16_d val, __vec16_i1 mask) { - _mm512_mask_i32loextscatter_pd(base, mask, offsets, val.v1, _MM_DOWNCONV_PD_NONE, scale, _MM_HINT_NONE); - __m512i shuffled_offsets = _mm512_permute4f128_epi32(offsets.v, _MM_PERM_DCDC); - const __mmask8 mask8 = 0x00FF & (mask >> 8); - _mm512_mask_i32loextscatter_pd(base, mask8, shuffled_offsets, val.v2, _MM_DOWNCONV_PD_NONE, scale, _MM_HINT_NONE); + __m256i offsets_lo = _mm512_extracti64x4_epi64(offsets, 0); + __m256i offsets_hi = _mm512_extracti64x4_epi64(offsets, 1); + + _mm512_mask_i32scatter_pd(base, mask.lo(), offsets_lo, val.v_lo, scale); + _mm512_mask_i32scatter_pd(base, mask.hi(), offsets_hi, val.v_hi, scale); } static FORCEINLINE void __scatter64_float(__vec16_i64 ptrs, __vec16_f val, __vec16_i1 mask){ -#if __INTEL_COMPILER < 1500 - #warning "__scatter64_float is slow due to outdated compiler" - __scatter_base_offsets64_float(0, 1, ptrs, val, mask); -#else - __vec16_i32 first8ptrs, second8ptrs; - hilo2zmm(ptrs, first8ptrs.v, second8ptrs.v); - _mm512_mask_i64scatter_pslo (0, mask, first8ptrs, val, 1); - const __mmask8 mask_hi = 0x00FF & (mask >> 8); - _mm512_mask_i64scatter_pslo (0, mask_hi, second8ptrs, _mm512_permute4f128_ps(val.v, _MM_PERM_DCDC), 1); -#endif + __m256 val_lo = _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(val), 0)); + __m256 val_hi = _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(val), 1)); + + _mm512_mask_i64scatter_ps(0, mask.lo(), ptrs.v_lo, val_lo, 1); + _mm512_mask_i64scatter_ps(0, mask.hi(), ptrs.v_hi, val_hi, 1); } static FORCEINLINE void __scatter64_i32(__vec16_i64 ptrs, __vec16_i32 val, __vec16_i1 mask) { -#if __INTEL_COMPILER < 1500 - #warning "__scatter64_i32 is slow due to outdated compiler" - __scatter_base_offsets64_i32(0, 1, ptrs, val, mask); -#else - __vec16_i32 first8ptrs, second8ptrs; - hilo2zmm(ptrs, first8ptrs.v, second8ptrs.v); - _mm512_mask_i64scatter_epi32lo (0, mask, first8ptrs, val, 1); - const __mmask8 mask_hi = 0x00FF & (mask >> 8); - _mm512_mask_i64scatter_epi32lo (0, mask_hi, second8ptrs, _mm512_permute4f128_epi32(val.v, _MM_PERM_DCDC), 1); -#endif + __m256i val_lo = _mm512_extracti64x4_epi64(val, 0); + __m256i val_hi = _mm512_extracti64x4_epi64(val, 1); + + _mm512_mask_i64scatter_epi32(0, mask.lo(), ptrs.v_lo, val_lo, 1); + _mm512_mask_i64scatter_epi32(0, mask.hi(), ptrs.v_hi, val_hi, 1); } static FORCEINLINE void __scatter64_i64(__vec16_i64 ptrs, __vec16_i64 val, __vec16_i1 mask) { -#if __INTEL_COMPILER < 1500 - #warning "__scatter64_i64 is slow due to outdated compiler" - __scatter_base_offsets64_i64(0, 1, ptrs, val, mask); -#else - __vec16_i32 first8ptrs, second8ptrs; - hilo2zmm(ptrs, first8ptrs.v, second8ptrs.v); - __vec16_i32 first8vals, second8vals; - hilo2zmm(val, first8vals.v, second8vals.v); - _mm512_mask_i64extscatter_epi64 (0, mask, first8ptrs, first8vals, _MM_DOWNCONV_EPI64_NONE, 1, _MM_HINT_NONE); - const __mmask8 mask8 = 0x00FF & (mask >> 8); - _mm512_mask_i64extscatter_epi64 (0, mask8, second8ptrs, second8vals, _MM_DOWNCONV_EPI64_NONE, 1, _MM_HINT_NONE); -#endif + _mm512_mask_i64scatter_epi64(0, mask.lo(), ptrs.v_lo, val.v_lo, 1); + _mm512_mask_i64scatter_epi64(0, mask.hi(), ptrs.v_hi, val.v_hi, 1); } @@ -3880,7 +3531,7 @@ static FORCEINLINE uint64_t __clock() { #define TRANSCENDENTALS(op) \ static FORCEINLINE __vec16_f __##op##_varying_float(__vec16_f v) { return _mm512_##op##_ps(v); } \ static FORCEINLINE float __##op##_uniform_float(float v) { return op##f(v); } \ -static 
FORCEINLINE __vec16_d __##op##_varying_double(__vec16_d v) { return __vec16_d(_mm512_##op##_pd(v.v1),_mm512_##op##_pd(v.v2)); } \ +static FORCEINLINE __vec16_d __##op##_varying_double(__vec16_d v) { return __vec16_d(_mm512_##op##_pd(v.v_lo),_mm512_##op##_pd(v.v_hi)); } \ static FORCEINLINE double __##op##_uniform_double(double a) { return op(a); } TRANSCENDENTALS(log) @@ -3889,7 +3540,7 @@ TRANSCENDENTALS(exp) static FORCEINLINE float __pow_uniform_float(float a, float b) { return powf(a, b);} static FORCEINLINE __vec16_f __pow_varying_float(__vec16_f a, __vec16_f b) { return _mm512_pow_ps(a,b); } static FORCEINLINE double __pow_uniform_double(double a, double b) { return pow(a,b);} -static FORCEINLINE __vec16_d __pow_varying_double(__vec16_d a, __vec16_d b) { return __vec16_d(_mm512_pow_pd(a.v1,b.v1),_mm512_pow_pd(a.v2,b.v2)); } +static FORCEINLINE __vec16_d __pow_varying_double(__vec16_d a, __vec16_d b) { return __vec16_d(_mm512_pow_pd(a.v_lo,b.v_lo),_mm512_pow_pd(a.v_hi,b.v_hi)); } /////////////////////////////////////////////////////////////////////////// // Trigonometry @@ -3904,7 +3555,7 @@ TRANSCENDENTALS(atan) static FORCEINLINE float __atan2_uniform_float(float a, float b) { return atan2f(a, b);} static FORCEINLINE __vec16_f __atan2_varying_float(__vec16_f a, __vec16_f b) { return _mm512_atan2_ps(a,b); } static FORCEINLINE double __atan2_uniform_double(double a, double b) { return atan2(a,b);} -static FORCEINLINE __vec16_d __atan2_varying_double(__vec16_d a, __vec16_d b) { return __vec16_d(_mm512_atan2_pd(a.v1,b.v1),_mm512_atan2_pd(a.v2,b.v2)); } +static FORCEINLINE __vec16_d __atan2_varying_double(__vec16_d a, __vec16_d b) { return __vec16_d(_mm512_atan2_pd(a.v_lo,b.v_lo),_mm512_atan2_pd(a.v_hi,b.v_hi)); } #undef FORCEINLINE #undef PRE_ALIGN From 4c96fd32798dae69e71fbaf75b05104a15b2bc8f Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 26 Mar 2015 12:38:53 +0300 Subject: [PATCH 11/33] cast_fptou(s)i compfail fix --- examples/intrinsics/knl.h | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index e1692d84..2814674a 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -50,7 +50,7 @@ #include // for operator<<(m512[i]) #if __INTEL_COMPILER < 1500 -#warning "Your compiler version is outdated which can reduce performance in some cases. Please, update your compiler!" +#warning "Only ICC 15.0 and older are supported. Please, update your compiler!" 
#endif @@ -1780,6 +1780,10 @@ static FORCEINLINE __vec16_i64 __cast_zext(const __vec16_i64 &, const __vec16_i3 return __vec16_i64(val.v, _mm512_setzero_epi32()); } +static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i32 val) { + return _mm512_cvtepi32_ps(val); +} + static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i8 val) { return __cast_sitofp(__vec16_f(), __cast_sext(__vec16_i32(), val)); } @@ -1788,10 +1792,6 @@ static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i16 val) { return __cast_sitofp(__vec16_f(), __cast_sext(__vec16_i32(), val)); } -static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i32 val) { - return _mm512_cvtepi32_ps(val); -} - static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i64 val) { __vec16_f ret; @@ -1863,7 +1863,6 @@ static FORCEINLINE __vec16_d __cast_sitofp(__vec16_d, __vec16_i64 val) { return ret; } - static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i1 v) { const __m512 ret = _mm512_setzero_ps(); @@ -1871,6 +1870,10 @@ static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i1 v) return _mm512_mask_mov_ps(ret, v, one); } +static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i32 v) { + return _mm512_cvtepu32_ps(v); +} + static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, const __vec16_i8 &v) { return __cast_uitofp(__vec16_f(), __cast_zext(__vec16_i32(), v)); } @@ -1879,10 +1882,6 @@ static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i16 val) { return __cast_uitofp(__vec16_f(), __cast_zext(__vec16_i32(), val)); } -static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i32 v) { - return _mm512_cvtepu32_ps(v); -} - static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i64 val) { __vec16_f ret; // Cycles don't work. It seems that it is icc bug. 
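
For reference, the pattern these knl.h changes keep applying is: a 16-wide 64-bit or double vector is held as two 8-wide AVX-512 registers (v_lo for lanes 0..7, v_hi for lanes 8..15), and the 16-lane mask is split into two __mmask8 halves so each half can be handed to a native AVX-512F gather/scatter, which only covers 8 double-width lanes per instruction. The sketch below is illustrative only and is not part of any patch; the names vec16d, gather_base_offsets32_double, base, byte_offsets and mask are hypothetical stand-ins for the header's __vec16_d, __gather_base_offsets32_double and mask.lo()/mask.hi(), and only AVX-512F intrinsics that exist with these signatures are used.

    #include <immintrin.h>

    struct vec16d { __m512d v_lo, v_hi; };   // lanes 0..7 and 8..15

    static inline vec16d
    gather_base_offsets32_double(const void *base, __m512i byte_offsets, __mmask16 mask)
    {
        // Low/high 8 of the 16 x i32 byte offsets, repacked as __m256i for the 8-wide gather.
        __m256i off_lo = _mm512_extracti64x4_epi64(byte_offsets, 0);
        __m256i off_hi = _mm512_extracti64x4_epi64(byte_offsets, 1);

        vec16d r;
        // Each half consumes 8 bits of the 16-lane mask; scale is 1 because offsets are in bytes.
        // Lanes whose mask bit is 0 stay undefined, mirroring the _mm512_undefined_pd() passthrough
        // used in the patches.
        r.v_lo = _mm512_mask_i32gather_pd(_mm512_undefined_pd(), (__mmask8)mask,
                                          off_lo, base, 1);
        r.v_hi = _mm512_mask_i32gather_pd(_mm512_undefined_pd(), (__mmask8)(mask >> 8),
                                          off_hi, base, 1);
        return r;
    }

The same two-halves split explains the scatter, masked load/store and reduction rewrites above: where KNC had ext-gather/scatter variants taking a full 16-lane mask plus an up/down-convert enum, AVX-512F on KNL exposes plain 8-lane gathers/scatters for 64-bit elements, so every 16-wide 64-bit operation becomes two instructions driven by mask.lo() and mask.hi().
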
From cd6f8249bf4b9fc5e495cf8cda70194ab8a2f88d Mon Sep 17 00:00:00 2001 From: Vsevolod Livinskiy Date: Thu, 26 Mar 2015 12:11:28 +0300 Subject: [PATCH 12/33] cast_fptosi/ui for double to i32 --- examples/intrinsics/knl.h | 17 +++++++++++------ examples/intrinsics/known_fails.txt | 13 +++++++++++++ 2 files changed, 24 insertions(+), 6 deletions(-) create mode 100644 examples/intrinsics/known_fails.txt diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index 2814674a..cfbbf00a 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -1985,9 +1985,11 @@ static FORCEINLINE __vec16_i64 __cast_fptosi(__vec16_i64, __vec16_f val) { } static FORCEINLINE __vec16_i32 __cast_fptosi(__vec16_i32, __vec16_d val) { - __vec16_i32 tmp = _mm512_cvtfxpnt_roundpd_epi32lo(val.v_hi, _MM_ROUND_MODE_TOWARD_ZERO); - __vec16_i32 ret_hi8 = _mm512_permute4f128_epi32(tmp, _MM_PERM_BADC); - __vec16_i32 ret_lo8 = _mm512_cvtfxpnt_roundpd_epi32lo(val.v_lo, _MM_ROUND_MODE_TOWARD_ZERO); + __m256i tmp = _mm512_cvtpd_epi32(val.v_hi); + __vec16_i32 tmp1 = _mm512_castsi256_si512 (tmp); + __vec16_i32 ret_hi8 = _mm512_permute4f128_epi32(tmp1, _MM_PERM_BADC); + __m256i tmp2 = _mm512_cvtpd_epi32(val.v_lo); + __vec16_i32 ret_lo8 = _mm512_castsi256_si512 (tmp2); return _mm512_xor_epi32(ret_lo8, ret_hi8); } @@ -2049,9 +2051,11 @@ static FORCEINLINE __vec16_i64 __cast_fptoui(__vec16_i64, __vec16_f val) { } static FORCEINLINE __vec16_i32 __cast_fptoui(__vec16_i32, __vec16_d val) { - __vec16_i32 tmp = _mm512_cvtfxpnt_roundpd_epu32lo(val.v_hi, _MM_ROUND_MODE_TOWARD_ZERO); - __vec16_i32 ret_hi8 = _mm512_permute4f128_epi32(tmp, _MM_PERM_BADC); - __vec16_i32 ret_lo8 = _mm512_cvtfxpnt_roundpd_epu32lo(val.v_lo, _MM_ROUND_MODE_TOWARD_ZERO); + __m256i tmp = _mm512_cvtpd_epu32(val.v_hi); + __vec16_i32 tmp1 = _mm512_castsi256_si512 (tmp); + __vec16_i32 ret_hi8 = _mm512_permute4f128_epi32(tmp1, _MM_PERM_BADC); + __m256i tmp2 = _mm512_cvtpd_epu32(val.v_lo); + __vec16_i32 ret_lo8 = _mm512_castsi256_si512 (tmp2); return _mm512_xor_epi32(ret_lo8, ret_hi8); } @@ -2434,6 +2438,7 @@ static FORCEINLINE double __ceil_uniform_double(double v) { } static FORCEINLINE __vec16_f __round_varying_float(__vec16_f v) { + return _mm512_round_ps(v, _MM_ROUND_MODE_NEAREST, _MM_EXPADJ_NONE); } diff --git a/examples/intrinsics/known_fails.txt b/examples/intrinsics/known_fails.txt new file mode 100644 index 00000000..cda10901 --- /dev/null +++ b/examples/intrinsics/known_fails.txt @@ -0,0 +1,13 @@ +=============================================================================== +__and_not2 : _mm512_kandnr -> _mm512_kandn + ./tests/cfor-c-cif-nested-continue.ispc + ./tests/cfor-c-test-134.ispc + ./tests/cfor-c-test-135.ispc + ./tests/cfor-c-test-136.ispc + ./tests/cfor-c-test-64.ispc + ./tests/cfor-c-test-70.ispc + ./tests/cfor-c-test-71.ispc + ./tests/recursion-forward-func-decl.ispc + ./tests/recursion.ispc +=============================================================================== + From b16aee93f98ca03ae5bd51aef6b3b91340fee7d9 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 26 Mar 2015 14:41:30 +0300 Subject: [PATCH 13/33] gather/scatter i8/16 loop implementations --- examples/intrinsics/knl.h | 119 ++++++++++++-------------------------- 1 file changed, 38 insertions(+), 81 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index 2814674a..0be4e318 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -665,28 +665,31 @@ static FORCEINLINE __vec16_i8 __cast_trunc(__vec16_i8, 
const __vec16_i16 i16) { static FORCEINLINE __vec16_i16 __cast_trunc(__vec16_i16, const __vec16_i32 i32) { __vec16_i16 ret; - __vec16_i32 i32_trunk = _mm512_and_epi32(i32, __smear_i32<__vec16_i32>(65535)); - _mm512_extstore_epi32(ret.v, i32_trunk, _MM_DOWNCONV_EPI32_UINT16, _MM_HINT_NONE); + //__vec16_i32 i32_trunk = _mm512_and_epi32(i32, __smear_i32<__vec16_i32>(65535)); + //_mm512_extstore_epi32(ret.v, i32_trunk, _MM_DOWNCONV_EPI32_UINT16, _MM_HINT_NONE); + + _mm512_mask_cvtepi32_storeu_epi16(ret.v, 0xFFFF, i32); return ret; } static FORCEINLINE __vec16_i8 __cast_trunc(__vec16_i8, const __vec16_i32 i32) { __vec16_i8 ret; - __vec16_i32 i32_trunk = _mm512_and_epi32(i32, __smear_i32<__vec16_i32>(255)); - _mm512_extstore_epi32(ret.v, i32, _MM_DOWNCONV_EPI32_UINT8, _MM_HINT_NONE); + //__vec16_i32 i32_trunk = _mm512_and_epi32(i32, __smear_i32<__vec16_i32>(255)); + //_mm512_extstore_epi32(ret.v, i32, _MM_DOWNCONV_EPI32_UINT8, _MM_HINT_NONE); + _mm512_mask_cvtepi32_storeu_epi8(ret.v, 0xFFFF, i32); return ret; } static FORCEINLINE __vec16_i32 __cast_trunc(__vec16_i32, const __vec16_i64 i64) { - return __vec16_i32(i64.v_lo); + return __vec16_i32(i64.v_lo); //TODO } static FORCEINLINE __vec16_i16 __cast_trunc(__vec16_i16, const __vec16_i64 i64) { - return __cast_trunc(__vec16_i16(), i64.v_lo); + return __cast_trunc(__vec16_i16(), i64.v_lo);//TODO } static FORCEINLINE __vec16_i8 __cast_trunc(__vec16_i8, const __vec16_i64 i64) { - return __cast_trunc(__vec16_i8(), i64.v_lo); + return __cast_trunc(__vec16_i8(), i64.v_lo);//TODO } static FORCEINLINE __vec16_i32 __rotate_i32(__vec16_i32 v, int index) { @@ -1715,8 +1718,6 @@ static FORCEINLINE __vec16_i64 __cast_sext(const __vec16_i64 &, const __vec16_i3 return __vec16_i64(val.v, _mm512_srai_epi32(val.v, 31)); } - - static FORCEINLINE __vec16_i8 __cast_zext(const __vec16_i8 &, const __vec16_i1 &val) { return __vec16_i8(val[0], val[1], val[2], val[3], val[4], val[5], val[6], val[7], @@ -3001,61 +3002,35 @@ __gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offset static FORCEINLINE __vec16_i8 __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, __vec16_i1 mask) { - const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); - __vec16_i1 still_to_do = mask; - __vec16_i32 tmp; - while (still_to_do) { - int first_active_lane = _mm_tzcnt_32((int)still_to_do); - const uint &hi32 = ((uint*)&offsets.v_hi)[first_active_lane]; - __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi, - __smear_i32<__vec16_i32>((int32_t)hi32), - _MM_CMPINT_EQ); - - void * base = (void*)((unsigned long)_base + - ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN)); - tmp = _mm512_mask_i32extgather_epi32(tmp, match, signed_offsets, base, - _MM_UPCONV_EPI32_SINT8, scale, - _MM_HINT_NONE); - still_to_do = _mm512_kxor(match,still_to_do); - } - __vec16_i8 ret; - _mm512_extstore_epi32(ret.v,tmp,_MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); - return ret; + // TODO + __vec16_i8 ret; + for (int i = 0; i < 16; ++i) + if ((mask & (1 << i)) != 0) { + int8_t *ptr = (int8_t *)(_base + scale * offsets[i]); + ret[i] = *ptr; + } + return ret; } -static FORCEINLINE __vec16_i8 -__gather64_i8(__vec16_i64 addr, __vec16_i1 mask) +static FORCEINLINE __vec16_i8 __gather64_i8(__vec16_i64 addr, __vec16_i1 mask) { return __gather_base_offsets64_i8(0, 1, addr, mask); } static FORCEINLINE __vec16_i16 __gather_base_offsets64_i16(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, 
__vec16_i1 mask) -{ - const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN)); - __vec16_i1 still_to_do = mask; - __vec16_i32 tmp; - while (still_to_do) { - int first_active_lane = _mm_tzcnt_32((int)still_to_do); - const uint &hi32 = ((uint*)&offsets.v_hi)[first_active_lane]; - __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi, - __smear_i32<__vec16_i32>((int32_t)hi32), - _MM_CMPINT_EQ); - - void * base = (void*)((unsigned long)_base + - ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN)); - tmp = _mm512_mask_i32extgather_epi32(tmp, match, signed_offsets, base, - _MM_UPCONV_EPI32_SINT16, scale, - _MM_HINT_NONE); - still_to_do = _mm512_kxor(match,still_to_do); - } - __vec16_i16 ret; - _mm512_extstore_epi32(ret.v,tmp,_MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); - return ret; +{ + // TODO + __vec16_i16 ret; + for (int i = 0; i < 16; ++i) + if ((mask & (1 << i)) != 0) { + int16_t *ptr = (int16_t *)(_base + scale * offsets[i]); + ret[i] = *ptr; + } + return ret; } -static FORCEINLINE __vec16_i16 -__gather64_i16(__vec16_i64 addr, __vec16_i1 mask) +static FORCEINLINE __vec16_i16 __gather64_i16(__vec16_i64 addr, __vec16_i1 mask) { return __gather_base_offsets64_i16(0, 1, addr, mask); } @@ -3078,36 +3053,18 @@ static FORCEINLINE void __scatter_base_offsets64_i64(uint8_t *_base, uint32_t sc _mm512_mask_i64scatter_epi64(_base, mask.hi(), offsets.v_hi, value.v_hi, scale); } -static FORCEINLINE void // TODO -__scatter_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, - __vec16_i8 value, - __vec16_i1 mask) { - __vec16_i1 still_to_do = mask; - - __vec16_i32 tmp = _mm512_extload_epi32(&value, _MM_UPCONV_EPI32_SINT8, - _MM_BROADCAST32_NONE, _MM_HINT_NONE); - // _mm512_mask_extstore_epi32(p, mask, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); - - while (still_to_do) { - int first_active_lane = _mm_tzcnt_32((int)still_to_do); - const uint &hi32 = ((uint*)&offsets.v_hi)[first_active_lane]; - __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi, - __smear_i32<__vec16_i32>((int32_t)hi32), - _MM_CMPINT_EQ); - - void * base = (void*)((unsigned long)_base + - ((scale*(unsigned long)hi32) << 32)); - _mm512_mask_i32extscatter_epi32(base, match, offsets.v_lo, - tmp, - _MM_DOWNCONV_EPI32_SINT8, scale, - _MM_HINT_NONE); - still_to_do = _mm512_kxor(match,still_to_do); - } +static FORCEINLINE void __scatter_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, + __vec16_i8 value, __vec16_i1 mask) { + // TODO + for (int i = 0; i < 16; ++i) + if ((mask & (1 << i)) != 0) { + int8_t *ptr = (int8_t *)(_base + scale * offsets[i]); + *ptr = value[i]; + } } -static FORCEINLINE __vec16_i32 -__gather_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, +static FORCEINLINE __vec16_i32 __gather_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, __vec16_i1 mask) { __m256i lo = _mm512_mask_i64gather_epi32(_mm256_undefined_si256(), mask.lo(), From 81cb374084284a34b98fc331c026c541f37f5376 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 26 Mar 2015 14:53:19 +0300 Subject: [PATCH 14/33] loop execution for i8/16 32-addr-bit gathers/scatters --- examples/intrinsics/knl.h | 61 +++++++++++++++++++-------------------- 1 file changed, 30 insertions(+), 31 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index 18704ea5..e1666c0c 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -2789,27 +2789,27 @@ 
template static FORCEINLINE void __store(__vec16_i8 *p, __vec16_i8 v *p = v; } -static FORCEINLINE void -__scatter_base_offsets32_i8(uint8_t *b, uint32_t scale, __vec16_i32 offsets, +static FORCEINLINE void __scatter_base_offsets32_i8(uint8_t *b, uint32_t scale, __vec16_i32 offsets, __vec16_i8 val, __vec16_i1 mask) { - __vec16_i32 tmp = _mm512_extload_epi32(&val,_MM_UPCONV_EPI32_SINT8, - _MM_BROADCAST32_NONE, _MM_HINT_NONE); - _mm512_mask_i32extscatter_epi32(b, mask, offsets, tmp, - _MM_DOWNCONV_EPI32_SINT8, scale, - _MM_HINT_NONE); + // TODO + for (int i = 0; i < 16; ++i) + if ((mask & (1 << i)) != 0) { + int8_t *ptr = (int8_t *)(_base + scale * offsets[i]); + *ptr = val[i]; + } } -static FORCEINLINE void -__scatter_base_offsets32_i16(uint8_t *b, uint32_t scale, __vec16_i32 offsets, +static FORCEINLINE void __scatter_base_offsets32_i16(uint8_t *b, uint32_t scale, __vec16_i32 offsets, __vec16_i16 val, __vec16_i1 mask) { - __vec16_i32 tmp = _mm512_extload_epi32(&val,_MM_UPCONV_EPI32_SINT16, - _MM_BROADCAST32_NONE, _MM_HINT_NONE); - _mm512_mask_i32extscatter_epi32(b, mask, offsets, tmp, - _MM_DOWNCONV_EPI32_SINT16, scale, - _MM_HINT_NONE); + // TODO + for (int i = 0; i < 16; ++i) + if ((mask & (1 << i)) != 0) { + int16_t *ptr = (int16_t *)(_base + scale * offsets[i]); + *ptr = val[i]; + } } @@ -2900,28 +2900,27 @@ static FORCEINLINE void __masked_store_blend_float(void *p, __vec16_f val, static FORCEINLINE __vec16_i8 __gather_base_offsets32_i8(uint8_t *base, uint32_t scale, __vec16_i32 offsets, - __vec16_i1 mask) { - // (iw): need to temporarily store as int because gathers can only return ints. - __vec16_i32 tmp = _mm512_mask_i32extgather_epi32(_mm512_undefined_epi32(), mask, offsets, base, - _MM_UPCONV_EPI32_SINT8, scale, - _MM_HINT_NONE); - // now, downconverting to chars into temporary char vector - __vec16_i8 ret; - _mm512_extstore_epi32(ret.v,tmp,_MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); - return ret; + // TODO + __vec16_i8 ret; + for (int i = 0; i < 16; ++i) + if ((mask & (1 << i)) != 0) { + int8_t *ptr = (int8_t *)(_base + scale * offsets[i]); + ret[i] = *ptr; + } + return ret; } static FORCEINLINE __vec16_i16 __gather_base_offsets32_i16(uint8_t *base, uint32_t scale, __vec16_i32 offsets, __vec16_i1 mask) { - // (iw): need to temporarily store as int because gathers can only return ints. - __vec16_i32 tmp = _mm512_mask_i32extgather_epi32(_mm512_undefined_epi32(), mask, offsets, base, - _MM_UPCONV_EPI32_SINT16, scale, - _MM_HINT_NONE); - // now, downconverting to chars into temporary char vector - __vec16_i16 ret; - _mm512_extstore_epi32(ret.v,tmp,_MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); - return ret; + // TODO + __vec16_i16 ret; + for (int i = 0; i < 16; ++i) + if ((mask & (1 << i)) != 0) { + int16_t *ptr = (int16_t *)(_base + scale * offsets[i]); + ret[i] = *ptr; + } + return ret; } static FORCEINLINE __vec16_i32 From 952609427226bad4f6e9a4b517470d51a517b804 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 26 Mar 2015 16:12:10 +0300 Subject: [PATCH 15/33] purge _MM_DOWNCONV... 
enum from knl.h --- examples/intrinsics/knl.h | 167 +++++++++++++++++--------------------- 1 file changed, 75 insertions(+), 92 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index e1666c0c..1146d66e 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -43,7 +43,6 @@ #endif #include -#include #include #include // for operator<<(m512[i]) @@ -1962,20 +1961,21 @@ static FORCEINLINE __vec16_i32 __cast_fptosi(__vec16_i32, __vec16_f val) { static FORCEINLINE __vec16_i8 __cast_fptosi(__vec16_i8, __vec16_f val) { __vec16_i8 ret; - __vec16_i32 tmp = __cast_fptosi(__vec16_i32(), val); - _mm512_extstore_epi32(ret.v, tmp, _MM_DOWNCONV_EPI32_SINT8, _MM_HINT_NONE); + __m128i tmp = _mm512_cvtepi32_epi8(__cast_fptosi(__vec16_i32(), val)); + _mm_storeu_si128((__m128i *)ret.v, tmp); return ret; } static FORCEINLINE __vec16_i16 __cast_fptosi(__vec16_i16, __vec16_f val) { __vec16_i16 ret; - __vec16_i32 tmp = __cast_fptosi(__vec16_i32(), val); - _mm512_extstore_epi32(ret.v, tmp, _MM_DOWNCONV_EPI32_SINT16, _MM_HINT_NONE); + __m256i tmp = _mm512_cvtepi32_epi16(__cast_fptosi(__vec16_i32(), val)); + _mm256_storeu_si256((__m256i *)ret.v, tmp); return ret; } static FORCEINLINE __vec16_i64 __cast_fptosi(__vec16_i64, __vec16_f val) { __vec16_i64 ret; + // TODO for (int i = 0; i < 8; i++) { ((int64_t*)&ret.v_lo)[i] = (int64_t)(((float*)&val)[i]); } @@ -1996,15 +1996,15 @@ static FORCEINLINE __vec16_i32 __cast_fptosi(__vec16_i32, __vec16_d val) { static FORCEINLINE __vec16_i8 __cast_fptosi(__vec16_i8, __vec16_d val) { __vec16_i8 ret; - __vec16_i32 tmp = __cast_fptosi(__vec16_i32(), val); - _mm512_extstore_epi32(ret.v, tmp, _MM_DOWNCONV_EPI32_SINT8, _MM_HINT_NONE); + __m128i tmp = _mm512_cvtepi32_epi8(__cast_fptosi(__vec16_i32(), val)); + _mm_storeu_si128((__m128i *)ret.v, tmp); return ret; } static FORCEINLINE __vec16_i16 __cast_fptosi(__vec16_i16, __vec16_d val) { __vec16_i16 ret; - __vec16_i32 tmp = __cast_fptosi(__vec16_i32(), val); - _mm512_extstore_epi32(ret.v, tmp, _MM_DOWNCONV_EPI32_SINT16, _MM_HINT_NONE); + __m256i tmp = _mm512_cvtepi32_epi16(__cast_fptosi(__vec16_i32(), val)); + _mm256_storeu_si256((__m256i *)ret.v, tmp); return ret; } @@ -2020,22 +2020,21 @@ static FORCEINLINE __vec16_i64 __cast_fptosi(__vec16_i64, __vec16_d val) { return ret; } - static FORCEINLINE __vec16_i32 __cast_fptoui(__vec16_i32, __vec16_f val) { return _mm512_cvt_roundps_epu32(val, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC); } static FORCEINLINE __vec16_i8 __cast_fptoui(__vec16_i8, __vec16_f val) { __vec16_i8 ret; - __vec16_i32 tmp = __cast_fptoui(__vec16_i32(), val); - _mm512_extstore_epi32(ret.v, tmp, _MM_DOWNCONV_EPI32_UINT8, _MM_HINT_NONE); + __m128i tmp = _mm512_cvtepi32_epi8(__cast_fptoui(__vec16_i32(), val)); + _mm_storeu_si128((__m128i *)ret.v, tmp); return ret; } static FORCEINLINE __vec16_i16 __cast_fptoui(__vec16_i16, __vec16_f val) { __vec16_i16 ret; - __vec16_i32 tmp = __cast_fptoui(__vec16_i32(), val); - _mm512_extstore_epi32(ret.v, tmp, _MM_DOWNCONV_EPI32_UINT16, _MM_HINT_NONE); + __m256i tmp = _mm512_cvtepi32_epi16(__cast_fptoui(__vec16_i32(), val)); + _mm256_storeu_si256((__m256i *)ret.v, tmp); return ret; } @@ -2062,15 +2061,15 @@ static FORCEINLINE __vec16_i32 __cast_fptoui(__vec16_i32, __vec16_d val) { static FORCEINLINE __vec16_i8 __cast_fptoui(__vec16_i8, __vec16_d val) { __vec16_i8 ret; - __vec16_i32 tmp = __cast_fptoui(__vec16_i32(), val); - _mm512_extstore_epi32(ret.v, tmp, _MM_DOWNCONV_EPI32_UINT8, _MM_HINT_NONE); + __m128i tmp = 
_mm512_cvtepi32_epi8(__cast_fptoui(__vec16_i32(), val)); + _mm_storeu_si128((__m128i *)ret.v, tmp); return ret; } static FORCEINLINE __vec16_i16 __cast_fptoui(__vec16_i16, __vec16_d val) { __vec16_i16 ret; - __vec16_i32 tmp = __cast_fptoui(__vec16_i32(), val); - _mm512_extstore_epi32(ret.v, tmp, _MM_DOWNCONV_EPI32_UINT16, _MM_HINT_NONE); + __m256i tmp = _mm512_cvtepi32_epi16(__cast_fptoui(__vec16_i32(), val)); + _mm256_storeu_si256((__m256i *)ret.v, tmp); return ret; } @@ -2086,12 +2085,6 @@ static FORCEINLINE __vec16_i64 __cast_fptoui(__vec16_i64, __vec16_d val) { return ret; } - - - - - - static FORCEINLINE __vec16_d __cast_fpext(__vec16_d, __vec16_f val) { __vec16_d ret; ret.v_lo = _mm512_cvtpslo_pd(val.v); @@ -2234,25 +2227,25 @@ CMP_OP(__vec16_i8, i8, int8_t, __signed_greater_than, >) SELECT(__vec16_i8) static FORCEINLINE int8_t __extract_element(__vec16_i8 v, uint32_t index) { - return v[index]; + return v[index]; } static FORCEINLINE void __insert_element(__vec16_i8 *v, uint32_t index, int8_t val) { - ((int32_t *)v)[index] = val; + ((int32_t *)v)[index] = val; } static FORCEINLINE __vec16_i8 __broadcast_i8(__vec16_i8 v, int index) { - int32_t val = __extract_element(v, index & 0xf); - __vec16_i32 tmp = _mm512_set1_epi32(val); - __vec16_i8 ret; - _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); - return ret; + __vec16_i8 ret; + int32_t val = __extract_element(v, index & 0xf); + __m128i tmp = _mm512_cvtepi32_epi8(_mm512_set1_epi32(val)); + _mm_storeu_si128((__m128i *)ret.v, tmp); + return ret; } static FORCEINLINE __vec16_i1 __not_equal_i8(__vec16_i8 a, __vec16_i8 b) { - __vec16_i32 tmp_a = __cast_sext(__vec16_i32(), a); - __vec16_i32 tmp_b = __cast_sext(__vec16_i32(), b); - return __not_equal_i32(tmp_a, tmp_b); + __vec16_i32 tmp_a = __cast_sext(__vec16_i32(), a); + __vec16_i32 tmp_b = __cast_sext(__vec16_i32(), b); + return __not_equal_i32(tmp_a, tmp_b); } static FORCEINLINE __vec16_i1 __equal_i8_and_mask(const __vec16_i8 &a, const __vec16_i8 &b, __vec16_i1 m) { @@ -2268,38 +2261,38 @@ static FORCEINLINE __vec16_i1 __not_equal_i8_and_mask(__vec16_i8 a, __vec16_i8 b } static FORCEINLINE __vec16_i8 __rotate_i8(__vec16_i8 v, int index) { - __vec16_i32 tmp_v = __cast_sext(__vec16_i32(), v); - __vec16_i32 tmp = __rotate_i32(tmp_v, index); - __vec16_i8 ret; - _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); - return ret; + __vec16_i32 tmp_v = __cast_sext(__vec16_i32(), v); + __m128i tmp = _mm512_cvtepi32_epi8(__rotate_i32(tmp_v, index)); + __vec16_i8 ret; + _mm_storeu_si128((__m128i *)ret.v, tmp); + return ret; } static FORCEINLINE __vec16_i8 __shuffle_i8(__vec16_i8 v, __vec16_i32 index) { - __vec16_i32 tmp_v = __cast_sext(__vec16_i32(), v); - __vec16_i32 tmp = __shuffle_i32(tmp_v, index); - __vec16_i8 ret; - _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); - return ret; - + __vec16_i32 tmp_v = __cast_sext(__vec16_i32(), v); + __m128i tmp = _mm512_cvtepi32_epi8(__shuffle_i32(tmp_v, index)); + __vec16_i8 ret; + _mm_storeu_si128((__m128i *)ret.v, tmp); + return ret; } template static RetVecType __smear_i8(int8_t i); template <> FORCEINLINE __vec16_i8 __smear_i8<__vec16_i8>(int8_t i) { - __vec16_i32 tmp = __smear_i32<__vec16_i32>(i); - __vec16_i8 ret; - _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); - return ret; + __m128i tmp = _mm512_cvtepi32_epi8(__smear_i32<__vec16_i32>(i)); + __vec16_i8 ret; + _mm_storeu_si128((__m128i *)ret.v, tmp); + return ret; } static FORCEINLINE __vec16_i8 
__shuffle2_i8(__vec16_i8 v0, __vec16_i8 v1, __vec16_i32 index) { - __vec16_i32 tmp_v0 = __cast_sext(__vec16_i32(), v0); - __vec16_i32 tmp_v1 = __cast_sext(__vec16_i32(), v1); - __vec16_i32 tmp = __shuffle2_i32(tmp_v0, tmp_v1, index); - __vec16_i8 ret; - _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); - return ret; + __vec16_i8 ret; + __vec16_i32 tmp_v0 = __cast_sext(__vec16_i32(), v0); + __vec16_i32 tmp_v1 = __cast_sext(__vec16_i32(), v1); + __m128i tmp = _mm512_cvtepi32_epi8(__shuffle2_i32(tmp_v0, tmp_v1, index)); + _mm_storeu_si128((__m128i *)ret.v, tmp); + return ret; } + /////////////////////////////////////////////////////////////////////////// // int16 /////////////////////////////////////////////////////////////////////////// @@ -2352,11 +2345,11 @@ static FORCEINLINE void __insert_element(__vec16_i16 *v, uint32_t index, int16_t } static FORCEINLINE __vec16_i16 __broadcast_i16(__vec16_i16 v, int index) { - int32_t val = __extract_element(v, index & 0xf); - __vec16_i32 tmp = _mm512_set1_epi32(val); - __vec16_i16 ret; - _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); - return ret; + int32_t val = __extract_element(v, index & 0xf); + __m256i tmp = _mm512_cvtepi32_epi16(_mm512_set1_epi32(val)); + __vec16_i16 ret; + _mm256_storeu_si256((__m256i *)ret.v, tmp); + return ret; } static FORCEINLINE __vec16_i1 __not_equal_i16(__vec16_i16 a, __vec16_i16 b) { @@ -2379,34 +2372,34 @@ static FORCEINLINE __vec16_i1 __not_equal_i16_and_mask(__vec16_i16 a, __vec16_i1 static FORCEINLINE __vec16_i16 __rotate_i16(__vec16_i16 v, int index) { __vec16_i32 tmp_v = __cast_sext(__vec16_i32(), v); - __vec16_i32 tmp = __rotate_i32(tmp_v, index); + __m256i tmp = _mm512_cvtepi32_epi16(__rotate_i32(tmp_v, index)); __vec16_i16 ret; - _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); + _mm256_storeu_si256((__m256i *)ret.v, tmp); return ret; } static FORCEINLINE __vec16_i16 __shuffle_i16(__vec16_i16 v, __vec16_i32 index) { __vec16_i32 tmp_v = __cast_sext(__vec16_i32(), v); - __vec16_i32 tmp = __shuffle_i32(tmp_v, index); + __m256i tmp = _mm512_cvtepi32_epi16(__shuffle_i32(tmp_v, index)); __vec16_i16 ret; - _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); + _mm256_storeu_si256((__m256i *)ret.v, tmp); return ret; } template static RetVecType __smear_i16(int16_t i); template <> FORCEINLINE __vec16_i16 __smear_i16<__vec16_i16>(int16_t i) { - __vec16_i32 tmp = __smear_i32<__vec16_i32>(i); + __m256i tmp = _mm512_cvtepi32_epi16(__smear_i32<__vec16_i32>(i)); __vec16_i16 ret; - _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); + _mm256_storeu_si256((__m256i *)ret.v, tmp); return ret; } static FORCEINLINE __vec16_i16 __shuffle2_i16(__vec16_i16 v0, __vec16_i16 v1, __vec16_i32 index) { __vec16_i32 tmp_v0 = __cast_sext(__vec16_i32(), v0); __vec16_i32 tmp_v1 = __cast_sext(__vec16_i32(), v1); - __vec16_i32 tmp = __shuffle2_i32(tmp_v0, tmp_v1, index); + __m256i tmp = _mm512_cvtepi32_epi16(__shuffle2_i32(tmp_v0, tmp_v1, index)); __vec16_i16 ret; - _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT16,_MM_HINT_NONE); + _mm256_storeu_si256((__m256i *)ret.v, tmp); return ret; } @@ -2756,29 +2749,23 @@ static FORCEINLINE void __masked_store_i8(void *p, const __vec16_i8 &val, __vec1 #ifdef ISPC_FORCE_ALIGNED_MEMORY _mm512_mask_extstore_epi32(p, mask, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); #else - #if 0 // TODO: both implementations seem to work, need to test which one is faster - 
_mm512_mask_i32extscatter_epi32 (p, mask, __vec16_i32(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15), tmp, _MM_DOWNCONV_EPI32_SINT8, sizeof(uint8_t), _MM_HINT_NONE); - #else __vec16_i32 tmp_; tmp_.v = _mm512_extloadunpacklo_epi32(tmp_.v, p, _MM_UPCONV_EPI32_SINT8, _MM_HINT_NONE); tmp_.v = _mm512_extloadunpackhi_epi32(tmp_.v, (uint8_t*)p+64, _MM_UPCONV_EPI32_SINT8, _MM_HINT_NONE); tmp_.v = _mm512_mask_mov_epi32(tmp_.v, mask, tmp.v); _mm512_extpackstorelo_epi32(p, tmp_.v, _MM_DOWNCONV_EPI32_SINT8, _MM_HINT_NONE); _mm512_extpackstorehi_epi32((uint8_t*)p+64, tmp_.v, _MM_DOWNCONV_EPI32_SINT8, _MM_HINT_NONE); - #endif // if 0 #endif } static FORCEINLINE __vec16_i8 __masked_load_i8(void *p, __vec16_i1 mask) { #ifdef ISPC_FORCE_ALIGNED_MEMORY - __vec16_i32 tmp = _mm512_mask_extload_epi32(_mm512_undefined_epi32(), mask, p, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __m128i tmp = _mm_load_si128((__m128i *)p); #else - __vec16_i32 tmp; - tmp.v = _mm512_mask_extloadunpacklo_epi32(tmp.v, 0xFFFF, p, _MM_UPCONV_EPI32_SINT8, _MM_HINT_NONE); - tmp.v = _mm512_mask_extloadunpackhi_epi32(tmp.v, 0xFFFF, (uint8_t*)p+64, _MM_UPCONV_EPI32_SINT8, _MM_HINT_NONE); + __m128i tmp = _mm_loadu_si128((__m128i *)p); #endif __vec16_i8 ret; - _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT8, _MM_HINT_NONE); + _mm_storeu_si128((__m128i *)ret.v, tmp); return ret; } @@ -2795,7 +2782,7 @@ static FORCEINLINE void __scatter_base_offsets32_i8(uint8_t *b, uint32_t scale, // TODO for (int i = 0; i < 16; ++i) if ((mask & (1 << i)) != 0) { - int8_t *ptr = (int8_t *)(_base + scale * offsets[i]); + int8_t *ptr = (int8_t *)(b + scale * offsets[i]); *ptr = val[i]; } } @@ -2807,7 +2794,7 @@ static FORCEINLINE void __scatter_base_offsets32_i16(uint8_t *b, uint32_t scale, // TODO for (int i = 0; i < 16; ++i) if ((mask & (1 << i)) != 0) { - int16_t *ptr = (int16_t *)(_base + scale * offsets[i]); + int16_t *ptr = (int16_t *)(b + scale * offsets[i]); *ptr = val[i]; } } @@ -2833,18 +2820,15 @@ static FORCEINLINE void __masked_store_i16(void *p, const __vec16_i16 &val, __ve static FORCEINLINE __vec16_i16 __masked_load_i16(void *p, __vec16_i1 mask) { #ifdef ISPC_FORCE_ALIGNED_MEMORY - __vec16_i32 tmp = _mm512_mask_extload_epi32(_mm512_undefined_epi32(), mask, p, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __m256i tmp = _mm256_load_si256((__m256i *)p); #else - __vec16_i32 tmp; - tmp.v = _mm512_mask_extloadunpacklo_epi32(tmp.v, 0xFFFF, p, _MM_UPCONV_EPI32_SINT16, _MM_HINT_NONE); - tmp.v = _mm512_mask_extloadunpackhi_epi32(tmp.v, 0xFFFF, (uint8_t*)p+64, _MM_UPCONV_EPI32_SINT16, _MM_HINT_NONE); + __m256i tmp = _mm256_loadu_si256((__m256i *)p); #endif __vec16_i16 ret; - _mm512_extstore_epi32(&ret, tmp, _MM_DOWNCONV_EPI32_SINT16, _MM_HINT_NONE); + _mm256_storeu_si256((__m256i *)ret.v, tmp); return ret; } - template static FORCEINLINE __vec16_i16 __load(const __vec16_i16 *p) { return *p; } @@ -2898,26 +2882,25 @@ static FORCEINLINE void __masked_store_blend_float(void *p, __vec16_f val, // offsets * offsetScale is in bytes (for all of these) -static FORCEINLINE __vec16_i8 -__gather_base_offsets32_i8(uint8_t *base, uint32_t scale, __vec16_i32 offsets, +static FORCEINLINE __vec16_i8 __gather_base_offsets32_i8(uint8_t *base, uint32_t scale, + __vec16_i32 offsets, __vec16_i1 mask) { // TODO __vec16_i8 ret; for (int i = 0; i < 16; ++i) if ((mask & (1 << i)) != 0) { - int8_t *ptr = (int8_t *)(_base + scale * offsets[i]); + int8_t *ptr = (int8_t *)(base + scale * offsets[i]); ret[i] = *ptr; } return ret; } -static 
FORCEINLINE __vec16_i16 -__gather_base_offsets32_i16(uint8_t *base, uint32_t scale, __vec16_i32 offsets, - __vec16_i1 mask) { +static FORCEINLINE __vec16_i16 __gather_base_offsets32_i16(uint8_t *base, uint32_t scale, + __vec16_i32 offsets, __vec16_i1 mask) { // TODO __vec16_i16 ret; for (int i = 0; i < 16; ++i) if ((mask & (1 << i)) != 0) { - int16_t *ptr = (int16_t *)(_base + scale * offsets[i]); + int16_t *ptr = (int16_t *)(base + scale * offsets[i]); ret[i] = *ptr; } return ret; From 1d9d989d8d0e4d4ecc8a31502f087fa4df83ba50 Mon Sep 17 00:00:00 2001 From: Vsevolod Livinskiy Date: Thu, 26 Mar 2015 14:56:16 +0300 Subject: [PATCH 16/33] round_ps --- examples/intrinsics/knl.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index 1146d66e..c8f7cfcd 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -2432,8 +2432,7 @@ static FORCEINLINE double __ceil_uniform_double(double v) { } static FORCEINLINE __vec16_f __round_varying_float(__vec16_f v) { - - return _mm512_round_ps(v, _MM_ROUND_MODE_NEAREST, _MM_EXPADJ_NONE); + return _mm512_cvtepi32_ps(_mm512_cvtps_epi32(v)); } static FORCEINLINE __vec16_f __floor_varying_float(__vec16_f v) { From 89ded5492937feae4f463ff4073f8cabfdb7b283 Mon Sep 17 00:00:00 2001 From: Vsevolod Livinskiy Date: Thu, 26 Mar 2015 16:16:37 +0300 Subject: [PATCH 17/33] DOWNCONW --- examples/intrinsics/knl.h | 31 ++++++++++--------------------- 1 file changed, 10 insertions(+), 21 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index c8f7cfcd..a5ab832e 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -666,7 +666,6 @@ static FORCEINLINE __vec16_i16 __cast_trunc(__vec16_i16, const __vec16_i32 i32) __vec16_i16 ret; //__vec16_i32 i32_trunk = _mm512_and_epi32(i32, __smear_i32<__vec16_i32>(65535)); //_mm512_extstore_epi32(ret.v, i32_trunk, _MM_DOWNCONV_EPI32_UINT16, _MM_HINT_NONE); - _mm512_mask_cvtepi32_storeu_epi16(ret.v, 0xFFFF, i32); return ret; } @@ -2744,17 +2743,14 @@ static FORCEINLINE __vec16_d __masked_load_double(void *p, __vec16_i1 mask) { } static FORCEINLINE void __masked_store_i8(void *p, const __vec16_i8 &val, __vec16_i1 mask) { - __vec16_i32 tmp = _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __m128i val_t; #ifdef ISPC_FORCE_ALIGNED_MEMORY - _mm512_mask_extstore_epi32(p, mask, tmp, _MM_DOWNCONV_EPI32_SINT8,_MM_HINT_NONE); + val_t = _mm_load_si128((__m128i *)val.v); #else - __vec16_i32 tmp_; - tmp_.v = _mm512_extloadunpacklo_epi32(tmp_.v, p, _MM_UPCONV_EPI32_SINT8, _MM_HINT_NONE); - tmp_.v = _mm512_extloadunpackhi_epi32(tmp_.v, (uint8_t*)p+64, _MM_UPCONV_EPI32_SINT8, _MM_HINT_NONE); - tmp_.v = _mm512_mask_mov_epi32(tmp_.v, mask, tmp.v); - _mm512_extpackstorelo_epi32(p, tmp_.v, _MM_DOWNCONV_EPI32_SINT8, _MM_HINT_NONE); - _mm512_extpackstorehi_epi32((uint8_t*)p+64, tmp_.v, _MM_DOWNCONV_EPI32_SINT8, _MM_HINT_NONE); + val_t = _mm_loadu_si128((__m128i *)val.v); #endif + __vec16_i32 tmp = _mm512_cvtepi8_epi32(val_t); + _mm512_mask_cvtepi32_storeu_epi8(p, mask, tmp); } static FORCEINLINE __vec16_i8 __masked_load_i8(void *p, __vec16_i1 mask) { @@ -2800,21 +2796,14 @@ static FORCEINLINE void __scatter_base_offsets32_i16(uint8_t *b, uint32_t scale, static FORCEINLINE void __masked_store_i16(void *p, const __vec16_i16 &val, __vec16_i1 mask) { - __vec16_i32 tmp = _mm512_extload_epi32(&val, _MM_UPCONV_EPI32_SINT16, _MM_BROADCAST32_NONE, _MM_HINT_NONE); + __m256i val_t; #ifdef 
ISPC_FORCE_ALIGNED_MEMORY - _mm512_mask_extstore_epi32(p, mask, tmp, _MM_DOWNCONV_EPI32_SINT16, _MM_HINT_NONE); + val_t = _mm256_load_si256((__m256i *)val.v); #else - #if 0 // TODO: both implementations seem to work, need to test which one is faster - _mm512_mask_i32extscatter_epi32 (p, mask, __vec16_i32(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15), tmp, _MM_DOWNCONV_EPI32_SINT16, sizeof(uint16_t), _MM_HINT_NONE); - #else - __vec16_i32 tmp_; - tmp_.v = _mm512_extloadunpacklo_epi32(tmp_.v, p, _MM_UPCONV_EPI32_SINT16, _MM_HINT_NONE); - tmp_.v = _mm512_extloadunpackhi_epi32(tmp_.v, (uint8_t*)p+64, _MM_UPCONV_EPI32_SINT16, _MM_HINT_NONE); - tmp_.v = _mm512_mask_mov_epi32(tmp_.v, mask, tmp.v); - _mm512_extpackstorelo_epi32(p, tmp_.v, _MM_DOWNCONV_EPI32_SINT16, _MM_HINT_NONE); - _mm512_extpackstorehi_epi32((uint8_t*)p+64, tmp_.v, _MM_DOWNCONV_EPI32_SINT16, _MM_HINT_NONE); - #endif // if 0 + val_t = _mm256_loadu_si256((__m256i *)val.v); #endif + __vec16_i32 tmp = _mm512_cvtepi16_epi32(val_t); + _mm512_mask_cvtepi32_storeu_epi16(p, mask, tmp); } static FORCEINLINE __vec16_i16 __masked_load_i16(void *p, __vec16_i1 mask) { From 17e41970d305bbc307ccd24d4ac96052288d1b8e Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 26 Mar 2015 17:37:05 +0300 Subject: [PATCH 18/33] Immediate parameter for '_mm512_alignr_epi32/64 intrinsic' --- examples/intrinsics/knl.h | 41 +++++++++++++++++++++++++++++++++++---- 1 file changed, 37 insertions(+), 4 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index 1146d66e..840b4c3e 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -691,9 +691,41 @@ static FORCEINLINE __vec16_i8 __cast_trunc(__vec16_i8, const __vec16_i64 i64) { return __cast_trunc(__vec16_i8(), i64.v_lo);//TODO } +static FORCEINLINE __vec16_i32 unrolled_alignr_i32(__m512i &v1, __m512i &v2, int index) { + if (index == 0) return v2; + if (index == 1) return _mm512_alignr_epi32(v1, v2, 1); + if (index == 2) return _mm512_alignr_epi32(v1, v2, 2); + if (index == 3) return _mm512_alignr_epi32(v1, v2, 3); + if (index == 4) return _mm512_alignr_epi32(v1, v2, 4); + if (index == 5) return _mm512_alignr_epi32(v1, v2, 5); + if (index == 6) return _mm512_alignr_epi32(v1, v2, 6); + if (index == 7) return _mm512_alignr_epi32(v1, v2, 7); + if (index == 8) return _mm512_alignr_epi32(v1, v2, 8); + if (index == 9) return _mm512_alignr_epi32(v1, v2, 9); + if (index == 10) return _mm512_alignr_epi32(v1, v2, 10); + if (index == 11) return _mm512_alignr_epi32(v1, v2, 11); + if (index == 12) return _mm512_alignr_epi32(v1, v2, 12); + if (index == 13) return _mm512_alignr_epi32(v1, v2, 13); + if (index == 14) return _mm512_alignr_epi32(v1, v2, 14); + if (index == 15) return _mm512_alignr_epi32(v1, v2, 15); + if (index >= 16) return v1; +}; + +static FORCEINLINE __vec16_i32 unrolled_alignr_i64(__m512i &v1, __m512i &v2, int index) { + if (index == 0) return v2; + if (index == 1) return _mm512_alignr_epi64(v1, v2, 1); + if (index == 2) return _mm512_alignr_epi64(v1, v2, 2); + if (index == 3) return _mm512_alignr_epi64(v1, v2, 3); + if (index == 4) return _mm512_alignr_epi64(v1, v2, 4); + if (index == 5) return _mm512_alignr_epi64(v1, v2, 5); + if (index == 6) return _mm512_alignr_epi64(v1, v2, 6); + if (index == 7) return _mm512_alignr_epi64(v1, v2, 7); + if (index >= 8) return v1; +}; + static FORCEINLINE __vec16_i32 __rotate_i32(__vec16_i32 v, int index) { index &= 0xFF; - return _mm512_alignr_epi32(v, v, index); + return unrolled_alignr_i32(v.v, v.v, index % 16); } static 
FORCEINLINE __vec16_i32 __shuffle_i32(__vec16_i32 v, __vec16_i32 index) { @@ -711,7 +743,8 @@ static FORCEINLINE __vec16_i32 __shuffle2_i32(__vec16_i32 v0, __vec16_i32 v1, __ static FORCEINLINE __vec16_i32 __shift_i32(__vec16_i32 v, int index) { index &= 0xFF; - return _mm512_alignr_epi32(_mm512_setzero_epi32(), v, index); + __m512i mmzero = _mm512_setzero_epi32(); + return unrolled_alignr_i32(mmzero, v.v, index); } template static FORCEINLINE __vec16_i32 __load(const __vec16_i32 *p) { @@ -1034,8 +1067,8 @@ static FORCEINLINE __vec16_i64 __rotate_i64(__vec16_i64 v, int index) { swap = false; index -= 8; } - __m512i v1 = _mm512_alignr_epi64(v.v_hi, v.v_lo, index); - __m512i v2 = _mm512_alignr_epi64(v.v_lo, v.v_hi, index); + __m512i v1 = unrolled_alignr_i64(v.v_hi, v.v_lo, index); + __m512i v2 = unrolled_alignr_i64(v.v_lo, v.v_hi, index); return (swap) ? __vec16_i64(v1, v2) : __vec16_i64(v2, v1); } } From d33100a11c11b7c0757b77e7bca23ab91acdbbcb Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 26 Mar 2015 17:45:20 +0300 Subject: [PATCH 19/33] Since is no _MM_HINT_T2 for prefetch on KNL, changing that to T1 --- examples/intrinsics/knl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index 840b4c3e..e1775c5b 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -3258,7 +3258,7 @@ PREFETCH_READ_VARYING(1, _MM_HINT_T0) PREFETCH_READ_VARYING(2, _MM_HINT_T1) // L3 prefetch is mapped to L2 cache PREFETCH_READ_VARYING(3, _MM_HINT_T1) -PREFETCH_READ_VARYING(nt, _MM_HINT_T2) +PREFETCH_READ_VARYING(nt, _MM_HINT_T1) /////////////////////////////////////////////////////////////////////////// // atomics From 03211d55435f6b9305dc0859d2b914e8e4516a3f Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Fri, 27 Mar 2015 09:58:59 +0300 Subject: [PATCH 20/33] cast_uitofp typo fix; helper printf_v for vector types add --- examples/intrinsics/knl.h | 74 ++++++++++++++++++++++++++++++++++----- 1 file changed, 65 insertions(+), 9 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index d39e5230..d01e2549 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -308,13 +308,68 @@ inline std::ostream &operator<<(std::ostream &out, const __vec16_i64 &v) out << "["; uint32_t *ptr = (uint32_t*)&v; for (int i=0;i<16;i++) { - uint64_t val = (uint64_t(ptr[i])<<32)+ptr[i+16]; - out << (i!=0?",":"") << std::dec << std::setw(8) << ((uint64_t)val) << std::dec; + out << (i!=0?",":"") << std::dec << std::setw(8) << ((uint64_t)v[i]) << std::dec; } out << "]" << std::flush; return out; } +// C-style debugging helpers +inline void printf_v(const __m512i &v) +{ + printf("["); + for (int i=0;i<16;i++) + printf("%d ", ((int*)&v)[i]); + printf("]\n"); +} + +inline void printf_v(const __m512 &v) +{ + printf("["); + for (int i=0;i<16;i++) + printf("%f ", ((float*)&v)[i]); + printf("]\n"); +} + +inline void printf_v(const __vec16_i1 &v) +{ + printf("["); + for (int i=0;i<16;i++) + printf("%d ", (int)v[i]); + printf("]\n"); +} + +inline void printf_v(const __vec16_i8 &v) +{ + printf("["); + for (int i=0;i<16;i++) + printf("%d ", (int)((unsigned char*)&v)[i]); + printf("]\n"); +} + +inline void printf_v(const __vec16_i16 &v) +{ + printf("["); + for (int i=0;i<16;i++) + printf("%d ", (int)((uint16_t*)&v)[i]); + printf("]\n"); +} + +inline void printf_v(const __vec16_d &v) +{ + printf("["); + for (int i=0;i<16;i++) + printf("%f ", v[i]); + printf("]\n"); +} + +inline void printf_v(const 
__vec16_i64 &v) +{ + printf("["); + for (int i=0;i<16;i++) + printf("%llu ", ((uint64_t)v[i])); + printf("]\n"); +} /////////////////////////////////////////////////////////////////////////// // macros... @@ -784,8 +839,8 @@ static FORCEINLINE __vec16_i64 __select(__vec16_i1 mask, ret.v_lo = _mm512_mask_blend_epi64(mask.lo(), b.v_lo, a.v_lo); ret.v_hi = _mm512_mask_blend_epi64(mask.hi(), b.v_hi, a.v_hi); // TODO: Check if this works better: - ret.v_lo = _mm512_mask_mov_epi32(b.v_lo, mask, a.v_lo); - ret.v_hi = _mm512_mask_mov_epi32(b.v_hi, mask, a.v_hi); + //ret.v_lo = _mm512_mask_mov_epi32(b.v_lo, mask, a.v_lo); + //ret.v_hi = _mm512_mask_mov_epi32(b.v_hi, mask, a.v_hi); return ret; } @@ -831,6 +886,7 @@ static FORCEINLINE __vec16_i64 __sub(const __vec16_i64 &a, const __vec16_i64 &b) /*! 64x32 bit mul -- address computations often use a scale that we know is 32 bits; and 32x64 is faster than 64x64 */ +/* static FORCEINLINE __vec16_i64 __mul(const __vec16_i32 &a, const __vec16_i64 &b) { // TODO @@ -838,7 +894,7 @@ static FORCEINLINE __vec16_i64 __mul(const __vec16_i32 &a, const __vec16_i64 &b) _mm512_add_epi32(_mm512_mullo_epi32(a.v, b.v_hi), _mm512_mulhi_epi32(a.v, b.v_lo))); } - +*/ static FORCEINLINE void __abs_i32i64(__m512i &_hi, __m512i &_lo) { /* abs(x) : @@ -1916,7 +1972,7 @@ static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i16 val) { static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i64 val) { __vec16_f ret; - // Cycles don't work. It seems that it is icc bug. + // Loops don't work. It seems that it is icc bug. /* for (int i = 0; i < 8; i++) { ((float*)&ret)[i] = ((float)(((uint64_t*)&tmp1)[i])); @@ -1925,14 +1981,14 @@ static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i64 val) { ((float*)&ret)[i + 8] = ((float)(((uint64_t*)&tmp2)[i])); } */ - ret[0] = ((float)(((uint64_t*)&val.v_lo)[0])); + ret[0] = ((float)(val[0])); ret[1] = ((float)(((uint64_t*)&val.v_lo)[1])); ret[2] = ((float)(((uint64_t*)&val.v_lo)[2])); ret[3] = ((float)(((uint64_t*)&val.v_lo)[3])); ret[4] = ((float)(((uint64_t*)&val.v_lo)[4])); ret[5] = ((float)(((uint64_t*)&val.v_lo)[5])); ret[6] = ((float)(((uint64_t*)&val.v_lo)[6])); - ret[7] = ((float)(((uint64_t*)&val.v_hi)[7])); + ret[7] = ((float)(((uint64_t*)&val.v_lo)[7])); ret[8] = ((float)(((uint64_t*)&val.v_hi)[0])); ret[9] = ((float)(((uint64_t*)&val.v_hi)[1])); ret[10] = ((float)(((uint64_t*)&val.v_hi)[2])); @@ -1940,7 +1996,7 @@ static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i64 val) { ret[12] = ((float)(((uint64_t*)&val.v_hi)[4])); ret[13] = ((float)(((uint64_t*)&val.v_hi)[5])); ret[14] = ((float)(((uint64_t*)&val.v_hi)[6])); - ret[15] = ((float)(((uint64_t*)&val.v_hi)[7])); + ret[15] = ((float)(((uint64_t*)&val.v_hi)[7])); return ret; } From 4d371c0f2109ca281486369bbf84a5c2cad35b1d Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Fri, 27 Mar 2015 11:59:54 +0300 Subject: [PATCH 21/33] update fail_db to track fails --- fail_db.txt | 337 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 337 insertions(+) diff --git a/fail_db.txt b/fail_db.txt index ad3abd4b..bc345008 100644 --- a/fail_db.txt +++ b/fail_db.txt @@ -169,3 +169,340 @@ ./tests/ptr-19.ispc runfail x86-64 generic-16 Linux LLVM 3.7 clang++3.4 -O0 * ./tests/ptr-22.ispc runfail x86-64 generic-16 Linux LLVM 3.7 clang++3.4 -O0 * ./tests/test-143.ispc runfail x86-64 generic-16 Linux LLVM 3.7 clang++3.4 -O0 * +./tests/array-gather-simple.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * 
+./tests/array-multidim-gather-scatter.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/c-test-108.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/c-test-131.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-cif-nested-continue.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-for-loop-nested-continues.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-for-unif-test-vary-break.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-108.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-131.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-133.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-134.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-135.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-136.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-62.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-63.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-65.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-66.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-68.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-69.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-70.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-71.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-76.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-c-test-77.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-cif-nested-continue.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-test-108.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/cfor-test-131.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/exclusive-scan-add-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/exclusive-scan-add-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/exclusive-scan-add-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/exclusive-scan-add-9.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/funcptr-uniform-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/funcptr-uniform-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/funcptr-varying-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/funcptr-varying-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/funcptr-varying-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/funcptr-varying-9.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-double-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-double-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-float-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-float-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-int16-2.ispc runfail x86-64 knl Linux LLVM 3.6 
icpc15.0 -O2 * +./tests/gather-int16-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-int16-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-int16-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-int16-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-int16-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-int32-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-int32-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-int64-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-int64-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-int64-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-int64-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-int64-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-int64-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-int64-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gather-int64-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/global-decl-define.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gs-double-improve-progindex-plus-const.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gs-double-improve-progindex-plus-unif.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gs-double-improve-unif.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/gs-improve-unif.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/idiv.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/init-atomic-with-expr-list.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/local-atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/local-atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/local-atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/memcpy-varying.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/memmove-varying.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/memset-varying.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/packed-load-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/packed-store-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/packed-store2-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/paddus_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/pmuls_vi32.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/pmuls_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/pmulus_vi32.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/pmulus_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/prefetch-varying.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/ptr-diff-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/ptr-diff-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/ptr-diff-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/recursion-forward-func-decl.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/recursion.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/reduce-add-int64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/reduce-add-uint64-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * 
+./tests/reduce-add-uint64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/ref-vec-param-index.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/shift-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/short-circuit.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/short-vec-11.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/short-vec-12.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/short-vec-18.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/soa-21.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/soa-22.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/soa-23.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/soa-24.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/soa-25.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/static-array-init-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/static-array-init.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/test-108.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/test-131.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/test-139.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/test-93.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/test-94.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/uint64-max-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/vector-varying-scatter-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/vector-varying-scatter.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * +./tests/array-gather-ifs.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/array-gather-simple.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/array-multidim-gather-scatter.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/array-struct-gather.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/atomics-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/atomics-varyingptr-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/avg-down-int16.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/avg-down-int8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/avg-down-uint16.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/avg-down-uint8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/avg-up-int16.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/avg-up-int8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/avg-up-uint16.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/avg-up-uint8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/c-test-108.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/c-test-131.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-array-gather-ifs.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-array-struct-gather.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-cif-nested-continue.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-for-loop-nested-continues.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * 
+./tests/cfor-c-for-unif-test-vary-break.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-108.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-131.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-133.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-134.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-135.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-136.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-62.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-63.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-65.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-66.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-68.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-69.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-70.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-71.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-76.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-c-test-77.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-cif-nested-continue.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-struct-gather-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-struct-gather-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-struct-test-114.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-test-108.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-test-131.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/cfor-unif-struct-test-114.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/count-leading-trailing-zeros-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/double-abs-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/double-abs.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/exclusive-scan-add-10.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/exclusive-scan-add-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/exclusive-scan-add-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/exclusive-scan-add-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/exclusive-scan-add-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/exclusive-scan-add-9.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/foreach-unique-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/frexp-double-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/frexp-double.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/func-ptr-initializer.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/funcptr-null-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/funcptr-null-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/funcptr-null-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/funcptr-null-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/funcptr-null-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/funcptr-uniform-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * 
+./tests/funcptr-uniform-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/funcptr-uniform-9.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/funcptr-varying-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/funcptr-varying-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/funcptr-varying-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/funcptr-varying-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/funcptr-varying-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/funcptr-varying-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/funcptr-varying-9.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-double-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-double-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-double-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-double-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-float-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-float-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-float-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-float-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int16-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int16-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int16-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int16-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int32-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int32-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int32-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int32-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int64-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int64-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int64-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int64-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int64-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int64-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int64-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int64-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int8-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int8-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int8-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-int8-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gather-struct-vector.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/global-decl-define.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gs-double-improve-progindex-plus-const.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gs-double-improve-progindex-plus-unif.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gs-double-improve-unif.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gs-double-improve-vary-one.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gs-double-improve-vary-zero.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * 
+./tests/gs-improve-unif.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gs-improve-vary-one.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/gs-improve-vary-zero.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/idiv.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/init-atomic-with-expr-list.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/insert-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/insert-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/int64-constant.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/int64-max-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/int64-max.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/int64-min-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/int64-min.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ldexp-double.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/local-atomics-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/local-atomics-varyingptr-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/local-atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/local-atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/local-atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/memcpy-varying.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/memmove-varying.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/memset-varying.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/nested-statics-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/new-delete-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/packed-load-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/packed-store-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/packed-store2-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/padds_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/paddus_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/pass-varying-lvalue-to-ref.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/pdivs_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/pdivus_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/pmuls_vi32.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/pmuls_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/pmulus_vi32.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/pmulus_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/popcnt-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/prefetch-varying.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/psubs_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/psubus_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ptr-15.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ptr-22.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ptr-24.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ptr-25.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ptr-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ptr-assign-lhs-math-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ptr-cast-complex.ispc runfail x86-64 knl Linux LLVM 3.6 
icpc15.0 -O0 * +./tests/ptr-cmp-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ptr-diff-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ptr-diff-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ptr-diff-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ptr-diff-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ptr-diff-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ptr-int-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ptr-int-null-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ptr-null-func-arg.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ptr-varying-unif-index.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/recursion-forward-func-decl.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/recursion.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/reduce-add-int64-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/reduce-add-int64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/reduce-add-uint64-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/reduce-add-uint64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/reduce-equal-10.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/reduce-equal-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/reduce-max-int64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/reduce-max-uint64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/ref-vec-param-index.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/rotate-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/rotate-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/scatter-struct.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/scatter-vector.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/shift-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/shift-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/short-circuit.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/short-vec-10.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/short-vec-11.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/short-vec-12.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/short-vec-13.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/short-vec-14.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/short-vec-18.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/short-vec-19.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/short-vec-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/short-vec-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/short-vec-9.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/soa-18.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/soa-19.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/soa-21.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/soa-22.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/static-array-init-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/static-array-init.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/static-init.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/struct-gather-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * 
+./tests/struct-gather-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/struct-nested-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/struct-ref-lvalue.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/struct-test-114.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/swizzle-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/swizzle-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/swizzle-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/test-108.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/test-131.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/test-139.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/test-143.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/test-93.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/test-94.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/uint64-max-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/uint64-max.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/uint64-min-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/uint64-min.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/unif-struct-test-114.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/unif-struct-test-115.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/unif-struct-test-116.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/unsized-array.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/vector-varying-scatter-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * +./tests/vector-varying-scatter.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 *
From e7803728842c7906b50cd08fbe5704f552708dfb Mon Sep 17 00:00:00 2001 From: Vsevolod Livinskiy Date: Fri, 27 Mar 2015 09:12:21 +0300 Subject: [PATCH 22/33] cast_trunc from i64 --- examples/intrinsics/knl.h | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index d01e2549..cab5edf6 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -734,11 +734,24 @@ static FORCEINLINE __vec16_i8 __cast_trunc(__vec16_i8, const __vec16_i32 i32) { } static FORCEINLINE __vec16_i32 __cast_trunc(__vec16_i32, const __vec16_i64 i64) { - return __vec16_i32(i64.v_lo); //TODO + __m256i tmp = _mm512_cvtepi64_epi32(i64.v_hi); + __vec16_i32 vec_tmp = _mm512_castsi256_si512 (tmp); + __vec16_i32 ret_hi8 = _mm512_permute4f128_epi32(vec_tmp, _MM_PERM_BADC); + tmp = _mm512_cvtepi64_epi32(i64.v_lo); + vec_tmp = _mm512_castsi256_si512 (tmp); + return _mm512_xor_epi32(vec_tmp, ret_hi8); } static FORCEINLINE __vec16_i16 __cast_trunc(__vec16_i16, const __vec16_i64 i64) { - return __cast_trunc(__vec16_i16(), i64.v_lo);//TODO + // TODO: untested + __m128i tmp = _mm512_cvtepi64_epi16(i64.v_hi); + __m256i vec_tmp_hi = _mm256_castsi128_si256(tmp); + tmp = _mm512_cvtepi64_epi16(i64.v_lo); + __m256i vec_tmp_lo = _mm256_castsi128_si256(tmp); + __m256i res = _mm256_permute2f128_si256(vec_tmp_hi, vec_tmp_lo, 0x20); + __vec16_i16 ret; + _mm256_storeu_si256((__m256i *)ret.v, res); + return ret; } static FORCEINLINE __vec16_i8 __cast_trunc(__vec16_i8, const __vec16_i64 i64) { @@ -2319,7 +2332,7 @@ static FORCEINLINE int8_t __extract_element(__vec16_i8 v, uint32_t index) { } static FORCEINLINE void __insert_element(__vec16_i8 *v, uint32_t index, int8_t val) { - ((int32_t *)v)[index] = val; + 
((int8_t *)v)[index] = val; } static FORCEINLINE __vec16_i8 __broadcast_i8(__vec16_i8 v, int index) {
From aa9496947221b746b8c8a4b44904917469842589 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Fri, 27 Mar 2015 15:22:25 +0300 Subject: [PATCH 23/33] Support negative index in rotate and shift for i32 --- examples/intrinsics/knl.h | 42 ++++++++++++++++----------------------- 1 file changed, 17 insertions(+), 25 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index d01e2549..373966b6 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -745,26 +745,6 @@ static FORCEINLINE __vec16_i8 __cast_trunc(__vec16_i8, const __vec16_i64 i64) { return __cast_trunc(__vec16_i8(), i64.v_lo);//TODO } -static FORCEINLINE __vec16_i32 unrolled_alignr_i32(__m512i &v1, __m512i &v2, int index) { - if (index == 0) return v2; - if (index == 1) return _mm512_alignr_epi32(v1, v2, 1); - if (index == 2) return _mm512_alignr_epi32(v1, v2, 2); - if (index == 3) return _mm512_alignr_epi32(v1, v2, 3); - if (index == 4) return _mm512_alignr_epi32(v1, v2, 4); - if (index == 5) return _mm512_alignr_epi32(v1, v2, 5); - if (index == 6) return _mm512_alignr_epi32(v1, v2, 6); - if (index == 7) return _mm512_alignr_epi32(v1, v2, 7); - if (index == 8) return _mm512_alignr_epi32(v1, v2, 8); - if (index == 9) return _mm512_alignr_epi32(v1, v2, 9); - if (index == 10) return _mm512_alignr_epi32(v1, v2, 10); - if (index == 11) return _mm512_alignr_epi32(v1, v2, 11); - if (index == 12) return _mm512_alignr_epi32(v1, v2, 12); - if (index == 13) return _mm512_alignr_epi32(v1, v2, 13); - if (index == 14) return _mm512_alignr_epi32(v1, v2, 14); - if (index == 15) return _mm512_alignr_epi32(v1, v2, 15); - if (index >= 16) return v1; -}; - static FORCEINLINE __vec16_i32 unrolled_alignr_i64(__m512i &v1, __m512i &v2, int index) { if (index == 0) return v2; if (index == 1) return _mm512_alignr_epi64(v1, v2, 1); @@ -778,8 +758,9 @@ static FORCEINLINE __vec16_i32 unrolled_alignr_i64(__m512i &v1, __m512i &v2, int }; static FORCEINLINE __vec16_i32 __rotate_i32(__vec16_i32 v, int index) { - index &= 0xFF; - return unrolled_alignr_i32(v.v, v.v, index % 16); + __vec16_i32 idx = __smear_i32<__vec16_i32>(index); + __vec16_i32 shuffle = _mm512_and_epi32(_mm512_add_epi32(__ispc_stride1, idx), __smear_i32<__vec16_i32>(0xf)); + return _mm512_mask_permutevar_epi32(v, 0xffff, shuffle, v); } static FORCEINLINE __vec16_i32 __shuffle_i32(__vec16_i32 v, __vec16_i32 index) { @@ -796,9 +777,12 @@ static FORCEINLINE __vec16_i32 __shuffle2_i32(__vec16_i32 v0, __vec16_i32 v1, __ } static FORCEINLINE __vec16_i32 __shift_i32(__vec16_i32 v, int index) { - index &= 0xFF; - __m512i mmzero = _mm512_setzero_epi32(); - return unrolled_alignr_i32(mmzero, v.v, index); + __vec16_i32 mod_index = _mm512_add_epi32(__ispc_stride1, __smear_i32<__vec16_i32>(index)); + __vec16_i1 mask_ge = _mm512_cmpge_epi32_mask (mod_index, __smear_i32<__vec16_i32>(0)); + __vec16_i1 mask_le = _mm512_cmple_epi32_mask (mod_index, __smear_i32<__vec16_i32>(0xF)); + __vec16_i1 mask = mask_ge & mask_le; + __vec16_i32 ret = __smear_i32<__vec16_i32>(0); + ret = _mm512_mask_permutevar_epi32(ret, mask, mod_index, v); } template static FORCEINLINE __vec16_i32 __load(const __vec16_i32 *p) { @@ -2791,6 +2775,14 @@ static FORCEINLINE __vec16_i32 __masked_load_i32(void *p, __vec16_i1 mask) { #ifdef ISPC_FORCE_ALIGNED_MEMORY return _mm512_mask_load_epi32(_mm512_undefined_epi32(), mask, p); #else + printf("\n\n\n"); + printf_v(mask); + for(int i=0; i <3; ++i) 
{ + printf("%d ", ((uint32_t *)p)[i]); + } + printf("\n"); + printf_v(_mm512_mask_loadu_epi32(_mm512_undefined_epi32(), mask, p)); + printf("\n\n\n"); return _mm512_mask_loadu_epi32(_mm512_undefined_epi32(), mask, p); #endif } From fc1e12fb2fcd59ff70689e3eaee093d92f28865c Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Fri, 27 Mar 2015 17:34:48 +0300 Subject: [PATCH 24/33] fixed knc target --- run_tests.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/run_tests.py b/run_tests.py index 170a0b73..0dbdd605 100755 --- a/run_tests.py +++ b/run_tests.py @@ -170,7 +170,7 @@ def run_test(testname): (filename, options.arch, "generic-16") else: ispc_cmd = ispc_exe_rel + " --werror --nowrap %s --arch=%s --target=%s" % \ - (filename, options.arch, options.target) + (filename, options.arch, options.target) (return_code, output) = run_command(ispc_cmd) got_error = (return_code != 0) @@ -267,7 +267,7 @@ def run_test(testname): if (options.target == "knc"): cc_cmd = "%s -O2 -I. %s %s test_static.cpp -DTEST_SIG=%d %s -o %s" % \ (options.compiler_exe, gcc_arch, "-mmic", match, obj_name, exe_name) - if (options.target == "knl"): + elif (options.target == "knl"): cc_cmd = "%s -O2 -I. %s %s test_static.cpp -DTEST_SIG=%d %s -o %s" % \ (options.compiler_exe, gcc_arch, "-xMIC-AVX512", match, obj_name, exe_name) else: @@ -291,7 +291,7 @@ def run_test(testname): if (options.target == "knc"): ispc_cmd = ispc_exe_rel + " --woff %s -o %s --arch=%s --target=%s" % \ (filename, obj_name, options.arch, "generic-16") - if (options.target == "knl"): + elif (options.target == "knl"): ispc_cmd = ispc_exe_rel + " --woff %s -o %s --arch=%s --target=%s" % \ (filename, obj_name, options.arch, "generic-16") else: From 2534cd14396816e6561cf68512c31be1498b97db Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 2 Apr 2015 11:53:04 +0300 Subject: [PATCH 25/33] knl/knc supports only x86-64, so adding that in alloy.py --- alloy.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/alloy.py b/alloy.py index ea699e2e..ffcb5743 100755 --- a/alloy.py +++ b/alloy.py @@ -591,6 +591,7 @@ def validation_run(only, only_targets, reference_branch, number, notify, update, LLVM = [newest_LLVM, "trunk"] gen_archs = ["x86-64"] knc_archs = ["x86-64"] + knl_archs = ["x86-64"] need_LLVM = check_LLVM(LLVM) for i in range(0,len(need_LLVM)): build_LLVM(need_LLVM[i], "", "", "", False, False, False, True, False, make, options.gcc_toolchain_path) @@ -612,14 +613,16 @@ def validation_run(only, only_targets, reference_branch, number, notify, update, # sometimes clang++ is not avaluable. 
if --ispc-build-compiler = gcc we will pass in g++ compiler if options.ispc_build_compiler == "gcc": stability.compiler_exe = "g++" - # but 'knc' target is supported only by icpc, so set explicitly - if "knc" in targets[j]: + # but 'knc/knl' target is supported only by icpc, so set explicitly + if ("knc" in targets[j]) or ("knl" in targets[j]): stability.compiler_exe = "icpc" # now set archs for targets if "generic" in targets[j]: arch = gen_archs elif "knc" in targets[j]: arch = knc_archs + elif "knl" in targets[j]: + arch = knl_archs else: arch = archs for i1 in range(0,len(arch)): @@ -638,10 +641,16 @@ def validation_run(only, only_targets, reference_branch, number, notify, update, for j in range(0,len(sde_targets)): stability.target = sde_targets[j][1] stability.wrapexe = os.environ["SDE_HOME"] + "/sde " + sde_targets[j][0] + " -- " - for i1 in range(0,len(archs)): + if "knc" in stability.target: + arch = knc_archs + elif "knl" in stability.target: + arch = knl_archs + else: + arch = archs + for i1 in range(0,len(arch)): for i2 in range(0,len(opts)): for i3 in range(dbg_begin,dbg_total): - stability.arch = archs[i1] + stability.arch = arch[i1] stability.no_opt = opts[i2] stability.ispc_flags = ispc_flags_tmp if (i3 != 0): From bf4376be1bc36c776e1498dfa75a2fef452842c3 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 2 Apr 2015 12:42:17 +0300 Subject: [PATCH 26/33] remove knl fails in fail_db to change 3.6 to 3.4 later --- fail_db.txt | 337 ---------------------------------------------------- 1 file changed, 337 deletions(-) diff --git a/fail_db.txt b/fail_db.txt index bc345008..ad3abd4b 100644 --- a/fail_db.txt +++ b/fail_db.txt @@ -169,340 +169,3 @@ ./tests/ptr-19.ispc runfail x86-64 generic-16 Linux LLVM 3.7 clang++3.4 -O0 * ./tests/ptr-22.ispc runfail x86-64 generic-16 Linux LLVM 3.7 clang++3.4 -O0 * ./tests/test-143.ispc runfail x86-64 generic-16 Linux LLVM 3.7 clang++3.4 -O0 * -./tests/array-gather-simple.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/array-multidim-gather-scatter.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/c-test-108.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/c-test-131.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-cif-nested-continue.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-for-loop-nested-continues.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-for-unif-test-vary-break.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-test-108.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-test-131.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-test-133.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-test-134.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-test-135.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-test-136.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-test-62.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-test-63.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-test-64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-test-65.ispc runfail x86-64 knl Linux LLVM 3.6 
icpc15.0 -O2 * -./tests/cfor-c-test-66.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-test-68.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-test-69.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-test-70.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-test-71.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-test-76.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-c-test-77.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-cif-nested-continue.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-test-108.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/cfor-test-131.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/exclusive-scan-add-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/exclusive-scan-add-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/exclusive-scan-add-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/exclusive-scan-add-9.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/funcptr-uniform-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/funcptr-uniform-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/funcptr-varying-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/funcptr-varying-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/funcptr-varying-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/funcptr-varying-9.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-double-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-double-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-float-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-float-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-int16-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-int16-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-int16-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-int16-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-int16-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-int16-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-int32-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-int32-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-int64-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-int64-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-int64-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-int64-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-int64-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-int64-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-int64-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gather-int64-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/global-decl-define.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gs-double-improve-progindex-plus-const.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gs-double-improve-progindex-plus-unif.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/gs-double-improve-unif.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 
-O2 * -./tests/gs-improve-unif.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/idiv.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/init-atomic-with-expr-list.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/local-atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/local-atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/local-atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/memcpy-varying.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/memmove-varying.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/memset-varying.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/packed-load-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/packed-store-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/packed-store2-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/paddus_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/pmuls_vi32.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/pmuls_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/pmulus_vi32.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/pmulus_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/prefetch-varying.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/ptr-diff-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/ptr-diff-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/ptr-diff-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/recursion-forward-func-decl.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/recursion.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/reduce-add-int64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/reduce-add-uint64-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/reduce-add-uint64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/ref-vec-param-index.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/shift-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/short-circuit.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/short-vec-11.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/short-vec-12.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/short-vec-18.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/soa-21.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/soa-22.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/soa-23.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/soa-24.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/soa-25.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/static-array-init-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/static-array-init.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/test-108.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/test-131.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/test-139.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/test-93.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/test-94.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/uint64-max-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/vector-varying-scatter-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 
-O2 * -./tests/vector-varying-scatter.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O2 * -./tests/array-gather-ifs.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/array-gather-simple.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/array-multidim-gather-scatter.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/array-struct-gather.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/atomics-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/atomics-varyingptr-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/avg-down-int16.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/avg-down-int8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/avg-down-uint16.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/avg-down-uint8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/avg-up-int16.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/avg-up-int8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/avg-up-uint16.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/avg-up-uint8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/c-test-108.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/c-test-131.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-array-gather-ifs.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-array-struct-gather.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-cif-nested-continue.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-for-loop-nested-continues.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-for-unif-test-vary-break.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-108.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-131.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-133.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-134.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-135.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-136.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-62.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-63.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-65.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-66.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-68.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-69.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-70.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-71.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-76.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-c-test-77.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-cif-nested-continue.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-struct-gather-2.ispc runfail x86-64 knl Linux LLVM 3.6 
icpc15.0 -O0 * -./tests/cfor-struct-gather-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-struct-test-114.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-test-108.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-test-131.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/cfor-unif-struct-test-114.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/count-leading-trailing-zeros-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/double-abs-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/double-abs.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/exclusive-scan-add-10.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/exclusive-scan-add-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/exclusive-scan-add-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/exclusive-scan-add-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/exclusive-scan-add-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/exclusive-scan-add-9.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/foreach-unique-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/frexp-double-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/frexp-double.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/func-ptr-initializer.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/funcptr-null-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/funcptr-null-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/funcptr-null-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/funcptr-null-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/funcptr-null-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/funcptr-uniform-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/funcptr-uniform-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/funcptr-uniform-9.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/funcptr-varying-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/funcptr-varying-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/funcptr-varying-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/funcptr-varying-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/funcptr-varying-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/funcptr-varying-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/funcptr-varying-9.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-double-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-double-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-double-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-double-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-float-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-float-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-float-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-float-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int16-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int16-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int16-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * 
-./tests/gather-int16-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int32-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int32-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int32-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int32-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int64-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int64-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int64-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int64-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int64-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int64-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int64-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int64-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int8-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int8-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int8-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-int8-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gather-struct-vector.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/global-decl-define.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gs-double-improve-progindex-plus-const.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gs-double-improve-progindex-plus-unif.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gs-double-improve-unif.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gs-double-improve-vary-one.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gs-double-improve-vary-zero.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gs-improve-unif.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gs-improve-vary-one.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/gs-improve-vary-zero.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/idiv.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/init-atomic-with-expr-list.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/insert-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/insert-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/int64-constant.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/int64-max-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/int64-max.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/int64-min-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/int64-min.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ldexp-double.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/local-atomics-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/local-atomics-varyingptr-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/local-atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/local-atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/local-atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/memcpy-varying.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/memmove-varying.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * 
-./tests/memset-varying.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/nested-statics-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/new-delete-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/packed-load-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/packed-store-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/packed-store2-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/padds_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/paddus_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/pass-varying-lvalue-to-ref.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/pdivs_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/pdivus_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/pmuls_vi32.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/pmuls_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/pmulus_vi32.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/pmulus_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/popcnt-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/prefetch-varying.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/psubs_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/psubus_vi64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-15.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-22.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-24.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-25.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-7.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-assign-lhs-math-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-cast-complex.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-cmp-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-diff-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-diff-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-diff-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-diff-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-diff-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-int-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-int-null-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-null-func-arg.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/ptr-varying-unif-index.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/recursion-forward-func-decl.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/recursion.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/reduce-add-int64-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/reduce-add-int64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/reduce-add-uint64-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/reduce-add-uint64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/reduce-equal-10.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/reduce-equal-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/reduce-max-int64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/reduce-max-uint64.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * 
-./tests/ref-vec-param-index.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/rotate-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/rotate-4.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/scatter-struct.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/scatter-vector.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/shift-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/shift-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/short-circuit.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/short-vec-10.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/short-vec-11.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/short-vec-12.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/short-vec-13.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/short-vec-14.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/short-vec-18.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/short-vec-19.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/short-vec-6.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/short-vec-8.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/short-vec-9.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/soa-18.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/soa-19.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/soa-21.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/soa-22.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/static-array-init-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/static-array-init.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/static-init.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/struct-gather-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/struct-gather-3.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/struct-nested-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/struct-ref-lvalue.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/struct-test-114.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/swizzle-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/swizzle-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/swizzle-5.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/test-108.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/test-131.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/test-139.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/test-143.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/test-93.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/test-94.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/uint64-max-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/uint64-max.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/uint64-min-1.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/uint64-min.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/unif-struct-test-114.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/unif-struct-test-115.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/unif-struct-test-116.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/unsized-array.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * 
-./tests/vector-varying-scatter-2.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * -./tests/vector-varying-scatter.ispc runfail x86-64 knl Linux LLVM 3.6 icpc15.0 -O0 * From 13084ece5f670a2c32d5fd4a0e62f76e2d4f4f8c Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 2 Apr 2015 13:00:42 +0300 Subject: [PATCH 27/33] add knl fails on 3.4 for -O2 (except foreach-unique-6.ispc, which ends up in infinite loop) --- fail_db.txt | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/fail_db.txt b/fail_db.txt index ad3abd4b..de924a32 100644 --- a/fail_db.txt +++ b/fail_db.txt @@ -169,3 +169,45 @@ ./tests/ptr-19.ispc runfail x86-64 generic-16 Linux LLVM 3.7 clang++3.4 -O0 * ./tests/ptr-22.ispc runfail x86-64 generic-16 Linux LLVM 3.7 clang++3.4 -O0 * ./tests/test-143.ispc runfail x86-64 generic-16 Linux LLVM 3.7 clang++3.4 -O0 * +./tests/atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/c-test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/c-test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/cfor-c-test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/cfor-c-test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/cfor-test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/cfor-test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/funcptr-varying-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/funcptr-varying-9.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/gather-int16-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/gather-int16-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/gather-int16-7.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/gather-int16-8.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/idiv.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/local-atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/local-atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/local-atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/memcpy-varying.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/memmove-varying.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/memset-varying.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/packed-load-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/packed-store-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/packed-store2-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/paddus_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/pmuls_vi32.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/pmuls_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/pmulus_vi32.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/pmulus_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/reduce-add-int64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/reduce-add-uint64-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/reduce-add-uint64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/shift-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 
-O2 * +./tests/soa-21.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/soa-22.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/soa-23.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/soa-24.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/soa-25.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/uint64-max-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * From 29c73f242cd31ecc147279d6ee2d99803c92be46 Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Thu, 2 Apr 2015 14:40:24 +0300 Subject: [PATCH 28/33] cast s(z)ext 64-bit function fix for knl target --- examples/intrinsics/knl.h | 48 ++++++++------- fail_db.txt | 122 ++++++++++++++++++++++++++++++++++---- 2 files changed, 137 insertions(+), 33 deletions(-) diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index 1bb9832d..32e363af 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -1785,21 +1785,26 @@ static FORCEINLINE __vec16_i32 __cast_sext(const __vec16_i32 &, const __vec16_i1 return _mm512_cvtepi16_epi32(val_t); } +static FORCEINLINE __vec16_i64 __cast_sext(const __vec16_i64 &, const __vec16_i32 &val) +{ + // TODO: this probably shall be optimized + __vec16_i64 a; + a.v_lo = _mm512_cvtepi32_epi64(_mm512_castsi512_si256(val)); + __vec16_i32 a_hi_32 = _mm512_permutevar_epi32(__vec16_i32(8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7), val); + a.v_hi = _mm512_cvtepi32_epi64(_mm512_castsi512_si256(a_hi_32)); + return a; +} + static FORCEINLINE __vec16_i64 __cast_sext(const __vec16_i64 &, const __vec16_i1 &val) { - __vec16_i32 ret = _mm512_mask_mov_epi32(_mm512_setzero_epi32(), val, _mm512_set1_epi32(-1)); - return __vec16_i64(ret, ret); + __vec16_i32 a = _mm512_mask_mov_epi32(_mm512_setzero_epi32(), val, _mm512_set1_epi32(-1)); + return __cast_sext(__vec16_i64(), a); } static FORCEINLINE __vec16_i64 __cast_sext(const __vec16_i64 &, const __vec16_i8 &val) { __vec16_i32 a = __cast_sext(__vec16_i32(), val); - return __vec16_i64(a.v, _mm512_srai_epi32(a.v, 31)); -} - -static FORCEINLINE __vec16_i64 __cast_sext(const __vec16_i64 &, const __vec16_i32 &val) -{ - return __vec16_i64(val.v, _mm512_srai_epi32(val.v, 31)); + return __cast_sext(__vec16_i64(), a); } static FORCEINLINE __vec16_i8 __cast_zext(const __vec16_i8 &, const __vec16_i1 &val) @@ -1841,28 +1846,31 @@ static FORCEINLINE __vec16_i32 __cast_zext(const __vec16_i32 &, const __vec16_i1 return _mm512_cvtepu16_epi32(val_t); } +static FORCEINLINE __vec16_i64 __cast_zext(const __vec16_i64 &, const __vec16_i32 &val) +{ + // TODO: this probably shall be optimized + __vec16_i64 a; + a.v_lo = _mm512_cvtepu32_epi64(_mm512_castsi512_si256(val)); + __vec16_i32 a_hi_32 = _mm512_permutevar_epi32(__vec16_i32(8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7), val); + a.v_hi = _mm512_cvtepu32_epi64(_mm512_castsi512_si256(a_hi_32)); + return a; +} + static FORCEINLINE __vec16_i64 __cast_zext(const __vec16_i64 &, const __vec16_i1 &val) { - __vec16_i32 ret_hi = _mm512_setzero_epi32(); - __vec16_i32 ret_lo = _mm512_setzero_epi32(); - __vec16_i32 one = _mm512_set1_epi32(1); - ret_lo = _mm512_mask_mov_epi32(ret_lo, val, one); - return __vec16_i64 (ret_lo, ret_hi); + __vec16_i32 ret = _mm512_setzero_epi32(); + ret = _mm512_mask_mov_epi32(ret, val, _mm512_set1_epi32(1)); + return __cast_zext(__vec16_i64(), ret); } static FORCEINLINE __vec16_i64 
__cast_zext(const __vec16_i64 &, const __vec16_i8 &val) { - return __vec16_i64(__cast_zext(__vec16_i32(), val), _mm512_setzero_epi32()); + return __cast_zext(__vec16_i64(), __cast_zext(__vec16_i32(), val)); } static FORCEINLINE __vec16_i64 __cast_zext(const __vec16_i64 &, const __vec16_i16 &val) { - return __vec16_i64(__cast_zext(__vec16_i32(), val), _mm512_setzero_epi32()); -} - -static FORCEINLINE __vec16_i64 __cast_zext(const __vec16_i64 &, const __vec16_i32 &val) -{ - return __vec16_i64(val.v, _mm512_setzero_epi32()); + return __cast_zext(__vec16_i64(), __cast_zext(__vec16_i32(), val)); } static FORCEINLINE __vec16_f __cast_sitofp(__vec16_f, __vec16_i32 val) { diff --git a/fail_db.txt b/fail_db.txt index de924a32..0f0fccb1 100644 --- a/fail_db.txt +++ b/fail_db.txt @@ -184,30 +184,126 @@ ./tests/gather-int16-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * ./tests/gather-int16-7.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * ./tests/gather-int16-8.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * -./tests/idiv.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * ./tests/local-atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * ./tests/local-atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * ./tests/local-atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * ./tests/memcpy-varying.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * -./tests/memmove-varying.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * ./tests/memset-varying.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * ./tests/packed-load-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * ./tests/packed-store-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * ./tests/packed-store2-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * ./tests/paddus_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * -./tests/pmuls_vi32.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * -./tests/pmuls_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * -./tests/pmulus_vi32.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * ./tests/pmulus_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * -./tests/reduce-add-int64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * -./tests/reduce-add-uint64-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * -./tests/reduce-add-uint64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * ./tests/shift-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * -./tests/soa-21.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * -./tests/soa-22.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * -./tests/soa-23.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * -./tests/soa-24.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * -./tests/soa-25.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * ./tests/test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * ./tests/test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * ./tests/uint64-max-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * +./tests/atomics-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/c-test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/c-test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 
-O0 * +./tests/cfor-c-test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/cfor-c-test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/cfor-test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/cfor-test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/count-leading-trailing-zeros-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/double-abs-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/double-abs.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/exclusive-scan-add-10.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/exclusive-scan-add-7.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/frexp-double-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/frexp-double.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/funcptr-null-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/funcptr-null-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/funcptr-null-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/funcptr-null-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/funcptr-null-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/funcptr-uniform-9.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/funcptr-varying-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/funcptr-varying-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/funcptr-varying-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/funcptr-varying-9.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/gather-double-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/gather-double-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/gather-float-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/gather-float-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/gather-int16-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/gather-int16-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/gather-int32-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/gather-int32-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/gather-int64-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/gather-int64-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/gather-int8-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/gather-int8-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/gather-struct-vector.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/insert-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/insert-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/int64-constant.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/int64-max-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/int64-max.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/int64-min-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/int64-min.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ldexp-double.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/local-atomics-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/local-atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/local-atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * 
+./tests/local-atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/memcpy-varying.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/memmove-varying.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/memset-varying.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/packed-load-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/packed-store-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/packed-store2-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/padds_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/paddus_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/pass-varying-lvalue-to-ref.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/pdivs_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/pdivus_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/pmuls_vi32.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/pmuls_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/pmulus_vi32.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/pmulus_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/popcnt-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/prefetch-varying.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/psubs_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/psubus_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-15.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-22.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-24.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-25.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-7.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-assign-lhs-math-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-cast-complex.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-cmp-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-diff-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-diff-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-diff-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-diff-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-diff-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-int-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-int-null-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-null-func-arg.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/ptr-varying-unif-index.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/reduce-add-int64-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/reduce-add-int64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/reduce-add-uint64-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/reduce-add-uint64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/reduce-equal-10.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/reduce-equal-8.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/reduce-max-int64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/reduce-max-uint64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * +./tests/reduce-min-uint64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * 
+./tests/rotate-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
+./tests/rotate-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
+./tests/shift-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
+./tests/shift-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
+./tests/shift-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
+./tests/struct-nested-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
+./tests/test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
+./tests/test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
+./tests/uint64-max-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
+./tests/uint64-max.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
+./tests/uint64-min-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
+./tests/uint64-min.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *

From 0292730cea66cd4797c96f766972487b52c17c97 Mon Sep 17 00:00:00 2001
From: Anton Mitrokhin
Date: Thu, 2 Apr 2015 14:56:20 +0300
Subject: [PATCH 29/33] 64-bit min-max functions for knl.h

---
 examples/intrinsics/knl.h | 28 ++++++++--------------------
 1 file changed, 8 insertions(+), 20 deletions(-)

diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h
index 32e363af..5385652a 100644
--- a/examples/intrinsics/knl.h
+++ b/examples/intrinsics/knl.h
@@ -2565,33 +2565,29 @@ static FORCEINLINE __vec16_i32 __min_varying_uint32(__vec16_i32 v1, __vec16_i32
 static FORCEINLINE __vec16_i64 __max_varying_int64 (__vec16_i64 v1, __vec16_i64 v2) {
     __vec16_i64 ret;
-    ret.v_hi = _mm512_max_epi32(v1.v_hi, v2.v_hi);
-    __vec16_i1 mask = _mm512_cmpeq_epi32_mask(ret.v_hi, v2.v_hi);
-    ret.v_lo = _mm512_mask_max_epi32(v1.v_lo, mask, v1.v_lo, v2.v_lo);
+    ret.v_hi = _mm512_max_epi64(v1.v_hi, v2.v_hi);
+    ret.v_lo = _mm512_max_epi64(v1.v_lo, v2.v_lo);
     return ret;
 }

 static FORCEINLINE __vec16_i64 __min_varying_int64 (__vec16_i64 v1, __vec16_i64 v2) {
     __vec16_i64 ret;
-    ret.v_hi = _mm512_min_epi32(v1.v_hi, v2.v_hi);
-    __vec16_i1 mask = _mm512_cmpeq_epi32_mask(ret.v_hi, v2.v_hi);
-    ret.v_lo = _mm512_mask_min_epi32(v1.v_lo, mask, v1.v_lo, v2.v_lo);
+    ret.v_hi = _mm512_min_epi64(v1.v_hi, v2.v_hi);
+    ret.v_lo = _mm512_min_epi64(v1.v_lo, v2.v_lo);
     return ret;
 }

 static FORCEINLINE __vec16_i64 __max_varying_uint64 (__vec16_i64 v1, __vec16_i64 v2) {
     __vec16_i64 ret;
-    ret.v_hi = _mm512_max_epu32(v1.v_hi, v2.v_hi);
-    __vec16_i1 mask = _mm512_cmpeq_epi32_mask(ret.v_hi, v2.v_hi);
-    ret.v_lo = _mm512_mask_max_epu32(v1.v_lo, mask, v1.v_lo, v2.v_lo);
+    ret.v_hi = _mm512_max_epu64(v1.v_hi, v2.v_hi);
+    ret.v_lo = _mm512_max_epu64(v1.v_lo, v2.v_lo);
     return ret;
 }

 static FORCEINLINE __vec16_i64 __min_varying_uint64 (__vec16_i64 v1, __vec16_i64 v2) {
     __vec16_i64 ret;
-    ret.v_hi = _mm512_min_epu32(v1.v_hi, v2.v_hi);
-    __vec16_i1 mask = _mm512_cmpeq_epi32_mask(ret.v_hi, v2.v_hi);
-    ret.v_lo = _mm512_mask_min_epu32(v1.v_lo, mask, v1.v_lo, v2.v_lo);
+    ret.v_hi = _mm512_min_epu64(v1.v_hi, v2.v_hi);
+    ret.v_lo = _mm512_min_epu64(v1.v_lo, v2.v_lo);
     return ret;
 }

@@ -2796,14 +2792,6 @@ static FORCEINLINE __vec16_i32 __masked_load_i32(void *p, __vec16_i1 mask) {
 #ifdef ISPC_FORCE_ALIGNED_MEMORY
     return _mm512_mask_load_epi32(_mm512_undefined_epi32(), mask, p);
 #else
-    printf("\n\n\n");
-    printf_v(mask);
-    for(int i=0; i <3; ++i) {
-        printf("%d ", ((uint32_t *)p)[i]);
-    }
-    printf("\n");
-    printf_v(_mm512_mask_loadu_epi32(_mm512_undefined_epi32(), mask, p));
-    printf("\n\n\n");
    return _mm512_mask_loadu_epi32(_mm512_undefined_epi32(), mask, p);
 #endif
 }
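The cast and min/max patches above rely on knl.h's split representation of a 16-wide 64-bit vector: lanes 0..7 live in one __m512i (v_lo) and lanes 8..15 in another (v_hi), each lane already widened to 64 bits, which is what lets the min/max wrappers fall straight through to the native _mm512_min_epi64/_mm512_max_epi64 instructions. The sketch below is a minimal standalone illustration of that layout, not the header itself: the v16i64 struct and function names are invented for the example, AVX-512F is assumed, and _mm512_permutexvar_epi32 is used where knl.h goes through its own __vec16_i32 wrapper around _mm512_permutevar_epi32.

    #include <immintrin.h>
    #include <cstdint>
    #include <cstdio>

    // Stand-in for the header's split 16 x i64 vector: lanes 0..7 in lo,
    // lanes 8..15 in hi, each lane already widened to 64 bits.
    struct v16i64 { __m512i lo, hi; };

    static v16i64 sext_i32_to_i64(__m512i v) {
        // Low half: sign-extend the first eight 32-bit lanes directly.
        // High half: rotate lanes 8..15 down first, then sign-extend them.
        const __m512i swap = _mm512_setr_epi32(8, 9, 10, 11, 12, 13, 14, 15,
                                               0, 1, 2, 3, 4, 5, 6, 7);
        v16i64 r;
        r.lo = _mm512_cvtepi32_epi64(_mm512_castsi512_si256(v));
        r.hi = _mm512_cvtepi32_epi64(
                   _mm512_castsi512_si256(_mm512_permutexvar_epi32(swap, v)));
        return r;
    }

    static v16i64 min_i64(v16i64 a, v16i64 b) {
        // AVX-512F has a native 64-bit min, so no 32-bit hi/lo comparison
        // tricks are needed any more.
        return { _mm512_min_epi64(a.lo, b.lo), _mm512_min_epi64(a.hi, b.hi) };
    }

    int main() {
        alignas(64) int32_t in[16];
        for (int i = 0; i < 16; ++i) in[i] = (i % 2) ? -i : i;
        v16i64 wide = sext_i32_to_i64(_mm512_load_si512(in));
        v16i64 m = min_i64(wide, sext_i32_to_i64(_mm512_set1_epi32(3)));
        alignas(64) int64_t out[16];
        _mm512_store_si512(out, m.lo);
        _mm512_store_si512(out + 8, m.hi);
        for (int i = 0; i < 16; ++i) printf("%lld ", (long long)out[i]);
        printf("\n");
        return 0;
    }

Built with something like icpc -xMIC-AVX512 or g++ -mavx512f, it prints the per-lane minima of the sign-extended input and the constant 3.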
From c22ee2b0a0a48441fa20699040322b746cfc9176 Mon Sep 17 00:00:00 2001
From: Anton Mitrokhin
Date: Thu, 2 Apr 2015 16:13:41 +0300
Subject: [PATCH 30/33] some minor fixes in knl.h, updated fail_db.txt

---
 examples/intrinsics/knl.h |  5 +++--
 fail_db.txt               | 23 -----------------------
 2 files changed, 3 insertions(+), 25 deletions(-)

diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h
index 5385652a..51140954 100644
--- a/examples/intrinsics/knl.h
+++ b/examples/intrinsics/knl.h
@@ -796,6 +796,7 @@ static FORCEINLINE __vec16_i32 __shift_i32(__vec16_i32 v, int index) {
     __vec16_i1 mask = mask_ge & mask_le;
     __vec16_i32 ret = __smear_i32<__vec16_i32>(0);
     ret = _mm512_mask_permutevar_epi32(ret, mask, mod_index, v);
+    return ret;
 }

 template <int ALIGN> static FORCEINLINE __vec16_i32 __load(const __vec16_i32 *p) {
@@ -1986,7 +1987,7 @@ static FORCEINLINE __vec16_f __cast_uitofp(__vec16_f, __vec16_i64 val) {
         ((float*)&ret)[i + 8] = ((float)(((uint64_t*)&tmp2)[i]));
     }
     */
-    ret[0] = ((float)(val[0]));
+    ret[0] = ((float)(((uint64_t*)&val.v_lo)[0]));
     ret[1] = ((float)(((uint64_t*)&val.v_lo)[1]));
     ret[2] = ((float)(((uint64_t*)&val.v_lo)[2]));
     ret[3] = ((float)(((uint64_t*)&val.v_lo)[3]));
@@ -2049,7 +2050,7 @@ static FORCEINLINE __vec16_d __cast_uitofp(__vec16_d, __vec16_i64 val) {

 // float/double to signed int
 static FORCEINLINE __vec16_i32 __cast_fptosi(__vec16_i32, __vec16_f val) {
-    return _mm512_cvtps_epi32(val);
+    return _mm512_cvt_roundps_epi32(val, (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC));
 }

 static FORCEINLINE __vec16_i8 __cast_fptosi(__vec16_i8, __vec16_f val) {
diff --git a/fail_db.txt b/fail_db.txt
index 0f0fccb1..cd8baec3 100644
--- a/fail_db.txt
+++ b/fail_db.txt
@@ -172,12 +172,6 @@
 ./tests/atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
 ./tests/atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
 ./tests/atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/c-test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/c-test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/cfor-c-test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/cfor-c-test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/cfor-test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/cfor-test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
 ./tests/funcptr-varying-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
 ./tests/funcptr-varying-9.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
 ./tests/gather-int16-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
@@ -192,22 +186,10 @@
 ./tests/packed-load-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
 ./tests/packed-store-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
 ./tests/packed-store2-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/paddus_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/pmulus_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/shift-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/uint64-max-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
 ./tests/atomics-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
 ./tests/atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
 ./tests/atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
 ./tests/atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
-./tests/c-test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
-./tests/c-test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
-./tests/cfor-c-test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
-./tests/cfor-c-test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
-./tests/cfor-test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
-./tests/cfor-test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
 ./tests/count-leading-trailing-zeros-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
 ./tests/double-abs-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
 ./tests/double-abs.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
@@ -297,12 +279,7 @@
 ./tests/reduce-min-uint64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
 ./tests/rotate-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
 ./tests/rotate-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
-./tests/shift-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
-./tests/shift-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
-./tests/shift-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
 ./tests/struct-nested-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
-./tests/test-108.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
-./tests/test-131.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
 ./tests/uint64-max-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
 ./tests/uint64-max.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
 ./tests/uint64-min-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
 ./tests/uint64-min.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 *
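The __cast_fptosi change above swaps _mm512_cvtps_epi32, which rounds according to the current MXCSR mode (round-to-nearest-even by default), for _mm512_cvt_roundps_epi32 with _MM_FROUND_TO_ZERO, because a C-style float-to-int cast must truncate toward zero. A small self-contained check of the difference (the test value is made up, AVX-512F assumed):

    #include <immintrin.h>
    #include <cstdint>
    #include <cstdio>

    int main() {
        // A C-style cast of 2.7f to int must truncate to 2; the default
        // MXCSR rounding used by _mm512_cvtps_epi32 gives 3 instead.
        __m512 f = _mm512_set1_ps(2.7f);
        __m512i nearest = _mm512_cvtps_epi32(f);
        __m512i trunc   = _mm512_cvt_roundps_epi32(f, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
        alignas(64) int32_t a[16], b[16];
        _mm512_store_si512(a, nearest);
        _mm512_store_si512(b, trunc);
        printf("nearest: %d, truncated: %d\n", a[0], b[0]);  // prints 3 and 2
        return 0;
    }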
From 81251115af117d11e55cd946dbc7f13926cc1565 Mon Sep 17 00:00:00 2001
From: Anton Mitrokhin
Date: Thu, 2 Apr 2015 17:24:57 +0300
Subject: [PATCH 31/33] packed load/store for knl

---
 examples/intrinsics/knl.h | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)

diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h
index 51140954..3dd33c70 100644
--- a/examples/intrinsics/knl.h
+++ b/examples/intrinsics/knl.h
@@ -3070,11 +3070,10 @@ static FORCEINLINE __vec16_i8 __gather_base_offsets64_i8(uint8_t *_base, uint32_
 {
     // TODO
     __vec16_i8 ret;
-    for (int i = 0; i < 16; ++i)
-        if ((mask & (1 << i)) != 0) {
-            int8_t *ptr = (int8_t *)(_base + scale * offsets[i]);
-            ret[i] = *ptr;
-        }
+    for (int i = 0; i < 16; ++i) {
+        int8_t *ptr = (int8_t *)(_base + scale * offsets[i]);
+        ret[i] = *ptr;
+    }
     return ret;
 }
@@ -3087,12 +3086,11 @@ static FORCEINLINE __vec16_i16 __gather_base_offsets64_i16(uint8_t *_base, uint3
     __vec16_i1 mask)
 {
     // TODO
-    __vec16_i16 ret;
-    for (int i = 0; i < 16; ++i)
-        if ((mask & (1 << i)) != 0) {
-            int16_t *ptr = (int16_t *)(_base + scale * offsets[i]);
-            ret[i] = *ptr;
-        }
+    __vec16_i16 ret;
+    for (int i = 0; i < 16; i++) {
+        int16_t *ptr = (int16_t *)(_base + scale * offsets[i]);
+        ret.v[i] = *ptr;
+    }
     return ret;
 }
@@ -3124,7 +3122,7 @@ static FORCEINLINE void __scatter_base_offsets64_i8(uint8_t *_base, uint32_t sca
     // TODO
     for (int i = 0; i < 16; ++i)
         if ((mask & (1 << i)) != 0) {
-            int8_t *ptr = (int8_t *)(_base + scale * offsets[i]);
+            int8_t *ptr = (int8_t *)(_base + scale * offsets[i]);
             *ptr = value[i];
         }
 }
@@ -3198,13 +3196,13 @@ static FORCEINLINE int32_t __packed_load_active(uint32_t *p, __vec16_i32 *val,
                                                 __vec16_i1 mask) {
     __vec16_i32 v = __load<64>(val);
-    v = _mm512_mask_loadu_epi32(v, mask, p);
+    v = _mm512_mask_expandloadu_epi32(v, mask, p);
     __store<64>(val, v);
     return _mm_countbits_32(uint32_t(mask));
 }

 static FORCEINLINE int32_t __packed_store_active(uint32_t *p, __vec16_i32 val, __vec16_i1 mask) {
-    _mm512_mask_storeu_epi32(p, mask, val);
+    _mm512_mask_compressstoreu_epi32(p, mask, val);
     return _mm_countbits_32(uint32_t(mask));
 }
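__packed_store_active is expected to write only the values of the active lanes, packed contiguously starting at p, and __packed_load_active is the inverse, so plain masked storeu/loadu (which keep every element in its own lane position) were the wrong primitives; the AVX-512 compress-store and expand-load instructions match the required semantics directly. A minimal standalone demonstration of that behaviour (the buffers and mask are made up, AVX-512F assumed):

    #include <immintrin.h>
    #include <cstdint>
    #include <cstdio>

    int main() {
        // Keep the even lanes of 0..15 and pack them densely at the start of
        // dst, which is the behaviour __packed_store_active needs.
        alignas(64) int32_t src[16], dst[16] = {0};
        for (int i = 0; i < 16; ++i) src[i] = i;
        __mmask16 active = 0x5555;                        // lanes 0, 2, 4, ..., 14
        _mm512_mask_compressstoreu_epi32(dst, active, _mm512_load_si512(src));

        int count = 0;                                    // number of active lanes
        for (unsigned m = active; m; m >>= 1) count += m & 1;
        for (int i = 0; i < count; ++i) printf("%d ", dst[i]);   // 0 2 4 ... 14
        printf("\n");

        // The inverse direction: expand the packed buffer back into its lanes.
        __m512i back = _mm512_mask_expandloadu_epi32(_mm512_setzero_si512(), active, dst);
        alignas(64) int32_t relaid[16];
        _mm512_store_si512(relaid, back);
        printf("lane 4 holds %d again\n", relaid[4]);
        return 0;
    }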
From a3737e2b814d7d6e8179343e22e5ffcb2bc37e36 Mon Sep 17 00:00:00 2001
From: Anton Mitrokhin
Date: Thu, 2 Apr 2015 18:23:04 +0300
Subject: [PATCH 32/33] i64 store fix for knl

---
 examples/intrinsics/knl.h |   9 ++-
 fail_db.txt               | 114 --------------------------------------------------------------
 2 files changed, 4 insertions(+), 119 deletions(-)

diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h
index 3dd33c70..153dff84 100644
--- a/examples/intrinsics/knl.h
+++ b/examples/intrinsics/knl.h
@@ -1135,11 +1135,10 @@ static FORCEINLINE __vec16_i64 __shuffle2_i64(__vec16_i64 v0, __vec16_i64 v1, __
 template <int ALIGN> static FORCEINLINE __vec16_i64 __load(const __vec16_i64 *p) {
     __vec16_i64 v;
-    const uint8_t*ptr = (const uint8_t*)p;
+    const uint8_t *ptr = (const uint8_t *)p;
     v.v_lo = _mm512_loadu_si512(ptr);
     v.v_hi = _mm512_loadu_si512(ptr+64);
-
     return v;
 }
@@ -1158,7 +1157,7 @@ template <> FORCEINLINE __vec16_i64 __load<128>(const __vec16_i64 *p) {
 template <int ALIGN> static FORCEINLINE void __store(__vec16_i64 *p, __vec16_i64 v) {
     _mm512_storeu_si512(p, v.v_lo);
-    _mm512_storeu_si512(p+64, v.v_hi);
+    _mm512_storeu_si512((uint8_t*)p+64, v.v_hi);
 }
 #if 0
 template <> FORCEINLINE void __store<64>(__vec16_i64 *p, __vec16_i64 v) {
@@ -2964,10 +2963,10 @@ static FORCEINLINE void __masked_store_blend_float(void *p, __vec16_f val,
 static FORCEINLINE __vec16_i8 __gather_base_offsets32_i8(uint8_t *base, uint32_t scale, __vec16_i32 offsets,
                                                          __vec16_i1 mask) {
     // TODO
-    __vec16_i8 ret;
+    __vec16_i8 ret;
     for (int i = 0; i < 16; ++i)
         if ((mask & (1 << i)) != 0) {
-            int8_t *ptr = (int8_t *)(base + scale * offsets[i]);
+            int8_t *ptr = (int8_t *)(base + scale * offsets[i]);
             ret[i] = *ptr;
         }
     return ret;
 }
diff --git a/fail_db.txt b/fail_db.txt
index cd8baec3..618f1895 100644
--- a/fail_db.txt
+++ b/fail_db.txt
@@ -169,118 +169,4 @@
 ./tests/ptr-19.ispc runfail x86-64 generic-16 Linux LLVM 3.7 clang++3.4 -O0 *
 ./tests/ptr-22.ispc runfail x86-64 generic-16 Linux LLVM 3.7 clang++3.4 -O0 *
 ./tests/test-143.ispc runfail x86-64 generic-16 Linux LLVM 3.7 clang++3.4 -O0 *
-./tests/atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/funcptr-varying-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/funcptr-varying-9.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/gather-int16-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/gather-int16-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/gather-int16-7.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/gather-int16-8.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/local-atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/local-atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/local-atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/memcpy-varying.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 *
-./tests/memset-varying.ispc runfail x86-64 knl Linux
LLVM 3.4 icpc15.0 -O2 * -./tests/packed-load-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * -./tests/packed-store-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * -./tests/packed-store2-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O2 * -./tests/atomics-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/count-leading-trailing-zeros-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/double-abs-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/double-abs.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/exclusive-scan-add-10.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/exclusive-scan-add-7.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/frexp-double-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/frexp-double.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/funcptr-null-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/funcptr-null-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/funcptr-null-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/funcptr-null-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/funcptr-null-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/funcptr-uniform-9.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/funcptr-varying-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/funcptr-varying-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/funcptr-varying-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/funcptr-varying-9.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/gather-double-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/gather-double-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/gather-float-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/gather-float-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/gather-int16-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/gather-int16-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/gather-int32-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/gather-int32-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/gather-int64-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/gather-int64-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/gather-int8-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/gather-int8-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/gather-struct-vector.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/insert-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/insert-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/int64-constant.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/int64-max-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/int64-max.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/int64-min-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/int64-min.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ldexp-double.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 
-O0 * -./tests/local-atomics-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/local-atomics-varyingptr-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/local-atomics-varyingptr-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/local-atomics-varyingptr-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/memcpy-varying.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/memmove-varying.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/memset-varying.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/packed-load-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/packed-store-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/packed-store2-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/padds_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/paddus_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/pass-varying-lvalue-to-ref.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/pdivs_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/pdivus_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/pmuls_vi32.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/pmuls_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/pmulus_vi32.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/pmulus_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/popcnt-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/prefetch-varying.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/psubs_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/psubus_vi64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ptr-15.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * ./tests/ptr-22.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ptr-24.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ptr-25.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ptr-7.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ptr-assign-lhs-math-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ptr-cast-complex.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ptr-cmp-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ptr-diff-2.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ptr-diff-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ptr-diff-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ptr-diff-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ptr-diff-6.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ptr-int-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ptr-int-null-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ptr-null-func-arg.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/ptr-varying-unif-index.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/reduce-add-int64-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/reduce-add-int64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/reduce-add-uint64-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/reduce-add-uint64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/reduce-equal-10.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/reduce-equal-8.ispc runfail x86-64 knl Linux LLVM 3.4 
icpc15.0 -O0 * -./tests/reduce-max-int64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/reduce-max-uint64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/reduce-min-uint64.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/rotate-3.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/rotate-4.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/struct-nested-5.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/uint64-max-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/uint64-max.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/uint64-min-1.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * -./tests/uint64-min.ispc runfail x86-64 knl Linux LLVM 3.4 icpc15.0 -O0 * From 74c1feed087d6fbd2d636b06a2851e8c3933677e Mon Sep 17 00:00:00 2001 From: Anton Mitrokhin Date: Fri, 3 Apr 2015 13:07:55 +0300 Subject: [PATCH 33/33] fixed ptr-22.ispc on knl --- examples/intrinsics/knl.h | 8 +++++--- examples/intrinsics/known_fails.txt | 13 ------------- fail_db.txt | 1 - 3 files changed, 5 insertions(+), 17 deletions(-) delete mode 100644 examples/intrinsics/known_fails.txt diff --git a/examples/intrinsics/knl.h b/examples/intrinsics/knl.h index 153dff84..f23623c2 100644 --- a/examples/intrinsics/knl.h +++ b/examples/intrinsics/knl.h @@ -49,7 +49,7 @@ #include // for operator<<(m512[i]) #if __INTEL_COMPILER < 1500 -#warning "Only ICC 15.0 and older are supported. Please, update your compiler!" +#error "Only ICC 15.0 and older are supported. Please, update your compiler!" #endif @@ -3106,8 +3106,10 @@ static FORCEINLINE void __scatter_base_offsets64_float(uint8_t *_base, uint32_t static FORCEINLINE void __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, __vec16_i32 value, __vec16_i1 mask) { - _mm512_mask_i64scatter_epi32(_base, mask.lo(), offsets.v_lo, _mm512_extracti64x4_epi64(value, 0), scale); - _mm512_mask_i64scatter_epi32(_base, mask.hi(), offsets.v_hi, _mm512_extracti64x4_epi64(value, 1), scale); + __m256i value_lo = _mm512_extracti64x4_epi64(value.v, 0); + __m256i value_hi = _mm512_extracti64x4_epi64(value.v, 1); + _mm512_mask_i64scatter_epi32(_base, mask.lo(), offsets.v_lo, value_lo, scale); + _mm512_mask_i64scatter_epi32(_base, mask.hi(), offsets.v_hi, value_hi, scale); } static FORCEINLINE void __scatter_base_offsets64_i64(uint8_t *_base, uint32_t scale, __vec16_i64 offsets, diff --git a/examples/intrinsics/known_fails.txt b/examples/intrinsics/known_fails.txt deleted file mode 100644 index cda10901..00000000 --- a/examples/intrinsics/known_fails.txt +++ /dev/null @@ -1,13 +0,0 @@ -=============================================================================== -__and_not2 : _mm512_kandnr -> _mm512_kandn - ./tests/cfor-c-cif-nested-continue.ispc - ./tests/cfor-c-test-134.ispc - ./tests/cfor-c-test-135.ispc - ./tests/cfor-c-test-136.ispc - ./tests/cfor-c-test-64.ispc - ./tests/cfor-c-test-70.ispc - ./tests/cfor-c-test-71.ispc - ./tests/recursion-forward-func-decl.ispc - ./tests/recursion.ispc -=============================================================================== - diff --git a/fail_db.txt b/fail_db.txt index 618f1895..ad3abd4b 100644 --- a/fail_db.txt +++ b/fail_db.txt @@ -169,4 +169,3 @@ ./tests/ptr-19.ispc runfail x86-64 generic-16 Linux LLVM 3.7 clang++3.4 -O0 * ./tests/ptr-22.ispc runfail x86-64 generic-16 Linux LLVM 3.7 clang++3.4 -O0 * ./tests/test-143.ispc runfail x86-64 generic-16 Linux LLVM 3.7 clang++3.4 -O0 * -./tests/ptr-22.ispc runfail x86-64 
knl Linux LLVM 3.4 icpc15.0 -O0 *
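Two details in the last two patches are worth spelling out. The i64 store fix is plain pointer arithmetic: p is a __vec16_i64*, so p+64 advances by 64 whole 128-byte vectors rather than by 64 bytes, and the high half has to be stored through a byte pointer instead. The ptr-22 fix keeps the same two-instruction scatter but extracts the 256-bit halves of the data vector explicitly, since vpscatterqd pairs eight 64-bit offsets with eight 32-bit elements per instruction. The sketch below reproduces that scatter pattern outside the header; the helper name, buffers and offsets are invented for the example, and AVX-512F is assumed.

    #include <immintrin.h>
    #include <cstdint>
    #include <cstdio>

    // 16-lane 32-bit scatter with 64-bit byte offsets, issued as two 8-lane
    // halves: eight 64-bit indices are paired with eight 32-bit elements.
    static void scatter32_offsets64(uint8_t* base, __m512i idx_lo, __m512i idx_hi,
                                    __m512i values, __mmask16 mask) {
        __m256i val_lo = _mm512_extracti64x4_epi64(values, 0);   // lanes 0..7
        __m256i val_hi = _mm512_extracti64x4_epi64(values, 1);   // lanes 8..15
        _mm512_mask_i64scatter_epi32(base, (__mmask8)(mask & 0xFF), idx_lo, val_lo, 1);
        _mm512_mask_i64scatter_epi32(base, (__mmask8)(mask >> 8),   idx_hi, val_hi, 1);
    }

    int main() {
        alignas(64) int32_t out[16] = {0};
        alignas(64) int64_t off_lo[8], off_hi[8];
        for (int i = 0; i < 8; ++i) { off_lo[i] = 4 * i; off_hi[i] = 4 * (i + 8); }
        __m512i values = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7,
                                           8, 9, 10, 11, 12, 13, 14, 15);
        scatter32_offsets64((uint8_t*)out, _mm512_load_si512(off_lo),
                            _mm512_load_si512(off_hi), values, 0xFFFF);
        for (int i = 0; i < 16; ++i) printf("%d ", out[i]);   // 0 1 2 ... 15
        printf("\n");
        return 0;
    }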