Merge pull request #845 from ncos/knc-fails
Fixed 64-bit gather/scatter functions in knc.h and knc-i1x16.h (fixes a stability problem with ptr-22.ispc at -O0, which appears as an intermittent segfault)
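Why the fix works: the KNC i32-offset gather/scatter intrinsics treat their 32-bit per-lane offsets as signed, so passing the raw low 32 bits of a 64-bit offset mis-addresses any lane whose low half is >= 2^31. The patch biases the low halves by INT_MIN (mapping the full unsigned 32-bit range onto the signed range) and compensates by adding scale * 2^31 into the base pointer. A minimal scalar sketch of that identity follows; knc_style_address and the constants are illustrative assumptions, the real code applies the same bias per lane with _mm512_add_epi32 and the adjusted base.

// Scalar model of the bias trick in this commit (illustrative only).
#include <cassert>
#include <climits>
#include <cstdint>

// How an i32-offset gather/scatter forms an address:
// base plus scale times the sign-extended 32-bit index.
static uint64_t knc_style_address(uint64_t base, uint32_t scale, int32_t index32) {
    return base + (uint64_t)((int64_t)scale * (int64_t)index32);
}

int main() {
    const uint32_t scale = 4;
    const uint64_t base  = 0x100000000ull;
    for (uint64_t lo : {0ull, 1ull, 0x7fffffffull, 0x80000000ull, 0xffffffffull}) {
        // Bias the unsigned low 32 bits by INT_MIN (wraps mod 2^32) ...
        int32_t biased = (int32_t)(uint32_t)(lo + (uint32_t)INT_MIN);
        // ... and add scale * 2^31 back into the base pointer.
        uint64_t adjusted_base = base + (uint64_t)scale * (uint64_t)(-(int64_t)INT_MIN);
        // The biased address equals the intended base + scale*lo for every lo.
        assert(knc_style_address(adjusted_base, scale, biased) == base + scale * lo);
    }
    return 0;
}

For every possible low half, adjusted_base + scale * biased comes out to base + scale * lo, which is exactly the address the gather/scatter should touch; before the fix, lanes with lo >= 2^31 were sign-extended downward and read/wrote the wrong location.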
@@ -1,5 +1,5 @@
 /**
-  Copyright (c) 2010-2013, Intel Corporation
+  Copyright (c) 2010-2014, Intel Corporation
   All rights reserved.
 
   Redistribution and use in source and binary forms, with or without
@@ -31,7 +31,8 @@
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
+#include <limits.h> // INT_MIN
 #include <stdint.h>
 #include <math.h>
 #include <assert.h>
 #include <algorithm>
@@ -2162,6 +2163,7 @@ static FORCEINLINE __vec16_i8 __gather_base_offsets32_i8(uint8_t *base, uint32_t
 static FORCEINLINE __vec16_i8 __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
     __vec16_i1 still_to_do = mask;
     __vec16_i32 tmp;
     while (still_to_do) {
@@ -2172,8 +2174,8 @@ static FORCEINLINE __vec16_i8 __gather_base_offsets64_i8(uint8_t *_base, uint32_
                                                       _MM_CMPINT_EQ);
 
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        tmp = _mm512_mask_i32extgather_epi32(tmp, match, offsets.v_lo, base,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
+        tmp = _mm512_mask_i32extgather_epi32(tmp, match, signed_offsets, base,
                                              _MM_UPCONV_EPI32_SINT8, scale,
                                              _MM_HINT_NONE);
         still_to_do = _mm512_kxor(match,still_to_do);
@@ -2197,6 +2199,7 @@ static FORCEINLINE __vec16_i32 __gather_base_offsets32_i32(uint8_t *base, uint32
 static FORCEINLINE __vec16_i32 __gather_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
     // There is no gather instruction with 64-bit offsets in KNC.
     // We have to manually iterate over the upper 32 bits ;-)
     __vec16_i1 still_to_do = mask;
@@ -2207,10 +2210,10 @@ static FORCEINLINE __vec16_i32 __gather_base_offsets64_i32(uint8_t *_base, uint3
         __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi,
                                                       __smear_i32<__vec16_i32>((int32_t)hi32),
                                                       _MM_CMPINT_EQ);
 
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        ret = _mm512_mask_i32extgather_epi32(ret, match, offsets.v_lo, base,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
+        ret = _mm512_mask_i32extgather_epi32(ret, match, signed_offsets, base,
                                              _MM_UPCONV_EPI32_NONE, scale,
                                              _MM_HINT_NONE);
         still_to_do = _mm512_kxor(match, still_to_do);
@@ -2230,6 +2233,7 @@ static FORCEINLINE __vec16_f __gather_base_offsets32_float(uint8_t *base, uint32
 static FORCEINLINE __vec16_f __gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
     // There is no gather instruction with 64-bit offsets in KNC.
     // We have to manually iterate over the upper 32 bits ;-)
     __vec16_i1 still_to_do = mask;
@@ -2242,8 +2246,8 @@ static FORCEINLINE __vec16_f __gather_base_offsets64_float(uint8_t *_base, uint3
                                                       _MM_CMPINT_EQ);
 
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        ret = _mm512_mask_i32extgather_ps(ret, match, offsets.v_lo, base,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
+        ret = _mm512_mask_i32extgather_ps(ret, match, signed_offsets, base,
                                           _MM_UPCONV_PS_NONE, scale,
                                           _MM_HINT_NONE);
         still_to_do = _mm512_kxor(match, still_to_do);
@@ -2339,7 +2343,8 @@ static FORCEINLINE void __scatter_base_offsets32_i32(uint8_t *b, uint32_t scale,
 static FORCEINLINE void __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i32 value, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
 
     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
         int first_active_lane = _mm_tzcnt_32((int)still_to_do);
@@ -2349,8 +2354,8 @@ static FORCEINLINE void __scatter_base_offsets64_i32(uint8_t *_base, uint32_t sc
                                                       _MM_CMPINT_EQ);
 
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        _mm512_mask_i32extscatter_epi32(base, match, offsets.v_lo,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
+        _mm512_mask_i32extscatter_epi32(base, match, signed_offsets,
                                         value,
                                         _MM_DOWNCONV_EPI32_NONE, scale,
                                         _MM_HINT_NONE);
@@ -2370,7 +2375,8 @@ static FORCEINLINE void __scatter_base_offsets32_float(void *base, uint32_t scal
 static FORCEINLINE void __scatter_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_f value, __vec16_i1 mask)
 {
     const __vec16_i64 offsets = _offsets.cvt2hilo();
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
 
     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
         int first_active_lane = _mm_tzcnt_32((int)still_to_do);
@@ -2380,8 +2386,9 @@ static FORCEINLINE void __scatter_base_offsets64_float(uint8_t *_base, uint32_t
                                                       _MM_CMPINT_EQ);
 
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        _mm512_mask_i32extscatter_ps(base, match, offsets.v_lo,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
+
+        _mm512_mask_i32extscatter_ps(base, match, signed_offsets,
                                      value,
                                      _MM_DOWNCONV_PS_NONE, scale,
                                      _MM_HINT_NONE);

@@ -1,5 +1,5 @@
 /*
-  Copyright (c) 2012, Intel Corporation
+  Copyright (c) 2012-2014, Intel Corporation
   All rights reserved.
 
   Redistribution and use in source and binary forms, with or without
@@ -31,6 +31,7 @@
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
+#include <limits.h> // INT_MIN
 #include <stdint.h>
 #include <math.h>
 #include <assert.h>
@@ -43,7 +44,6 @@
 #include <iostream> // for operator<<(m512[i])
 #include <iomanip> // for operator<<(m512[i])
 
-
 #define FORCEINLINE __forceinline
 #ifdef _MSC_VER
 #define PRE_ALIGN(x) /*__declspec(align(x))*/
@@ -1749,6 +1749,8 @@ __gather_base_offsets32_double(uint8_t *base, uint32_t scale, __vec16_i32 offset
 static FORCEINLINE __vec16_f
 __gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                               __vec16_i1 mask) {
+
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
     // There is no gather instruction with 64-bit offsets in KNC.
     // We have to manually iterate over the upper 32 bits ;-)
     __vec16_i1 still_to_do = mask;
@@ -1759,10 +1761,10 @@ __gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offset
         __vec16_i1 match = _mm512_mask_cmp_epi32_mask(mask,offsets.v_hi,
                                                       __smear_i32<__vec16_i32>((int32_t)hi32),
                                                       _MM_CMPINT_EQ);
 
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        ret = _mm512_mask_i32extgather_ps(ret, match, offsets.v_lo, base,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
+        ret = _mm512_mask_i32extgather_ps(ret, match, signed_offsets, base,
                                           _MM_UPCONV_PS_NONE, scale,
                                           _MM_HINT_NONE);
         still_to_do = _mm512_kxor(match, still_to_do);
@@ -1776,6 +1778,8 @@ static FORCEINLINE __vec16_i8
 __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                            __vec16_i1 mask)
 {
+
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
     __vec16_i1 still_to_do = mask;
     __vec16_i32 tmp;
     while (still_to_do) {
@@ -1786,8 +1790,8 @@ __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                                                       _MM_CMPINT_EQ);
 
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        tmp = _mm512_mask_i32extgather_epi32(tmp, match, offsets.v_lo, base,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
+        tmp = _mm512_mask_i32extgather_epi32(tmp, match, signed_offsets, base,
                                              _MM_UPCONV_EPI32_SINT8, scale,
                                              _MM_HINT_NONE);
         still_to_do = _mm512_kxor(match,still_to_do);
@@ -1802,6 +1806,8 @@ static FORCEINLINE void
 __scatter_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                                __vec16_f value,
                                __vec16_i1 mask) {
+
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
         int first_active_lane = _mm_tzcnt_32((int)still_to_do);
@@ -1811,8 +1817,8 @@ __scatter_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offse
                                                       _MM_CMPINT_EQ);
 
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        _mm512_mask_i32extscatter_ps(base, match, offsets.v_lo,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
+        _mm512_mask_i32extscatter_ps(base, match, signed_offsets,
                                      value,
                                      _MM_DOWNCONV_PS_NONE, scale,
                                      _MM_HINT_NONE);
@@ -1824,6 +1830,8 @@ static FORCEINLINE void
 __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
                              __vec16_i32 value,
                              __vec16_i1 mask) {
+
+    const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
     __vec16_i1 still_to_do = mask;
     while (still_to_do) {
         int first_active_lane = _mm_tzcnt_32((int)still_to_do);
@@ -1833,8 +1841,8 @@ __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets
                                                       _MM_CMPINT_EQ);
 
         void * base = (void*)((unsigned long)_base +
-                              ((scale*(unsigned long)hi32) << 32));
-        _mm512_mask_i32extscatter_epi32(base, match, offsets.v_lo,
+                              ((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
+        _mm512_mask_i32extscatter_epi32(base, match, signed_offsets,
                                         value,
                                         _MM_DOWNCONV_EPI32_NONE, scale,
                                         _MM_HINT_NONE);
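The comment repeated in these hunks ("There is no gather instruction with 64-bit offsets in KNC. We have to manually iterate over the upper 32 bits") describes the loop the patch modifies: pick the first active lane, build a mask of every lane that shares its upper 32 offset bits, fold those upper bits plus the INT_MIN bias into the base pointer, and issue one 32-bit-offset gather or scatter for that group. A rough, intrinsic-free sketch of that loop shape; the helper name, the 16-lane arrays, and __builtin_ctz standing in for _mm_tzcnt_32 are assumptions for illustration, not the actual KNC code.

// Intrinsic-free sketch of the "iterate over the upper 32 bits" strategy.
#include <climits>
#include <cstdint>

template <typename T>
void gather64_by_hi32(const uint8_t *base, uint32_t scale,
                      const uint64_t offsets[16], uint16_t mask, T out[16]) {
    uint16_t still_to_do = mask;
    while (still_to_do) {
        int first = __builtin_ctz(still_to_do);            // first active lane (like _mm_tzcnt_32)
        uint32_t hi32 = (uint32_t)(offsets[first] >> 32);  // this group's upper offset bits

        // Base adjusted for this group's upper 32 bits and for the signed-offset bias.
        uint64_t group_base = (uint64_t)(uintptr_t)base
                            + (((uint64_t)scale * hi32) << 32)
                            + (uint64_t)scale * (uint64_t)(-(int64_t)INT_MIN);

        for (int lane = 0; lane < 16; ++lane) {
            if (!(still_to_do & (1u << lane)))
                continue;                                   // lane inactive or already done
            if ((uint32_t)(offsets[lane] >> 32) != hi32)
                continue;                                   // different group, handled in a later pass
            // Biased 32-bit index, as in the signed_offsets vector above.
            int32_t idx = (int32_t)((uint32_t)offsets[lane] + (uint32_t)INT_MIN);
            out[lane] = *(const T *)(uintptr_t)(group_base + (uint64_t)((int64_t)scale * idx));
            still_to_do &= ~(1u << lane);                   // retire the lane
        }
    }
}

Each pass retires every lane that shares the current upper 32 bits, so the worst case is one 32-bit-offset access per distinct upper half, and the common case of offsets that all fit in 32 bits needs a single pass.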