removed ugly INT32_MIN define (included limits.h) and updated the copyright

Anton Mitrokhin
2014-08-15 14:43:22 +04:00
parent 7adacf5a7b
commit bd8d02527b
2 changed files with 23 additions and 25 deletions
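Context for the change: both files previously hand-rolled the INT32_MIN constant with a local #define, even though <stdint.h> (already included) can itself provide a macro of that name, which invites a redefinition conflict. The patch drops the define and uses INT_MIN from <limits.h> instead; on the KNC targets these files address, int is 32 bits wide, so the two constants are identical. A minimal stand-alone sketch (not from the patch) of that equivalence, assuming a 32-bit int:

    // Sketch only: INT_MIN (<limits.h>/<climits>) and INT32_MIN (<cstdint>)
    // coincide wherever int is 32 bits, as on KNC, so the substitution in
    // this commit is behavior-preserving.
    #include <climits>   // INT_MIN
    #include <cstdint>   // INT32_MIN
    #include <cstdio>

    int main() {
        static_assert(sizeof(int) == 4, "assumes a 32-bit int, as on the KNC target");
        std::printf("INT_MIN   = %d\n", INT_MIN);
        std::printf("INT32_MIN = %d\n", (int)INT32_MIN);
        return (INT_MIN == INT32_MIN) ? 0 : 1;   // 0: identical on this target
    }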

View File

@@ -1,5 +1,5 @@
/**
-Copyright (c) 2010-2013, Intel Corporation
+Copyright (c) 2010-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -31,6 +31,7 @@
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <limits.h> // INT_MIN
#include <stdint.h>
#include <math.h>
#include <assert.h>
@@ -38,8 +39,6 @@
#include <immintrin.h>
#include <zmmintrin.h>
-#define INT32_MIN (-0x7fffffff - 1)
#ifdef _MSC_VER
#define FORCEINLINE __forceinline
#define PRE_ALIGN(x) /*__declspec(align(x))*/
@@ -2164,7 +2163,7 @@ static FORCEINLINE __vec16_i8 __gather_base_offsets32_i8(uint8_t *base, uint32_t
static FORCEINLINE __vec16_i8 __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i1 mask)
{
const __vec16_i64 offsets = _offsets.cvt2hilo();
-const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
__vec16_i1 still_to_do = mask;
__vec16_i32 tmp;
while (still_to_do) {
@@ -2175,7 +2174,7 @@ static FORCEINLINE __vec16_i8 __gather_base_offsets64_i8(uint8_t *_base, uint32_
_MM_CMPINT_EQ);
void * base = (void*)((unsigned long)_base +
-((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
tmp = _mm512_mask_i32extgather_epi32(tmp, match, signed_offsets, base,
_MM_UPCONV_EPI32_SINT8, scale,
_MM_HINT_NONE);
@@ -2200,7 +2199,7 @@ static FORCEINLINE __vec16_i32 __gather_base_offsets32_i32(uint8_t *base, uint32
static FORCEINLINE __vec16_i32 __gather_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i1 mask)
{
const __vec16_i64 offsets = _offsets.cvt2hilo();
-const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
// There is no gather instruction with 64-bit offsets in KNC.
// We have to manually iterate over the upper 32 bits ;-)
__vec16_i1 still_to_do = mask;
@@ -2213,7 +2212,7 @@ static FORCEINLINE __vec16_i32 __gather_base_offsets64_i32(uint8_t *_base, uint3
_MM_CMPINT_EQ);
void * base = (void*)((unsigned long)_base +
-((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
ret = _mm512_mask_i32extgather_epi32(ret, match, signed_offsets, base,
_MM_UPCONV_EPI32_NONE, scale,
_MM_HINT_NONE);
@@ -2234,7 +2233,7 @@ static FORCEINLINE __vec16_f __gather_base_offsets32_float(uint8_t *base, uint32
static FORCEINLINE __vec16_f __gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i1 mask)
{
const __vec16_i64 offsets = _offsets.cvt2hilo();
-const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
// There is no gather instruction with 64-bit offsets in KNC.
// We have to manually iterate over the upper 32 bits ;-)
__vec16_i1 still_to_do = mask;
@@ -2247,7 +2246,7 @@ static FORCEINLINE __vec16_f __gather_base_offsets64_float(uint8_t *_base, uint3
_MM_CMPINT_EQ);
void * base = (void*)((unsigned long)_base +
-((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
ret = _mm512_mask_i32extgather_ps(ret, match, signed_offsets, base,
_MM_UPCONV_PS_NONE, scale,
_MM_HINT_NONE);
@@ -2344,7 +2343,7 @@ static FORCEINLINE void __scatter_base_offsets32_i32(uint8_t *b, uint32_t scale,
static FORCEINLINE void __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_i32 value, __vec16_i1 mask)
{
const __vec16_i64 offsets = _offsets.cvt2hilo();
-const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
__vec16_i1 still_to_do = mask;
while (still_to_do) {
@@ -2355,7 +2354,7 @@ static FORCEINLINE void __scatter_base_offsets64_i32(uint8_t *_base, uint32_t sc
_MM_CMPINT_EQ);
void * base = (void*)((unsigned long)_base +
-((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
_mm512_mask_i32extscatter_epi32(base, match, signed_offsets,
value,
_MM_DOWNCONV_EPI32_NONE, scale,
@@ -2376,7 +2375,7 @@ static FORCEINLINE void __scatter_base_offsets32_float(void *base, uint32_t scal
static FORCEINLINE void __scatter_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 _offsets, __vec16_f value, __vec16_i1 mask)
{
const __vec16_i64 offsets = _offsets.cvt2hilo();
-const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
__vec16_i1 still_to_do = mask;
while (still_to_do) {
@@ -2387,7 +2386,7 @@ static FORCEINLINE void __scatter_base_offsets64_float(uint8_t *_base, uint32_t
_MM_CMPINT_EQ);
void * base = (void*)((unsigned long)_base +
-((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
_mm512_mask_i32extscatter_ps(base, match, signed_offsets,
value,

View File

@@ -1,5 +1,5 @@
/*
-Copyright (c) 2012, Intel Corporation
+Copyright (c) 2012-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -31,6 +31,7 @@
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <limits.h> // INT_MIN
#include <stdint.h>
#include <math.h>
#include <assert.h>
@@ -40,8 +41,6 @@
#include <immintrin.h>
#include <zmmintrin.h>
-#define INT32_MIN (-0x7fffffff - 1)
#include <iostream> // for operator<<(m512[i])
#include <iomanip> // for operator<<(m512[i])
@@ -1751,7 +1750,7 @@ static FORCEINLINE __vec16_f
__gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
__vec16_i1 mask) {
-const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
// There is no gather instruction with 64-bit offsets in KNC.
// We have to manually iterate over the upper 32 bits ;-)
__vec16_i1 still_to_do = mask;
@@ -1763,7 +1762,7 @@ __gather_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offset
__smear_i32<__vec16_i32>((int32_t)hi32),
_MM_CMPINT_EQ);
void * base = (void*)((unsigned long)_base +
-((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
ret = _mm512_mask_i32extgather_ps(ret, match, signed_offsets, base,
_MM_UPCONV_PS_NONE, scale,
@@ -1780,7 +1779,7 @@ __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
__vec16_i1 mask)
{
-const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
__vec16_i1 still_to_do = mask;
__vec16_i32 tmp;
while (still_to_do) {
@@ -1791,7 +1790,7 @@ __gather_base_offsets64_i8(uint8_t *_base, uint32_t scale, __vec16_i64 offsets,
_MM_CMPINT_EQ);
void * base = (void*)((unsigned long)_base +
-((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
tmp = _mm512_mask_i32extgather_epi32(tmp, match, signed_offsets, base,
_MM_UPCONV_EPI32_SINT8, scale,
_MM_HINT_NONE);
@@ -1808,7 +1807,7 @@ __scatter_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offse
__vec16_f value,
__vec16_i1 mask) {
-const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
__vec16_i1 still_to_do = mask;
while (still_to_do) {
int first_active_lane = _mm_tzcnt_32((int)still_to_do);
@@ -1818,7 +1817,7 @@ __scatter_base_offsets64_float(uint8_t *_base, uint32_t scale, __vec16_i64 offse
_MM_CMPINT_EQ);
void * base = (void*)((unsigned long)_base +
-((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
_mm512_mask_i32extscatter_ps(base, match, signed_offsets,
value,
_MM_DOWNCONV_PS_NONE, scale,
@@ -1832,7 +1831,7 @@ __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets
__vec16_i32 value,
__vec16_i1 mask) {
-const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT32_MIN));
+const __vec16_i32 signed_offsets = _mm512_add_epi32(offsets.v_lo, __smear_i32<__vec16_i32>((int32_t)INT_MIN));
__vec16_i1 still_to_do = mask;
while (still_to_do) {
int first_active_lane = _mm_tzcnt_32((int)still_to_do);
@@ -1842,7 +1841,7 @@ __scatter_base_offsets64_i32(uint8_t *_base, uint32_t scale, __vec16_i64 offsets
_MM_CMPINT_EQ);
void * base = (void*)((unsigned long)_base +
-((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT32_MIN));
+((scale*(unsigned long)hi32) << 32) + scale*(unsigned long)(-(long)INT_MIN));
_mm512_mask_i32extscatter_epi32(base, match, signed_offsets,
value,
_MM_DOWNCONV_EPI32_NONE, scale,
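For reference, every hunk above touches the same addressing trick: KNC gathers and scatters only accept signed 32-bit per-lane offsets, so each 64-bit offset is split into hi/lo halves, the while loop handles one distinct hi value per iteration, the lo half is re-biased by INT_MIN (the _mm512_add_epi32 with the smeared constant), and the bias plus (hi << 32) is folded back into the base pointer. A scalar sketch of that address arithmetic follows; it is illustrative only (effective_address is a hypothetical helper, not code from these files) and assumes a 64-bit unsigned long, as the originals do:

    // Scalar model of the biased-offset addressing used by the 64-bit
    // gather/scatter loops above; not part of the patch.
    #include <cassert>
    #include <climits>
    #include <cstdint>

    static unsigned long effective_address(unsigned long base, uint32_t scale, uint64_t offset64) {
        static_assert(sizeof(unsigned long) == 8, "assumes a 64-bit unsigned long");
        uint32_t hi32 = (uint32_t)(offset64 >> 32);
        uint32_t lo32 = (uint32_t)offset64;
        // Per-lane offset as the hardware sees it: the low half re-biased into
        // the signed range (two's-complement wrap, like _mm512_add_epi32).
        int32_t signed_offset = (int32_t)(lo32 + (uint32_t)INT_MIN);
        // Base adjusted by the high half and by the bias introduced above,
        // mirroring the "base" computation inside each while loop.
        unsigned long adjusted_base = base + ((scale * (unsigned long)hi32) << 32)
                                           + scale * (unsigned long)(-(long)INT_MIN);
        // The gather/scatter adds scale * sign-extended(signed_offset) to this base.
        return adjusted_base + scale * (unsigned long)(long)signed_offset;
    }

    int main() {
        const unsigned long base = 0x100000;
        const uint64_t offsets[] = {0, 5, 0x80000000ull, 0x100000007ull};
        for (uint64_t off : offsets) {
            // The rebased form must land on the same byte as plain base + scale*offset.
            assert(effective_address(base, 4, off) == base + 4 * off);
        }
        return 0;
    }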