added uniform_new/uniform_delete in util_ptx.m4 and __shfl intrinsics

commit e9bc2b7b54
parent 38947ab71b
Author: Evghenii
Date:   2013-11-11 09:18:15 +01:00
4 changed files with 73 additions and 75 deletions


@@ -250,15 +250,15 @@ objs/lex.o: objs/lex.cpp $(HEADERS) objs/parse.cc
 	@echo Compiling $<
 	$(CXX) $(CXXFLAGS) -o $@ -c $<
 
-objs/builtins-dispatch.cpp: builtins/dispatch.ll builtins/util.m4 builtins/svml.m4 $(wildcard builtins/*common.ll)
+objs/builtins-dispatch.cpp: builtins/dispatch.ll builtins/util.m4 builtins/util_ptx.m4 builtins/svml.m4 $(wildcard builtins/*common.ll)
 	@echo Creating C++ source from builtins definition file $<
 	m4 -Ibuiltins/ -DLLVM_VERSION=$(LLVM_VERSION) -DBUILD_OS=UNIX $< | python bitcode2cpp.py $< > $@
 
-objs/builtins-%-32bit.cpp: builtins/%.ll builtins/util.m4 builtins/svml.m4 $(wildcard builtins/*common.ll)
+objs/builtins-%-32bit.cpp: builtins/%.ll builtins/util.m4 builtins/util_ptx.m4 builtins/svml.m4 $(wildcard builtins/*common.ll)
 	@echo Creating C++ source from builtins definition file $< \(32 bit version\)
 	m4 -Ibuiltins/ -DLLVM_VERSION=$(LLVM_VERSION) -DBUILD_OS=UNIX -DRUNTIME=32 $< | python bitcode2cpp.py $< 32bit > $@
 
-objs/builtins-%-64bit.cpp: builtins/%.ll builtins/util.m4 builtins/svml.m4 $(wildcard builtins/*common.ll)
+objs/builtins-%-64bit.cpp: builtins/%.ll builtins/util.m4 builtins/util_ptx.m4 builtins/svml.m4 $(wildcard builtins/*common.ll)
 	@echo Creating C++ source from builtins definition file $< \(64 bit version\)
 	m4 -Ibuiltins/ -DLLVM_VERSION=$(LLVM_VERSION) -DBUILD_OS=UNIX -DRUNTIME=64 $< | python bitcode2cpp.py $< 64bit > $@


@@ -60,6 +60,11 @@ define i32 @__nctaid_z() nounwind readnone alwaysinline
   %nb = call i32 @llvm.nvvm.read.ptx.sreg.nctaid.z()
   ret i32 %nb
 }
+
+define i32 @__shfl_i32(i32, i32) nounwind readnone alwaysinline
+{
+  %shfl = tail call i32 asm sideeffect "shfl.idx.b32 $0, $1, $2, 0x1f;", "=r,r,r"(i32 %0, i32 %1) nounwind readnone alwaysinline
+  ret i32 %shfl
+}
 
 ;;;;;;;;;;;;;;
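The wrapper added above emits shfl.idx.b32 with a width mask of 0x1f, i.e. the full 32-lane warp, so calling @__shfl_i32(%v, 0) broadcasts lane 0's value to every lane of the warp. A minimal CUDA sketch of the same operation, assuming an sm_30+ device and the pre-CUDA-9 __shfl intrinsic (kernel name and values are illustrative):

    // Broadcast lane 0's 32-bit value across the warp -- the same
    // shfl.idx.b32 instruction the inline asm above produces.
    __global__ void shfl_broadcast_demo(int *out)
    {
        const int lane = threadIdx.x & (warpSize - 1);
        int v = lane * 10;        // each lane starts with a distinct value
        v = __shfl(v, 0, 32);     // every lane reads lane 0's register
        out[threadIdx.x] = v;     // all lanes now hold lane 0's value
    }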


@@ -2853,13 +2853,7 @@ ifelse(RUNTIME, `32',
 ;; - __delete_varying_32rt
 
 declare i8* @malloc(i32)
-define i32 @posix_memalign(i8**, i32, i32) alwaysinline
-{
-  %ptr = call i8* @malloc (i32 %2);
-  store i8* %ptr, i8** %0
-  %ret = add i32 0, 0
-  ret i32 %ret
-}
+declare i32 @posix_memalign(i8**, i32, i32)
 declare void @free(i8 *)
 
 define noalias i8 * @__new_uniform_32rt(i64 %size) {
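The deleted posix_memalign body was a stub: it forwarded straight to malloc, ignored the requested alignment, and unconditionally returned 0 (success). The commit replaces it with an honest external declaration. For contrast, a hedged host-side sketch of what an alignment-honoring fallback actually requires; this is illustrative only and not part of the commit, and aligned_free_fallback is a hypothetical counterpart needed to recover the raw pointer:

    #include <cstdlib>
    #include <cstdint>
    #include <cerrno>

    // Over-allocate, round up to the (power-of-two) alignment, and stash
    // the raw pointer just below the aligned block for the matching free.
    static int posix_memalign_fallback(void **memptr, std::size_t alignment,
                                       std::size_t size)
    {
        void *raw = std::malloc(size + alignment - 1 + sizeof(void *));
        if (raw == nullptr)
            return ENOMEM;
        std::uintptr_t p = (std::uintptr_t)raw + sizeof(void *);
        p = (p + alignment - 1) & ~(std::uintptr_t)(alignment - 1);
        ((void **)p)[-1] = raw;       // remember the original allocation
        *memptr = (void *)p;
        return 0;
    }

    static void aligned_free_fallback(void *ptr)
    {
        if (ptr != nullptr)
            std::free(((void **)ptr)[-1]);   // free the stashed raw pointer
    }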
@@ -2915,72 +2909,60 @@ RUNTIME, `64',
 ;; - __delete_varying_64rt
 
 declare i8* @malloc(i64)
-define i32 @posix_memalign(i8**, i64, i64) alwaysinline
-{
-  %ptr = call i8* @malloc (i64 %2);
-  store i8* %ptr, i8** %0
-  %ret = add i32 0, 0
-  ret i32 %ret
-}
 declare void @free(i8 *)
 
-define noalias i8 * @__new_uniform_64rt(i64 %size) {
-  %ptr = alloca i8*
-  %alignment = load i32* @memory_alignment
-  %alignment64 = sext i32 %alignment to i64
-  %call1 = call i32 @posix_memalign(i8** %ptr, i64 %alignment64, i64 %size)
-  %ptr_val = load i8** %ptr
-  ret i8* %ptr_val
-}
-
-define <WIDTH x i64> @__new_varying32_64rt(<WIDTH x i32> %size, <WIDTH x MASK> %mask) {
-  %ret = alloca <WIDTH x i64>
-  store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %ret
-  %ret64 = bitcast <WIDTH x i64> * %ret to i64 *
-  %alignment = load i32* @memory_alignment
-  %alignment64 = sext i32 %alignment to i64
-  per_lane(WIDTH, <WIDTH x MASK> %mask, `
-  %sz_LANE_ID = extractelement <WIDTH x i32> %size, i32 LANE
-  %sz64_LANE_ID = zext i32 %sz_LANE_ID to i64
-  %store_LANE_ID = getelementptr i64 * %ret64, i32 LANE
-  %ptr_LANE_ID = bitcast i64* %store_LANE_ID to i8**
-  %call_LANE_ID = call i32 @posix_memalign(i8** %ptr_LANE_ID, i64 %alignment64, i64 %sz64_LANE_ID)')
-  %r = load <WIDTH x i64> * %ret
-  ret <WIDTH x i64> %r
-}
-
-define <WIDTH x i64> @__new_varying64_64rt(<WIDTH x i64> %size, <WIDTH x MASK> %mask) {
-  %ret = alloca <WIDTH x i64>
-  store <WIDTH x i64> zeroinitializer, <WIDTH x i64> * %ret
-  %ret64 = bitcast <WIDTH x i64> * %ret to i64 *
-  %alignment = load i32* @memory_alignment
-  %alignment64 = sext i32 %alignment to i64
-  per_lane(WIDTH, <WIDTH x MASK> %mask, `
-  %sz64_LANE_ID = extractelement <WIDTH x i64> %size, i32 LANE
-  %store_LANE_ID = getelementptr i64 * %ret64, i32 LANE
-  %ptr_LANE_ID = bitcast i64* %store_LANE_ID to i8**
-  %call_LANE_ID = call i32 @posix_memalign(i8** %ptr_LANE_ID, i64 %alignment64, i64 %sz64_LANE_ID)')
-  %r = load <WIDTH x i64> * %ret
-  ret <WIDTH x i64> %r
-}
-
-define void @__delete_uniform_64rt(i8 * %ptr) {
-  call void @free(i8 * %ptr)
+define noalias i8 * @__new_uniform_64rt(i64 %size)
+{
+entry:
+;; compute laneIdx = __tid_x() & (__warpsize() - 1)
+  %call = tail call i32 @__tid_x()
+  %call1 = tail call i32 @__warpsize()
+  %sub = add nsw i32 %call1, -1
+  %and = and i32 %sub, %call
+;; if (laneIdx == 0)
+  %cmp = icmp eq i32 %and, 0
+  br i1 %cmp, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  %call2 = tail call noalias i8* @malloc(i64 %size) #3
+  %phitmp = ptrtoint i8* %call2 to i64
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  %ptr.0 = phi i64 [ %phitmp, %if.then ], [ undef, %entry ]
+  %val.sroa.0.0.extract.trunc = trunc i64 %ptr.0 to i32
+  %call3 = tail call i32 @__shfl_i32(i32 %val.sroa.0.0.extract.trunc, i32 0)
+  %val.sroa.0.0.insert.ext = zext i32 %call3 to i64
+  %val.sroa.0.4.extract.shift = lshr i64 %ptr.0, 32
+  %val.sroa.0.4.extract.trunc = trunc i64 %val.sroa.0.4.extract.shift to i32
+  %call8 = tail call i32 @__shfl_i32(i32 %val.sroa.0.4.extract.trunc, i32 0)
+  %val.sroa.0.4.insert.ext = zext i32 %call8 to i64
+  %val.sroa.0.4.insert.shift = shl nuw i64 %val.sroa.0.4.insert.ext, 32
+  %val.sroa.0.4.insert.insert = or i64 %val.sroa.0.4.insert.shift, %val.sroa.0.0.insert.ext
+  %0 = inttoptr i64 %val.sroa.0.4.insert.insert to i8*
+  ret i8* %0
+}
+
+define void @__delete_uniform_64rt(i8 * %ptr)
+{
+entry:
+  %call = tail call i32 @__tid_x()
+  %call1 = tail call i32 @__warpsize()
+  %sub = add nsw i32 %call1, -1
+  %and = and i32 %sub, %call
+  %cmp = icmp eq i32 %and, 0
+  br i1 %cmp, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @free(i8* %ptr) #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
   ret void
 }
 
-define void @__delete_varying_64rt(<WIDTH x i64> %ptr, <WIDTH x MASK> %mask) {
-  per_lane(WIDTH, <WIDTH x MASK> %mask, `
-  %iptr_LANE_ID = extractelement <WIDTH x i64> %ptr, i32 LANE
-  %ptr_LANE_ID = inttoptr i64 %iptr_LANE_ID to i8 *
-  call void @free(i8 * %ptr_LANE_ID)
-  ')
-  ret void
-}
+declare <WIDTH x i64> @__new_varying32_64rt(<WIDTH x i32> %size, <WIDTH x MASK> %mask);
+declare <WIDTH x i64> @__new_varying64_64rt(<WIDTH x i64> %size, <WIDTH x MASK> %mask);
+declare void @__delete_varying_64rt(<WIDTH x i64> %ptr, <WIDTH x MASK> %mask);
 ', `
 errprint(`RUNTIME should be defined to either 32 or 64
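The new __new_uniform_64rt is a warp-cooperative allocation: only lane 0 (laneIdx = __tid_x() & (__warpsize() - 1)) calls malloc, and since __shfl_i32 moves 32 bits at a time the 64-bit pointer is broadcast as two halves and reassembled in every lane; __delete_uniform_64rt mirrors this by letting only lane 0 call free. A hedged CUDA sketch of the same pattern, with hypothetical names, assuming device-side malloc/free (sm_20+) and the pre-CUDA-9 __shfl:

    #include <cstdint>

    // Lane 0 allocates once per warp, then the pointer is broadcast so
    // every lane returns the same ("uniform") address.
    __device__ void *uniform_new(size_t size)
    {
        const int lane = threadIdx.x & (warpSize - 1);
        uint64_t ptr = 0;
        if (lane == 0)
            ptr = (uint64_t)(uintptr_t)malloc(size);   // one heap call per warp
        // __shfl is 32-bit, so broadcast the pointer in two halves.
        const int lo = __shfl((int)(uint32_t)ptr, 0, 32);
        const int hi = __shfl((int)(uint32_t)(ptr >> 32), 0, 32);
        ptr = ((uint64_t)(uint32_t)hi << 32) | (uint64_t)(uint32_t)lo;
        return (void *)(uintptr_t)ptr;                 // identical in all lanes
    }

    // Only lane 0 frees, so the warp-shared block is released exactly once.
    __device__ void uniform_delete(void *ptr)
    {
        if ((threadIdx.x & (warpSize - 1)) == 0)
            free(ptr);
    }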


@@ -161,6 +161,7 @@ struct Uniform
         return data[i];
     }
+    __device__ inline T* get_ptr(const int i) {return &data[i]; }
     __device__ inline void set(const bool active, const int i, T value)
     {
         if (active)
@@ -219,6 +220,8 @@ static float reduce_max(float value)
         value = max(value, __shfl_xor(value, 1<<i, 32));
     return value;
 }
+
+#if 0
 __device__ inline
 static int reduce_sum(int value)
 {
@@ -247,6 +250,7 @@ static __device__ __forceinline__ int inclusive_scan_warp(const int value)
         sum = shfl_scan_add_step(sum, 1 << i);
     return sum - value;
 }
+#endif
 
 static __device__ __forceinline__ int lanemask_lt()
@@ -260,6 +264,16 @@ static __device__ __forceinline__ int2 warpBinExclusiveScan(const bool p)
     const unsigned int b = __ballot(p);
     return make_int2(__popc(b & lanemask_lt()), __popc(b));
 }
+
+__device__ static inline
+int packed_store_active(bool active, int* ptr, int value)
+{
+    const int2 res = warpBinExclusiveScan(active);
+    const int idx = res.x;
+    const int nactive = res.y;
+    if (active)
+        ptr[idx] = value;
+    return nactive;
+}
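packed_store_active is warp-level stream compaction: warpBinExclusiveScan combines __ballot with __popc over lanemask_lt() to give each active lane its rank among the active lanes (its write slot) and every lane the total count, which the caller adds to its running cursor. For example, if only lanes 0, 3 and 5 are active, the ballot is ...101001 in binary; lane 3 computes popc(101001 & 000111) = 1 and writes ptr[1], lane 5 writes ptr[2], and all lanes return 3. A hedged usage sketch (kernel and predicate are illustrative; launch as a single 32-thread block), in the same spirit as the next hunk, which pairs it with the new Uniform<T>::get_ptr:

    // Compact the thread indices of lanes whose predicate holds into out,
    // then record how many were stored.
    __global__ void compaction_demo(int *out, int *count)
    {
        const bool active = (threadIdx.x % 3) == 0;   // arbitrary predicate
        const int nactive = packed_store_active(active, out, (int)threadIdx.x);
        if (threadIdx.x == 0)
            *count = nactive;    // every lane returned the same total
    }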
@@ -417,21 +431,18 @@ IntersectLightsWithTileMinMax(
         }
 #endif
     }
 
-#if 1
     if (lightIndex >= numLights)
         active = 0;
 #if 0
-    const int idx = tileNumLights + inclusive_scan_warp(active);
-    const int nactive = reduce_sum(active);
-#else
     const int2 res = warpBinExclusiveScan(active);
     const int idx = tileNumLights + res.x;
     const int nactive = res.y;
-#endif
-    // if (active)
-        tileLightIndices.set(active, idx,lightIndex);
+    tileLightIndices.set(active, idx,lightIndex);
     tileNumLights += nactive;
+#else
+    tileNumLights += packed_store_active(active, tileLightIndices.get_ptr(tileNumLights),
+                                         lightIndex);
+#endif
 }