Add native support for (AO)SOA data layout.

There's now a SOA variability class (in addition to uniform,
varying, and unbound variability); the SOA factor must be a
positive power of 2.

When applied to a type, the leaf elements of the type (i.e.
atomic types, pointer types, and enum types) are widened out
into arrays of the given SOA factor.  For example, given

struct Point { float x, y, z; };

Then "soa<8> Point" has a memory layout of "float x[8], y[8],
z[8]".

Furthermore, array indexing syntax has been augmented so that
when indexing into arrays of SOA-variability data, the two-stage
indexing (first into the array of soa<> elements and then into
the leaf arrays of SOA data) is performed automatically.
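
As a sketch of the intended usage (assuming a hypothetical array
"pts" of "soa<8> Point" and a uniform index "i"; the exact
lowering follows the two-stage computation described above):

soa<8> Point pts[16];         // memory: 16 x { float x[8], y[8], z[8] }
uniform int i = 10;
uniform float vx = pts[i].x;  // performed as pts[i >> 3].x[i & 7]
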
Author: Matt Pharr
Date:   2012-03-05 09:49:44 -08:00
parent 8fdf84de04
commit db5db5aefd
9 changed files with 1547 additions and 442 deletions

ctx.cpp

@@ -1,5 +1,5 @@
 /*
-Copyright (c) 2010-2011, Intel Corporation
+Copyright (c) 2010-2012, Intel Corporation
 All rights reserved.
 Redistribution and use in source and binary forms, with or without
@@ -1904,17 +1904,146 @@ FunctionEmitContext::applyVaryingGEP(llvm::Value *basePtr, llvm::Value *index,
} }
void
FunctionEmitContext::MatchIntegerTypes(llvm::Value **v0, llvm::Value **v1) {
LLVM_TYPE_CONST llvm::Type *type0 = (*v0)->getType();
LLVM_TYPE_CONST llvm::Type *type1 = (*v1)->getType();
// First, promote to a vector type if one of the two values is a vector
// type
if (llvm::isa<LLVM_TYPE_CONST llvm::VectorType>(type0) &&
!llvm::isa<LLVM_TYPE_CONST llvm::VectorType>(type1)) {
*v1 = SmearUniform(*v1, "smear_v1");
type1 = (*v1)->getType();
}
if (!llvm::isa<LLVM_TYPE_CONST llvm::VectorType>(type0) &&
llvm::isa<LLVM_TYPE_CONST llvm::VectorType>(type1)) {
*v0 = SmearUniform(*v0, "smear_v0");
type0 = (*v0)->getType();
}
// And then update to match bit widths
if (type0 == LLVMTypes::Int32VectorType &&
type1 == LLVMTypes::Int64VectorType)
*v0 = SExtInst(*v0, LLVMTypes::Int64VectorType);
else if (type1 == LLVMTypes::Int32VectorType &&
type0 == LLVMTypes::Int64VectorType)
*v1 = SExtInst(*v1, LLVMTypes::Int64VectorType);
}
/** Given an integer index in indexValue that's indexing into an array of
soa<> structures with given soaWidth, compute the two sub-indices we
need to do the actual indexing calculation:
subIndices[0] = (indexValue >> log(soaWidth))
subIndices[1] = (indexValue & (soaWidth-1))
*/
static llvm::Value *
lComputeSliceIndex(FunctionEmitContext *ctx, int soaWidth,
llvm::Value *indexValue, llvm::Value *ptrSliceOffset,
llvm::Value **newSliceOffset) {
// Compute the log2 of the soaWidth.
Assert(soaWidth > 0);
int logWidth = 0, sw = soaWidth;
while (sw > 1) {
++logWidth;
sw >>= 1;
}
Assert((1 << logWidth) == soaWidth);
ctx->MatchIntegerTypes(&indexValue, &ptrSliceOffset);
LLVM_TYPE_CONST llvm::Type *indexType = indexValue->getType();
llvm::Value *shift = LLVMIntAsType(logWidth, indexType);
llvm::Value *mask = LLVMIntAsType(soaWidth-1, indexType);
llvm::Value *indexSum =
ctx->BinaryOperator(llvm::Instruction::Add, indexValue, ptrSliceOffset,
"index_sum");
// minor index = (index & (soaWidth - 1))
*newSliceOffset = ctx->BinaryOperator(llvm::Instruction::And, indexSum,
mask, "slice_index_minor");
// slice offsets are always 32 bits...
if ((*newSliceOffset)->getType() == LLVMTypes::Int64Type)
*newSliceOffset = ctx->TruncInst(*newSliceOffset, LLVMTypes::Int32Type);
else if ((*newSliceOffset)->getType() == LLVMTypes::Int64VectorType)
*newSliceOffset = ctx->TruncInst(*newSliceOffset, LLVMTypes::Int32VectorType);
// major index = (index >> logWidth)
return ctx->BinaryOperator(llvm::Instruction::AShr, indexSum,
shift, "slice_index_major");
}
llvm::Value *
FunctionEmitContext::MakeSlicePointer(llvm::Value *ptr, llvm::Value *offset) {
// Create a small struct where the first element is the type of the
// given pointer and the second element is the type of the offset
// value.
std::vector<LLVM_TYPE_CONST llvm::Type *> eltTypes;
eltTypes.push_back(ptr->getType());
eltTypes.push_back(offset->getType());
LLVM_TYPE_CONST llvm::StructType *st =
llvm::StructType::get(*g->ctx, eltTypes);
llvm::Value *ret = llvm::UndefValue::get(st);
ret = InsertInst(ret, ptr, 0);
ret = InsertInst(ret, offset, 1);
return ret;
}
llvm::Value * llvm::Value *
FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index, FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index,
const Type *ptrType, const char *name) { const Type *ptrRefType, const char *name) {
if (basePtr == NULL || index == NULL) { if (basePtr == NULL || index == NULL) {
Assert(m->errorCount > 0); Assert(m->errorCount > 0);
return NULL; return NULL;
} }
if (dynamic_cast<const ReferenceType *>(ptrType) != NULL) // Regularize to a standard pointer type for basePtr's type
ptrType = PointerType::GetUniform(ptrType->GetReferenceTarget()); const PointerType *ptrType;
Assert(dynamic_cast<const PointerType *>(ptrType) != NULL); if (dynamic_cast<const ReferenceType *>(ptrRefType) != NULL)
ptrType = PointerType::GetUniform(ptrRefType->GetReferenceTarget());
else {
ptrType = dynamic_cast<const PointerType *>(ptrRefType);
Assert(ptrType != NULL);
}
if (ptrType->IsSlice()) {
Assert(llvm::isa<LLVM_TYPE_CONST llvm::StructType>(basePtr->getType()));
llvm::Value *ptrSliceOffset = ExtractInst(basePtr, 1);
if (ptrType->IsFrozenSlice() == false) {
// For slice pointers that aren't frozen, we compute a new
// index based on the given index plus the offset in the slice
// pointer. This gives us an updated integer slice index for
// the resulting slice pointer and then an index to index into
// the soa<> structs with.
llvm::Value *newSliceOffset;
int soaWidth = ptrType->GetBaseType()->GetSOAWidth();
index = lComputeSliceIndex(this, soaWidth, index,
ptrSliceOffset, &newSliceOffset);
ptrSliceOffset = newSliceOffset;
}
// Handle the indexing into the soa<> structs with the major
// component of the index through a recursive call
llvm::Value *p = GetElementPtrInst(ExtractInst(basePtr, 0), index,
ptrType->GetAsNonSlice(), name);
// And mash the results together for the return value
return MakeSlicePointer(p, ptrSliceOffset);
}
// Double-check consistency between the given pointer type and its LLVM
// type.
if (ptrType->IsUniformType())
Assert(llvm::isa<LLVM_TYPE_CONST llvm::PointerType>(basePtr->getType()));
else if (ptrType->IsVaryingType())
Assert(llvm::isa<LLVM_TYPE_CONST llvm::VectorType>(basePtr->getType()));
bool indexIsVaryingType = bool indexIsVaryingType =
llvm::isa<LLVM_TYPE_CONST llvm::VectorType>(index->getType()); llvm::isa<LLVM_TYPE_CONST llvm::VectorType>(index->getType());
@@ -1943,16 +2072,41 @@ FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index,
llvm::Value * llvm::Value *
FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index0, FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index0,
llvm::Value *index1, const Type *ptrType, llvm::Value *index1, const Type *ptrRefType,
const char *name) { const char *name) {
if (basePtr == NULL || index0 == NULL || index1 == NULL) { if (basePtr == NULL || index0 == NULL || index1 == NULL) {
Assert(m->errorCount > 0); Assert(m->errorCount > 0);
return NULL; return NULL;
} }
if (dynamic_cast<const ReferenceType *>(ptrType) != NULL) // Regularize the pointer type for basePtr
ptrType = PointerType::GetUniform(ptrType->GetReferenceTarget()); const PointerType *ptrType = NULL;
Assert(dynamic_cast<const PointerType *>(ptrType) != NULL); if (dynamic_cast<const ReferenceType *>(ptrRefType) != NULL)
ptrType = PointerType::GetUniform(ptrRefType->GetReferenceTarget());
else {
ptrType = dynamic_cast<const PointerType *>(ptrRefType);
Assert(ptrType != NULL);
}
if (ptrType->IsSlice()) {
// Similar to the 1D GEP implementation above, for non-frozen slice
// pointers we do the two-step indexing calculation and then pass
// the new major index on to a recursive GEP call.
Assert(llvm::isa<LLVM_TYPE_CONST llvm::StructType>(basePtr->getType()));
llvm::Value *ptrSliceOffset = ExtractInst(basePtr, 1);
if (ptrType->IsFrozenSlice() == false) {
llvm::Value *newSliceOffset;
int soaWidth = ptrType->GetBaseType()->GetSOAWidth();
index1 = lComputeSliceIndex(this, soaWidth, index1,
ptrSliceOffset, &newSliceOffset);
ptrSliceOffset = newSliceOffset;
}
llvm::Value *p = GetElementPtrInst(ExtractInst(basePtr, 0), index0,
index1, ptrType->GetAsNonSlice(),
name);
return MakeSlicePointer(p, ptrSliceOffset);
}
bool index0IsVaryingType = bool index0IsVaryingType =
llvm::isa<LLVM_TYPE_CONST llvm::VectorType>(index0->getType()); llvm::isa<LLVM_TYPE_CONST llvm::VectorType>(index0->getType());
@@ -2000,28 +2154,71 @@ FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index0
llvm::Value * llvm::Value *
FunctionEmitContext::AddElementOffset(llvm::Value *basePtr, int elementNum, FunctionEmitContext::AddElementOffset(llvm::Value *fullBasePtr, int elementNum,
const Type *ptrType, const char *name) { const Type *ptrRefType, const char *name,
if (ptrType == NULL || ptrType->IsUniformType() || const PointerType **resultPtrType) {
dynamic_cast<const ReferenceType *>(ptrType) != NULL) { if (resultPtrType != NULL)
// If the pointer is uniform or we have a reference (which is a Assert(ptrRefType != NULL);
// uniform pointer in the end), we can use the regular LLVM GEP.
// (Unfortunately) it's not required to pass a non-NULL ptrRefType, but
// if we have one, regularize into a pointer type.
const PointerType *ptrType = NULL;
if (ptrRefType != NULL) {
// Normalize references to uniform pointers
if (dynamic_cast<const ReferenceType *>(ptrRefType) != NULL)
ptrType = PointerType::GetUniform(ptrRefType->GetReferenceTarget());
else
ptrType = dynamic_cast<const PointerType *>(ptrRefType);
Assert(ptrType != NULL);
}
// Similarly, we have to see if the pointer type is a struct to see if
// we have a slice pointer instead of looking at ptrType; this is also
// unfortunate...
llvm::Value *basePtr = fullBasePtr;
bool baseIsSlicePtr =
llvm::isa<LLVM_TYPE_CONST llvm::StructType>(fullBasePtr->getType());
const PointerType *rpt;
if (baseIsSlicePtr) {
Assert(ptrType != NULL);
// Update basePtr to just be the part that actually points to the
// start of an soa<> struct for now; the element offset computation
// doesn't change the slice offset, so we'll incorporate that into
// the final value right before this method returns.
basePtr = ExtractInst(fullBasePtr, 0);
if (resultPtrType == NULL)
resultPtrType = &rpt;
}
// Return the pointer type of the result of this call, for callers that
// want it.
if (resultPtrType != NULL) {
Assert(ptrType != NULL);
const CollectionType *ct =
dynamic_cast<const CollectionType *>(ptrType->GetBaseType());
Assert(ct != NULL);
*resultPtrType = new PointerType(ct->GetElementType(elementNum),
ptrType->GetVariability(),
ptrType->IsConstType(),
ptrType->IsSlice());
}
llvm::Value *resultPtr = NULL;
if (ptrType == NULL || ptrType->IsUniformType()) {
// If the pointer is uniform, we can use the regular LLVM GEP.
llvm::Value *offsets[2] = { LLVMInt32(0), LLVMInt32(elementNum) }; llvm::Value *offsets[2] = { LLVMInt32(0), LLVMInt32(elementNum) };
#if defined(LLVM_3_0) || defined(LLVM_3_0svn) || defined(LLVM_3_1svn) #if defined(LLVM_3_0) || defined(LLVM_3_0svn) || defined(LLVM_3_1svn)
llvm::ArrayRef<llvm::Value *> arrayRef(&offsets[0], &offsets[2]); llvm::ArrayRef<llvm::Value *> arrayRef(&offsets[0], &offsets[2]);
return llvm::GetElementPtrInst::Create(basePtr, arrayRef, resultPtr =
llvm::GetElementPtrInst::Create(basePtr, arrayRef,
name ? name : "struct_offset", bblock); name ? name : "struct_offset", bblock);
#else #else
return llvm::GetElementPtrInst::Create(basePtr, &offsets[0], &offsets[2], resultPtr =
llvm::GetElementPtrInst::Create(basePtr, &offsets[0], &offsets[2],
name ? name : "struct_offset", bblock); name ? name : "struct_offset", bblock);
#endif #endif
} }
else {
if (dynamic_cast<const ReferenceType *>(ptrType) != NULL)
ptrType = PointerType::GetUniform(ptrType->GetReferenceTarget());
Assert(dynamic_cast<const PointerType *>(ptrType) != NULL);
// Otherwise do the math to find the offset and add it to the given // Otherwise do the math to find the offset and add it to the given
// varying pointers // varying pointers
const StructType *st = const StructType *st =
@@ -2054,10 +2251,18 @@ FunctionEmitContext::AddElementOffset(llvm::Value *basePtr, int elementNum,
// we add the offset to the varying pointers. // we add the offset to the varying pointers.
offset = SExtInst(offset, LLVMTypes::Int64VectorType, "offset_to_64"); offset = SExtInst(offset, LLVMTypes::Int64VectorType, "offset_to_64");
return BinaryOperator(llvm::Instruction::Add, basePtr, offset, resultPtr = BinaryOperator(llvm::Instruction::Add, basePtr, offset,
"struct_ptr_offset"); "struct_ptr_offset");
} }
// Finally, if had a slice pointer going in, mash back together with
// the original (unchanged) slice offset.
if (baseIsSlicePtr)
return MakeSlicePointer(resultPtr, ExtractInst(fullBasePtr, 1));
else
return resultPtr;
}
llvm::Value * llvm::Value *
FunctionEmitContext::LoadInst(llvm::Value *ptr, const char *name) { FunctionEmitContext::LoadInst(llvm::Value *ptr, const char *name) {
@@ -2085,22 +2290,102 @@ FunctionEmitContext::LoadInst(llvm::Value *ptr, const char *name) {
} }
/** Given a slice pointer to soa'd data that is a basic type (atomic,
pointer, or enum type), use the slice offset to compute pointer(s) to
the appropriate individual data element(s).
*/
static llvm::Value *
lFinalSliceOffset(FunctionEmitContext *ctx, llvm::Value *ptr,
const PointerType **ptrType) {
Assert(dynamic_cast<const PointerType *>(*ptrType) != NULL);
llvm::Value *slicePtr = ctx->ExtractInst(ptr, 0, "slice_ptr");
llvm::Value *sliceOffset = ctx->ExtractInst(ptr, 1, "slice_offset");
// slicePtr should be a pointer to an soa-width wide array of the
// final atomic/enum/pointer type
const Type *unifBaseType = (*ptrType)->GetBaseType()->GetAsUniformType();
Assert(Type::IsBasicType(unifBaseType));
// The final pointer type is a uniform or varying pointer to the
// underlying uniform type, depending on whether the given pointer is
// uniform or varying.
*ptrType = (*ptrType)->IsUniformType() ?
PointerType::GetUniform(unifBaseType) :
PointerType::GetVarying(unifBaseType);
// For uniform pointers, bitcast to a pointer to the uniform element
// type, so that the GEP below does the desired indexing
if ((*ptrType)->IsUniformType())
slicePtr = ctx->BitCastInst(slicePtr, (*ptrType)->LLVMType(g->ctx));
// And finally index based on the slice offset
return ctx->GetElementPtrInst(slicePtr, sliceOffset, *ptrType,
"final_slice_gep");
}
/** Utility routine that loads from a uniform pointer to soa<> data,
returning a regular uniform (non-SOA result).
*/
llvm::Value *
FunctionEmitContext::loadUniformFromSOA(llvm::Value *ptr, llvm::Value *mask,
const PointerType *ptrType,
const char *name) {
const Type *unifType = ptrType->GetBaseType()->GetAsUniformType();
const CollectionType *ct =
dynamic_cast<const CollectionType *>(ptrType->GetBaseType());
if (ct != NULL) {
// If we have a struct/array, we need to decompose it into
// individual element loads to fill in the result structure since
// the SOA slice of values we need isn't contiguous in memory...
LLVM_TYPE_CONST llvm::Type *llvmReturnType = unifType->LLVMType(g->ctx);
llvm::Value *retValue = llvm::UndefValue::get(llvmReturnType);
for (int i = 0; i < ct->GetElementCount(); ++i) {
const PointerType *eltPtrType;
llvm::Value *eltPtr = AddElementOffset(ptr, i, ptrType,
"elt_offset", &eltPtrType);
llvm::Value *eltValue = LoadInst(eltPtr, mask, eltPtrType, name);
retValue = InsertInst(retValue, eltValue, i, "set_value");
}
return retValue;
}
else {
// Otherwise we've made our way to a slice pointer to a basic type;
// we need to apply the slice offset into this terminal SOA array
// and then perform the final load
ptr = lFinalSliceOffset(this, ptr, &ptrType);
return LoadInst(ptr, mask, ptrType, name);
}
}
llvm::Value * llvm::Value *
FunctionEmitContext::LoadInst(llvm::Value *ptr, llvm::Value *mask, FunctionEmitContext::LoadInst(llvm::Value *ptr, llvm::Value *mask,
const Type *ptrType, const char *name) { const Type *ptrRefType, const char *name) {
if (ptr == NULL) { if (ptr == NULL) {
Assert(m->errorCount > 0); Assert(m->errorCount > 0);
return NULL; return NULL;
} }
Assert(ptrType != NULL && mask != NULL); Assert(ptrRefType != NULL && mask != NULL);
if (dynamic_cast<const ReferenceType *>(ptrType) != NULL) const PointerType *ptrType;
ptrType = PointerType::GetUniform(ptrType->GetReferenceTarget()); if (dynamic_cast<const ReferenceType *>(ptrRefType) != NULL)
ptrType = PointerType::GetUniform(ptrRefType->GetReferenceTarget());
Assert(dynamic_cast<const PointerType *>(ptrType) != NULL); else {
ptrType = dynamic_cast<const PointerType *>(ptrRefType);
Assert(ptrType != NULL);
}
if (ptrType->IsUniformType()) { if (ptrType->IsUniformType()) {
if (ptrType->IsSlice()) {
return loadUniformFromSOA(ptr, mask, ptrType, name);
}
else {
// FIXME: same issue as above load inst regarding alignment... // FIXME: same issue as above load inst regarding alignment...
// //
// If the ptr is a straight up regular pointer, then just issue // If the ptr is a straight up regular pointer, then just issue
@@ -2123,6 +2408,7 @@ FunctionEmitContext::LoadInst(llvm::Value *ptr, llvm::Value *mask,
AddDebugPos(inst); AddDebugPos(inst);
return inst; return inst;
} }
}
else { else {
// Otherwise we should have a varying ptr and it's time for a // Otherwise we should have a varying ptr and it's time for a
// gather. // gather.
@@ -2132,11 +2418,10 @@ FunctionEmitContext::LoadInst(llvm::Value *ptr, llvm::Value *mask,
llvm::Value * llvm::Value *
FunctionEmitContext::gather(llvm::Value *ptr, const Type *ptrType, FunctionEmitContext::gather(llvm::Value *ptr, const PointerType *ptrType,
llvm::Value *mask, const char *name) { llvm::Value *mask, const char *name) {
// We should have a varying lvalue if we get here... // We should have a varying pointer if we get here...
Assert(ptrType->IsVaryingType() && Assert(ptrType->IsVaryingType());
ptr->getType() == LLVMTypes::VoidPointerVectorType);
const Type *returnType = ptrType->GetBaseType()->GetAsVaryingType(); const Type *returnType = ptrType->GetBaseType()->GetAsVaryingType();
LLVM_TYPE_CONST llvm::Type *llvmReturnType = returnType->LLVMType(g->ctx); LLVM_TYPE_CONST llvm::Type *llvmReturnType = returnType->LLVMType(g->ctx);
@@ -2147,10 +2432,12 @@ FunctionEmitContext::gather(llvm::Value *ptr, const Type *ptrType,
// For collections, recursively gather element wise to find the // For collections, recursively gather element wise to find the
// result. // result.
llvm::Value *retValue = llvm::UndefValue::get(llvmReturnType); llvm::Value *retValue = llvm::UndefValue::get(llvmReturnType);
for (int i = 0; i < collectionType->GetElementCount(); ++i) { for (int i = 0; i < collectionType->GetElementCount(); ++i) {
llvm::Value *eltPtr = AddElementOffset(ptr, i, ptrType); const PointerType *eltPtrType;
const Type *eltPtrType = llvm::Value *eltPtr =
PointerType::GetVarying(collectionType->GetElementType(i)); AddElementOffset(ptr, i, ptrType, "gather_elt_ptr", &eltPtrType);
eltPtr = addVaryingOffsetsIfNeeded(eltPtr, eltPtrType); eltPtr = addVaryingOffsetsIfNeeded(eltPtr, eltPtrType);
// This in turn will be another gather // This in turn will be another gather
@@ -2160,6 +2447,14 @@ FunctionEmitContext::gather(llvm::Value *ptr, const Type *ptrType,
} }
return retValue; return retValue;
} }
else if (ptrType->IsSlice()) {
// If we have a slice pointer, we need to add the final slice
// offset here right before issuing the actual gather
//
// FIXME: would it be better to do the corresponding same thing for
// all of the varying offsets stuff here (and in scatter)?
ptr = lFinalSliceOffset(this, ptr, &ptrType);
}
// Otherwise we should just have a basic scalar or pointer type and we // Otherwise we should just have a basic scalar or pointer type and we
// can go and do the actual gather // can go and do the actual gather
@@ -2312,14 +2607,26 @@ FunctionEmitContext::maskedStore(llvm::Value *value, llvm::Value *ptr,
// We must have a regular atomic, enumerator, or pointer type at this // We must have a regular atomic, enumerator, or pointer type at this
// point. // point.
Assert(dynamic_cast<const AtomicType *>(valueType) != NULL || Assert(Type::IsBasicType(valueType));
dynamic_cast<const EnumType *>(valueType) != NULL ||
dynamic_cast<const PointerType *>(valueType) != NULL);
valueType = valueType->GetAsNonConstType(); valueType = valueType->GetAsNonConstType();
llvm::Function *maskedStoreFunc = NULL;
// Figure out if we need a 8, 16, 32 or 64-bit masked store. // Figure out if we need a 8, 16, 32 or 64-bit masked store.
if (dynamic_cast<const PointerType *>(valueType) != NULL) { llvm::Function *maskedStoreFunc = NULL;
const PointerType *pt = dynamic_cast<const PointerType *>(valueType);
if (pt != NULL) {
if (pt->IsSlice()) {
// For masked stores of (varying) slice pointers to memory, we
// grab the equivalent StructType and make a recursive call to
// maskedStore, giving it that type for the pointer type; that
// in turn will lead to the base pointer and offset index being
// mask stored to memory..
const StructType *sliceStructType = pt->GetSliceStructType();
ptrType = PointerType::GetUniform(sliceStructType);
maskedStore(value, ptr, ptrType, mask);
return;
}
if (g->target.is32Bit) if (g->target.is32Bit)
maskedStoreFunc = m->module->getFunction("__pseudo_masked_store_32"); maskedStoreFunc = m->module->getFunction("__pseudo_masked_store_32");
else else
@@ -2391,14 +2698,12 @@ FunctionEmitContext::maskedStore(llvm::Value *value, llvm::Value *ptr,
*/ */
void void
FunctionEmitContext::scatter(llvm::Value *value, llvm::Value *ptr, FunctionEmitContext::scatter(llvm::Value *value, llvm::Value *ptr,
const Type *valueType, const Type *ptrType, const Type *valueType, const Type *origPt,
llvm::Value *mask) { llvm::Value *mask) {
Assert(dynamic_cast<const PointerType *>(ptrType) != NULL); const PointerType *ptrType = dynamic_cast<const PointerType *>(origPt);
Assert(ptrType != NULL);
Assert(ptrType->IsVaryingType()); Assert(ptrType->IsVaryingType());
// I think this should be impossible
Assert(dynamic_cast<const ArrayType *>(valueType) == NULL);
const CollectionType *srcCollectionType = const CollectionType *srcCollectionType =
dynamic_cast<const CollectionType *>(valueType); dynamic_cast<const CollectionType *>(valueType);
if (srcCollectionType != NULL) { if (srcCollectionType != NULL) {
@@ -2424,8 +2729,7 @@ FunctionEmitContext::scatter(llvm::Value *value, llvm::Value *ptr,
// We may be scattering a uniform atomic element; in this case // We may be scattering a uniform atomic element; in this case
// we'll smear it out to be varying before making the recursive // we'll smear it out to be varying before making the recursive
// scatter() call below. // scatter() call below.
if (srcEltType->IsUniformType() && if (srcEltType->IsUniformType() && Type::IsBasicType(srcEltType)) {
dynamic_cast<const AtomicType *>(srcEltType) != NULL) {
eltValue = SmearUniform(eltValue, "to_varying"); eltValue = SmearUniform(eltValue, "to_varying");
srcEltType = srcEltType->GetAsVaryingType(); srcEltType = srcEltType->GetAsVaryingType();
} }
@@ -2439,16 +2743,23 @@ FunctionEmitContext::scatter(llvm::Value *value, llvm::Value *ptr,
// to be careful about passing the correct type to // to be careful about passing the correct type to
// addVaryingOffsetsIfNeeded() here. // addVaryingOffsetsIfNeeded() here.
const Type *dstEltType = dstCollectionType->GetElementType(i); const Type *dstEltType = dstCollectionType->GetElementType(i);
const Type *dstEltPtrType = PointerType::GetVarying(dstEltType); const PointerType *dstEltPtrType = PointerType::GetVarying(dstEltType);
if (ptrType->IsSlice())
dstEltPtrType = dstEltPtrType->GetAsSlice();
eltPtr = addVaryingOffsetsIfNeeded(eltPtr, dstEltPtrType); eltPtr = addVaryingOffsetsIfNeeded(eltPtr, dstEltPtrType);
// And recursively scatter() until we hit an atomic or pointer // And recursively scatter() until we hit a basic type, at
// type, at which point the actual memory operations can be // which point the actual memory operations can be performed...
// performed...
scatter(eltValue, eltPtr, srcEltType, dstEltPtrType, mask); scatter(eltValue, eltPtr, srcEltType, dstEltPtrType, mask);
} }
return; return;
} }
else if (ptrType->IsSlice()) {
// As with gather, we need to add the final slice offset finally
// once we get to a terminal SOA array of basic types..
ptr = lFinalSliceOffset(this, ptr, &ptrType);
}
const PointerType *pt = dynamic_cast<const PointerType *>(valueType); const PointerType *pt = dynamic_cast<const PointerType *>(valueType);
@@ -2520,19 +2831,27 @@ FunctionEmitContext::StoreInst(llvm::Value *value, llvm::Value *ptr) {
void void
FunctionEmitContext::StoreInst(llvm::Value *value, llvm::Value *ptr, FunctionEmitContext::StoreInst(llvm::Value *value, llvm::Value *ptr,
llvm::Value *mask, const Type *valueType, llvm::Value *mask, const Type *valueType,
const Type *ptrType) { const Type *ptrRefType) {
if (value == NULL || ptr == NULL) { if (value == NULL || ptr == NULL) {
// may happen due to error elsewhere // may happen due to error elsewhere
Assert(m->errorCount > 0); Assert(m->errorCount > 0);
return; return;
} }
if (dynamic_cast<const ReferenceType *>(ptrType) != NULL) const PointerType *ptrType;
ptrType = PointerType::GetUniform(ptrType->GetReferenceTarget()); if (dynamic_cast<const ReferenceType *>(ptrRefType) != NULL)
ptrType = PointerType::GetUniform(ptrRefType->GetReferenceTarget());
else {
ptrType = dynamic_cast<const PointerType *>(ptrRefType);
Assert(ptrType != NULL);
}
// Figure out what kind of store we're doing here // Figure out what kind of store we're doing here
if (ptrType->IsUniformType()) { if (ptrType->IsUniformType()) {
if (ptrType->GetBaseType()->IsUniformType()) if (ptrType->IsSlice())
// storing a uniform value to a single slice of a SOA type
storeUniformToSOA(value, ptr, mask, valueType, ptrType);
else if (ptrType->GetBaseType()->IsUniformType())
// the easy case // the easy case
StoreInst(value, ptr); StoreInst(value, ptr);
else if (mask == LLVMMaskAllOn && !g->opt.disableMaskAllOnOptimizations) else if (mask == LLVMMaskAllOn && !g->opt.disableMaskAllOnOptimizations)
@@ -2551,6 +2870,37 @@ FunctionEmitContext::StoreInst(llvm::Value *value, llvm::Value *ptr,
} }
/** Store a uniform type to SOA-laid-out memory.
*/
void
FunctionEmitContext::storeUniformToSOA(llvm::Value *value, llvm::Value *ptr,
llvm::Value *mask, const Type *valueType,
const PointerType *ptrType) {
Assert(Type::Equal(ptrType->GetBaseType()->GetAsUniformType(), valueType));
const CollectionType *ct = dynamic_cast<const CollectionType *>(valueType);
if (ct != NULL) {
// Handle collections element wise...
for (int i = 0; i < ct->GetElementCount(); ++i) {
llvm::Value *eltValue = ExtractInst(value, i);
const Type *eltType = ct->GetElementType(i);
const PointerType *dstEltPtrType;
llvm::Value *dstEltPtr =
AddElementOffset(ptr, i, ptrType, "slice_offset",
&dstEltPtrType);
StoreInst(eltValue, dstEltPtr, mask, eltType, dstEltPtrType);
}
}
else {
// We're finally at a leaf SOA array; apply the slice offset and
// then we can do a final regular store
Assert(Type::IsBasicType(valueType));
ptr = lFinalSliceOffset(this, ptr, &ptrType);
StoreInst(value, ptr);
}
}
void void
FunctionEmitContext::MemcpyInst(llvm::Value *dest, llvm::Value *src, FunctionEmitContext::MemcpyInst(llvm::Value *dest, llvm::Value *src,
llvm::Value *count, llvm::Value *align) { llvm::Value *count, llvm::Value *align) {
@@ -3012,11 +3362,10 @@ FunctionEmitContext::addVaryingOffsetsIfNeeded(llvm::Value *ptr,
Assert(pt && pt->IsVaryingType()); Assert(pt && pt->IsVaryingType());
const Type *baseType = ptrType->GetBaseType(); const Type *baseType = ptrType->GetBaseType();
if (dynamic_cast<const AtomicType *>(baseType) == NULL && if (Type::IsBasicType(baseType) == false)
dynamic_cast<const EnumType *>(baseType) == NULL &&
dynamic_cast<const PointerType *>(baseType) == NULL)
return ptr; return ptr;
if (baseType->IsUniformType())
if (baseType->IsVaryingType() == false)
return ptr; return ptr;
// Find the size of a uniform element of the varying type // Find the size of a uniform element of the varying type

ctx.h

@@ -1,5 +1,5 @@
 /*
-Copyright (c) 2010-2011, Intel Corporation
+Copyright (c) 2010-2012, Intel Corporation
 All rights reserved.
 Redistribution and use in source and binary forms, with or without
@@ -392,6 +392,16 @@ public:
llvm::Instruction *ZExtInst(llvm::Value *value, LLVM_TYPE_CONST llvm::Type *type, llvm::Instruction *ZExtInst(llvm::Value *value, LLVM_TYPE_CONST llvm::Type *type,
const char *name = NULL); const char *name = NULL);
/** Given two integer-typed values (but possibly one vector and the
other not, and or of possibly-different bit-widths), update their
values as needed so that the two have the same (more general)
type. */
void MatchIntegerTypes(llvm::Value **v0, llvm::Value **v1);
/** Create a new slice pointer out of the given pointer to an soa type
and an integer offset to a slice within that type. */
llvm::Value *MakeSlicePointer(llvm::Value *ptr, llvm::Value *offset);
/** These GEP methods are generalizations of the standard ones in LLVM; /** These GEP methods are generalizations of the standard ones in LLVM;
they support both uniform and varying basePtr values as well as they support both uniform and varying basePtr values as well as
uniform and varying index values (arrays of indices). Varying base uniform and varying index values (arrays of indices). Varying base
@@ -412,7 +422,8 @@ public:
the type of the pointer, though it may be NULL if the base pointer the type of the pointer, though it may be NULL if the base pointer
is uniform. */ is uniform. */
llvm::Value *AddElementOffset(llvm::Value *basePtr, int elementNum, llvm::Value *AddElementOffset(llvm::Value *basePtr, int elementNum,
const Type *ptrType, const char *name = NULL); const Type *ptrType, const char *name = NULL,
const PointerType **resultPtrType = NULL);
/** Load from the memory location(s) given by lvalue, using the given /** Load from the memory location(s) given by lvalue, using the given
mask. The lvalue may be varying, in which case this corresponds to mask. The lvalue may be varying, in which case this corresponds to
@@ -657,8 +668,15 @@ private:
const Type *ptrType, llvm::Value *mask); const Type *ptrType, llvm::Value *mask);
void maskedStore(llvm::Value *value, llvm::Value *ptr, const Type *ptrType, void maskedStore(llvm::Value *value, llvm::Value *ptr, const Type *ptrType,
llvm::Value *mask); llvm::Value *mask);
llvm::Value *gather(llvm::Value *ptr, const Type *ptrType, llvm::Value *mask, void storeUniformToSOA(llvm::Value *value, llvm::Value *ptr,
const char *name); llvm::Value *mask, const Type *valueType,
const PointerType *ptrType);
llvm::Value *loadUniformFromSOA(llvm::Value *ptr, llvm::Value *mask,
const PointerType *ptrType, const char *name);
llvm::Value *gather(llvm::Value *ptr, const PointerType *ptrType,
llvm::Value *mask, const char *name);
llvm::Value *addVaryingOffsetsIfNeeded(llvm::Value *ptr, const Type *ptrType); llvm::Value *addVaryingOffsetsIfNeeded(llvm::Value *ptr, const Type *ptrType);
}; };


@@ -1,5 +1,5 @@
 /*
-Copyright (c) 2010-2011, Intel Corporation
+Copyright (c) 2010-2012, Intel Corporation
 All rights reserved.
 Redistribution and use in source and binary forms, with or without
@@ -144,6 +144,35 @@ DeclSpecs::GetBaseType(SourcePos pos) const {
} }
retType = lApplyTypeQualifiers(typeQualifiers, retType, pos); retType = lApplyTypeQualifiers(typeQualifiers, retType, pos);
if (soaWidth > 0) {
const StructType *st = dynamic_cast<const StructType *>(retType);
if (st == NULL) {
Error(pos, "Illegal to provide soa<%d> qualifier with non-struct "
"type \"%s\".", soaWidth, retType->GetString().c_str());
return NULL;
}
else if (soaWidth <= 0 || (soaWidth & (soaWidth - 1)) != 0) {
Error(pos, "soa<%d> width illegal. Value must be positive power "
"of two.", soaWidth);
return NULL;
}
if (st->IsUniformType()) {
Error(pos, "\"uniform\" qualifier and \"soa<%d>\" qualifier can't "
"both be used in a type declaration.", soaWidth);
return NULL;
}
else if (st->IsVaryingType()) {
Error(pos, "\"varying\" qualifier and \"soa<%d>\" qualifier can't "
"both be used in a type declaration.", soaWidth);
return NULL;
}
else
retType = st->GetAsSOAType(soaWidth);
}
return retType; return retType;
} }
@@ -337,7 +366,10 @@ Declarator::GetType(const Type *base, DeclSpecs *ds) const {
return type; return type;
case DK_POINTER: case DK_POINTER:
type = new PointerType(type, variability, isConst); /* For now, any pointer to an SOA type gets the slice property; if
we add the capability to declare pointers as slices or not,
we'll want to set this based on a type qualifier here. */
type = new PointerType(type, variability, isConst, type->IsSOAType());
if (child != NULL) if (child != NULL)
return child->GetType(type, ds); return child->GetType(type, ds);
else else

expr.cpp

@@ -1,5 +1,5 @@
 /*
-Copyright (c) 2010-2011, Intel Corporation
+Copyright (c) 2010-2012, Intel Corporation
 All rights reserved.
 Redistribution and use in source and binary forms, with or without
@@ -226,6 +226,16 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr,
return false; return false;
} }
if ((toType->GetSOAWidth() > 0 || fromType->GetSOAWidth() > 0) &&
Type::Equal(toType->GetAsUniformType(), fromType->GetAsUniformType()) &&
toType->GetSOAWidth() != fromType->GetSOAWidth()) {
if (!failureOk)
Error(pos, "Can't convert between types \"%s\" and \"%s\" with "
"different SOA widths for %s.", fromType->GetString().c_str(),
toType->GetString().c_str(), errorMsgBase);
return false;
}
const ArrayType *toArrayType = dynamic_cast<const ArrayType *>(toType); const ArrayType *toArrayType = dynamic_cast<const ArrayType *>(toType);
const ArrayType *fromArrayType = dynamic_cast<const ArrayType *>(fromType); const ArrayType *fromArrayType = dynamic_cast<const ArrayType *>(fromType);
const VectorType *toVectorType = dynamic_cast<const VectorType *>(toType); const VectorType *toVectorType = dynamic_cast<const VectorType *>(toType);
@@ -289,6 +299,15 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr,
toType->GetString().c_str(), errorMsgBase); toType->GetString().c_str(), errorMsgBase);
return false; return false;
} }
else if (fromPointerType->IsSlice() == true &&
toPointerType->IsSlice() == false) {
if (!failureOk)
Error(pos, "Can't convert from pointer to SOA type "
"\"%s\" to pointer to non-SOA type \"%s\" for %s.",
fromPointerType->GetAsNonSlice()->GetString().c_str(),
toType->GetString().c_str(), errorMsgBase);
return false;
}
else if (PointerType::IsVoidPointer(toPointerType)) { else if (PointerType::IsVoidPointer(toPointerType)) {
// any pointer type can be converted to a void * // any pointer type can be converted to a void *
goto typecast_ok; goto typecast_ok;
@@ -314,6 +333,10 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr,
if (toType->IsVaryingType() && fromType->IsUniformType()) if (toType->IsVaryingType() && fromType->IsUniformType())
goto typecast_ok; goto typecast_ok;
if (toPointerType->IsSlice() == true &&
fromPointerType->IsSlice() == false)
goto typecast_ok;
// Otherwise there's nothing to do // Otherwise there's nothing to do
return true; return true;
} }
@@ -333,6 +356,18 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr,
return false; return false;
} }
// Need to check this early, since otherwise the "unbound"
// variability of SOA struct types causes things to get messy if that
// hasn't been detected...
if (toStructType && fromStructType &&
(toStructType->GetSOAWidth() != fromStructType->GetSOAWidth())) {
if (!failureOk)
Error(pos, "Can't convert between incompatible struct types \"%s\" "
"and \"%s\" for %s.", fromType->GetString().c_str(),
toType->GetString().c_str(), errorMsgBase);
return false;
}
// Convert from type T -> const T; just return a TypeCast expr, which // Convert from type T -> const T; just return a TypeCast expr, which
// can handle this // can handle this
if (Type::Equal(toType, fromType->GetAsConstType())) if (Type::Equal(toType, fromType->GetAsConstType()))
@@ -469,23 +504,31 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr,
// other... // other...
if (fromAtomicType == NULL) { if (fromAtomicType == NULL) {
if (!failureOk) if (!failureOk)
Error(pos, "Type conversion only possible from atomic types, not " Error(pos, "Type conversion from \"%s\" to \"%s\" for %s is not "
"from \"%s\" to \"%s\", for %s.", fromType->GetString().c_str(), "possible.", fromType->GetString().c_str(),
toType->GetString().c_str(), errorMsgBase); toType->GetString().c_str(), errorMsgBase);
return false; return false;
} }
// scalar -> short-vector conversions // scalar -> short-vector conversions
if (toVectorType != NULL) if (toVectorType != NULL &&
(fromType->GetSOAWidth() == toType->GetSOAWidth()))
goto typecast_ok; goto typecast_ok;
// ok, it better be a scalar->scalar conversion of some sort by now // ok, it better be a scalar->scalar conversion of some sort by now
if (toAtomicType == NULL) { if (toAtomicType == NULL) {
if (!failureOk) if (!failureOk)
Error(pos, "Type conversion only possible to atomic types, not " Error(pos, "Type conversion from \"%s\" to \"%s\" for %s is "
"from \"%s\" to \"%s\", for %s.", "not possible", fromType->GetString().c_str(),
fromType->GetString().c_str(), toType->GetString().c_str(), toType->GetString().c_str(), errorMsgBase);
errorMsgBase); return false;
}
if (fromType->GetSOAWidth() != toType->GetSOAWidth()) {
if (!failureOk)
Error(pos, "Can't convert between types \"%s\" and \"%s\" with "
"different SOA widths for %s.", fromType->GetString().c_str(),
toType->GetString().c_str(), errorMsgBase);
return false; return false;
} }
@@ -606,19 +649,18 @@ InitSymbol(llvm::Value *ptr, const Type *symType, Expr *initExpr,
return; return;
} }
// Atomic types and enums can't be initialized with { ... } initializer // Atomic types and enums can be initialized with { ... } initializer
// expressions, so print an error and return if that's what we've got // expressions if they have a single element (except for SOA types,
// here.. // which are handled below).
if (dynamic_cast<const AtomicType *>(symType) != NULL || if (symType->IsSOAType() == false && Type::IsBasicType(symType)) {
dynamic_cast<const EnumType *>(symType) != NULL ||
dynamic_cast<const PointerType *>(symType) != NULL) {
ExprList *elist = dynamic_cast<ExprList *>(initExpr); ExprList *elist = dynamic_cast<ExprList *>(initExpr);
if (elist != NULL) { if (elist != NULL) {
if (elist->exprs.size() == 1) if (elist->exprs.size() == 1)
InitSymbol(ptr, symType, elist->exprs[0], ctx, pos); InitSymbol(ptr, symType, elist->exprs[0], ctx, pos);
else else
Error(initExpr->pos, "Expression list initializers can't be used " Error(initExpr->pos, "Expression list initializers with "
"with type \"%s\".", symType->GetString().c_str()); "multiple values can't be used with type \"%s\".",
symType->GetString().c_str());
} }
return; return;
} }
@@ -638,15 +680,14 @@ InitSymbol(llvm::Value *ptr, const Type *symType, Expr *initExpr,
return; return;
} }
// There are two cases for initializing structs, arrays and vectors; // Handle initializers for SOA types as well as for structs, arrays,
// either a single initializer may be provided (float foo[3] = 0;), in // and vectors.
// which case all of the elements are initialized to the given value,
// or an initializer list may be provided (float foo[3] = { 1,2,3 }),
// in which case the elements are initialized with the corresponding
// values.
const CollectionType *collectionType = const CollectionType *collectionType =
dynamic_cast<const CollectionType *>(symType); dynamic_cast<const CollectionType *>(symType);
if (collectionType != NULL) { if (collectionType != NULL || symType->IsSOAType()) {
int nElements = collectionType ? collectionType->GetElementCount() :
symType->GetSOAWidth();
std::string name; std::string name;
if (dynamic_cast<const StructType *>(symType) != NULL) if (dynamic_cast<const StructType *>(symType) != NULL)
name = "struct"; name = "struct";
@@ -654,26 +695,37 @@ InitSymbol(llvm::Value *ptr, const Type *symType, Expr *initExpr,
name = "array"; name = "array";
else if (dynamic_cast<const VectorType *>(symType) != NULL) else if (dynamic_cast<const VectorType *>(symType) != NULL)
name = "vector"; name = "vector";
else if (symType->IsSOAType())
name = symType->GetVariability().GetString();
else else
FATAL("Unexpected CollectionType in InitSymbol()"); FATAL("Unexpected CollectionType in InitSymbol()");
// There are two cases for initializing these types; either a
// single initializer may be provided (float foo[3] = 0;), in which
// case all of the elements are initialized to the given value, or
// an initializer list may be provided (float foo[3] = { 1,2,3 }),
// in which case the elements are initialized with the
// corresponding values.
ExprList *exprList = dynamic_cast<ExprList *>(initExpr); ExprList *exprList = dynamic_cast<ExprList *>(initExpr);
if (exprList != NULL) { if (exprList != NULL) {
// The { ... } case; make sure we have the no more expressions // The { ... } case; make sure we have the no more expressions
// in the ExprList as we have struct members // in the ExprList as we have struct members
int nInits = exprList->exprs.size(); int nInits = exprList->exprs.size();
if (nInits > collectionType->GetElementCount()) { if (nInits > nElements) {
Error(initExpr->pos, "Initializer for %s type \"%s\" requires " Error(initExpr->pos, "Initializer for %s type \"%s\" requires "
"no more than %d values; %d provided.", name.c_str(), "no more than %d values; %d provided.", name.c_str(),
symType->GetString().c_str(), symType->GetString().c_str(), nElements, nInits);
collectionType->GetElementCount(), nInits);
return; return;
} }
// Initialize each element with the corresponding value from // Initialize each element with the corresponding value from
// the ExprList // the ExprList
for (int i = 0; i < collectionType->GetElementCount(); ++i) { for (int i = 0; i < nElements; ++i) {
const Type *elementType = collectionType->GetElementType(i); // For SOA types, the element type is the uniform variant
// of the underlying type
const Type *elementType =
collectionType ? collectionType->GetElementType(i) :
symType->GetAsUniformType();
if (elementType == NULL) { if (elementType == NULL) {
Assert(m->errorCount > 0); Assert(m->errorCount > 0);
return; return;
@@ -688,8 +740,7 @@ InitSymbol(llvm::Value *ptr, const Type *symType, Expr *initExpr,
"gep"); "gep");
if (i < nInits) if (i < nInits)
InitSymbol(ep, collectionType->GetElementType(i), InitSymbol(ep, elementType, exprList->exprs[i], ctx, pos);
exprList->exprs[i], ctx, pos);
else { else {
// If we don't have enough initializer values, initialize the // If we don't have enough initializer values, initialize the
// rest as zero. // rest as zero.
@@ -1152,6 +1203,12 @@ UnaryExpr::TypeCheck() {
// something went wrong in type checking... // something went wrong in type checking...
return NULL; return NULL;
if (type->IsSOAType()) {
Error(pos, "Can't apply unary operator to SOA type \"%s\".",
type->GetString().c_str());
return NULL;
}
if (op == PreInc || op == PreDec || op == PostInc || op == PostDec) { if (op == PreInc || op == PreDec || op == PostInc || op == PostDec) {
if (type->IsConstType()) { if (type->IsConstType()) {
Error(pos, "Can't assign to type \"%s\" on left-hand side of " Error(pos, "Can't assign to type \"%s\" on left-hand side of "
@@ -1162,7 +1219,8 @@ UnaryExpr::TypeCheck() {
if (type->IsNumericType()) if (type->IsNumericType())
return this; return this;
if (dynamic_cast<const PointerType *>(type) == NULL) { const PointerType *pt = dynamic_cast<const PointerType *>(type);
if (pt == NULL) {
Error(expr->pos, "Can only pre/post increment numeric and " Error(expr->pos, "Can only pre/post increment numeric and "
"pointer types, not \"%s\".", type->GetString().c_str()); "pointer types, not \"%s\".", type->GetString().c_str());
return NULL; return NULL;
@@ -1294,16 +1352,13 @@ lEmitBinaryBitOp(BinaryExpr::Op op, llvm::Value *arg0Val,
} }
/** Utility routine to emit binary arithmetic operator based on the given
BinaryExpr::Op.
*/
static llvm::Value * static llvm::Value *
lEmitBinaryArith(BinaryExpr::Op op, llvm::Value *value0, llvm::Value *value1, lEmitBinaryPointerArith(BinaryExpr::Op op, llvm::Value *value0,
const Type *type0, const Type *type1, llvm::Value *value1, const Type *type0,
FunctionEmitContext *ctx, SourcePos pos) { const Type *type1, FunctionEmitContext *ctx,
SourcePos pos) {
const PointerType *ptrType = dynamic_cast<const PointerType *>(type0); const PointerType *ptrType = dynamic_cast<const PointerType *>(type0);
if (ptrType != NULL) {
switch (op) { switch (op) {
case BinaryExpr::Add: case BinaryExpr::Add:
// ptr + integer // ptr + integer
@@ -1311,6 +1366,36 @@ lEmitBinaryArith(BinaryExpr::Op op, llvm::Value *value0, llvm::Value *value1,
break; break;
case BinaryExpr::Sub: { case BinaryExpr::Sub: {
if (dynamic_cast<const PointerType *>(type1) != NULL) { if (dynamic_cast<const PointerType *>(type1) != NULL) {
Assert(Type::Equal(type0, type1));
if (ptrType->IsSlice()) {
llvm::Value *p0 = ctx->ExtractInst(value0, 0);
llvm::Value *p1 = ctx->ExtractInst(value1, 0);
const Type *majorType = ptrType->GetAsNonSlice();
llvm::Value *majorDelta =
lEmitBinaryPointerArith(op, p0, p1, majorType, majorType,
ctx, pos);
int soaWidth = ptrType->GetBaseType()->GetSOAWidth();
Assert(soaWidth > 0);
llvm::Value *soaScale = LLVMIntAsType(soaWidth,
majorDelta->getType());
llvm::Value *majorScale =
ctx->BinaryOperator(llvm::Instruction::Mul, majorDelta,
soaScale, "major_soa_scaled");
llvm::Value *m0 = ctx->ExtractInst(value0, 1);
llvm::Value *m1 = ctx->ExtractInst(value1, 1);
llvm::Value *minorDelta =
ctx->BinaryOperator(llvm::Instruction::Sub, m0, m1,
"minor_soa_delta");
ctx->MatchIntegerTypes(&majorScale, &minorDelta);
return ctx->BinaryOperator(llvm::Instruction::Add, majorScale,
minorDelta, "soa_ptrdiff");
}
// ptr - ptr // ptr - ptr
if (ptrType->IsUniformType()) { if (ptrType->IsUniformType()) {
value0 = ctx->PtrToIntInst(value0); value0 = ctx->PtrToIntInst(value0);
@@ -1364,7 +1449,21 @@ lEmitBinaryArith(BinaryExpr::Op op, llvm::Value *value0, llvm::Value *value1,
FATAL("Logic error in lEmitBinaryArith() for pointer type case"); FATAL("Logic error in lEmitBinaryArith() for pointer type case");
return NULL; return NULL;
} }
} }
/** Utility routine to emit binary arithmetic operator based on the given
BinaryExpr::Op.
*/
static llvm::Value *
lEmitBinaryArith(BinaryExpr::Op op, llvm::Value *value0, llvm::Value *value1,
const Type *type0, const Type *type1,
FunctionEmitContext *ctx, SourcePos pos) {
const PointerType *ptrType = dynamic_cast<const PointerType *>(type0);
if (ptrType != NULL)
return lEmitBinaryPointerArith(op, value0, value1, type0, type1,
ctx, pos);
else { else {
Assert(Type::EqualIgnoringConst(type0, type1)); Assert(Type::EqualIgnoringConst(type0, type1));
@@ -2118,6 +2217,8 @@ BinaryExpr::TypeCheck() {
if (type0 == NULL || type1 == NULL) if (type0 == NULL || type1 == NULL)
return NULL; return NULL;
// If either operand is a reference, dereference it before we move
// forward
if (dynamic_cast<const ReferenceType *>(type0) != NULL) { if (dynamic_cast<const ReferenceType *>(type0) != NULL) {
arg0 = new DereferenceExpr(arg0, arg0->pos); arg0 = new DereferenceExpr(arg0, arg0->pos);
type0 = arg0->GetType(); type0 = arg0->GetType();
@@ -2139,9 +2240,22 @@ BinaryExpr::TypeCheck() {
type1 = arg1->GetType(); type1 = arg1->GetType();
} }
// Prohibit binary operators with SOA types
if (type0->GetSOAWidth() > 0) {
Error(arg0->pos, "Illegal to use binary operator %s with SOA type "
"\"%s\".", lOpString(op), type0->GetString().c_str());
return NULL;
}
if (type1->GetSOAWidth() > 0) {
Error(arg1->pos, "Illegal to use binary operator %s with SOA type "
"\"%s\".", lOpString(op), type1->GetString().c_str());
return NULL;
}
const PointerType *pt0 = dynamic_cast<const PointerType *>(type0); const PointerType *pt0 = dynamic_cast<const PointerType *>(type0);
const PointerType *pt1 = dynamic_cast<const PointerType *>(type1); const PointerType *pt1 = dynamic_cast<const PointerType *>(type1);
if (pt0 != NULL && pt1 != NULL && op == Sub) { if (pt0 != NULL && pt1 != NULL && op == Sub) {
// Pointer subtraction
if (PointerType::IsVoidPointer(type0)) { if (PointerType::IsVoidPointer(type0)) {
Error(pos, "Illegal to perform pointer arithmetic " Error(pos, "Illegal to perform pointer arithmetic "
"on \"%s\" type.", type0->GetString().c_str()); "on \"%s\" type.", type0->GetString().c_str());
@@ -2156,6 +2270,7 @@ BinaryExpr::TypeCheck() {
const Type *t = Type::MoreGeneralType(type0, type1, pos, "-"); const Type *t = Type::MoreGeneralType(type0, type1, pos, "-");
if (t == NULL) if (t == NULL)
return NULL; return NULL;
arg0 = TypeConvertExpr(arg0, t, "pointer subtraction"); arg0 = TypeConvertExpr(arg0, t, "pointer subtraction");
arg1 = TypeConvertExpr(arg1, t, "pointer subtraction"); arg1 = TypeConvertExpr(arg1, t, "pointer subtraction");
if (arg0 == NULL || arg1 == NULL) if (arg0 == NULL || arg1 == NULL)
@@ -3609,23 +3724,22 @@ IndexExpr::IndexExpr(Expr *a, Expr *i, SourcePos p)
*/ */
static llvm::Value * static llvm::Value *
lAddVaryingOffsetsIfNeeded(FunctionEmitContext *ctx, llvm::Value *ptr, lAddVaryingOffsetsIfNeeded(FunctionEmitContext *ctx, llvm::Value *ptr,
const Type *ptrType) { const Type *ptrRefType) {
if (dynamic_cast<const ReferenceType *>(ptrType) != NULL) if (dynamic_cast<const ReferenceType *>(ptrRefType) != NULL)
// References are uniform pointers, so no offsetting is needed // References are uniform pointers, so no offsetting is needed
return ptr; return ptr;
Assert(dynamic_cast<const PointerType *>(ptrType) != NULL); const PointerType *ptrType = dynamic_cast<const PointerType *>(ptrRefType);
if (ptrType->IsUniformType()) Assert(ptrType != NULL);
if (ptrType->IsUniformType() || ptrType->IsSlice())
return ptr; return ptr;
const Type *baseType = ptrType->GetBaseType(); const Type *baseType = ptrType->GetBaseType();
if (baseType->IsUniformType()) if (baseType->IsVaryingType() == false)
return ptr; return ptr;
// must be indexing into varying atomic, enum, or pointer types // must be indexing into varying atomic, enum, or pointer types
if (dynamic_cast<const AtomicType *>(baseType) == NULL && if (Type::IsBasicType(baseType) == false)
dynamic_cast<const EnumType *>(baseType) == NULL &&
dynamic_cast<const PointerType *>(baseType) == NULL)
return ptr; return ptr;
// Onward: compute the per lane offsets. // Onward: compute the per lane offsets.
@@ -3706,9 +3820,8 @@ lVaryingStructHasUniformMember(const Type *type, SourcePos pos) {
llvm::Value * llvm::Value *
IndexExpr::GetValue(FunctionEmitContext *ctx) const { IndexExpr::GetValue(FunctionEmitContext *ctx) const {
const Type *baseExprType, *indexType, *returnType; const Type *indexType, *returnType;
if (baseExpr == NULL || index == NULL || if (baseExpr == NULL || index == NULL ||
((baseExprType = baseExpr->GetType()) == NULL) ||
((indexType = index->GetType()) == NULL) || ((indexType = index->GetType()) == NULL) ||
((returnType = GetType()) == NULL)) { ((returnType = GetType()) == NULL)) {
Assert(m->errorCount > 0); Assert(m->errorCount > 0);
@@ -3724,35 +3837,34 @@ IndexExpr::GetValue(FunctionEmitContext *ctx) const {
ctx->SetDebugPos(pos); ctx->SetDebugPos(pos);
llvm::Value *lvalue = GetLValue(ctx); llvm::Value *ptr = GetLValue(ctx);
llvm::Value *mask = NULL; llvm::Value *mask = NULL;
const Type *lvalueType = GetLValueType(); const Type *lvalueType = GetLValueType();
if (lvalue == NULL) { if (ptr == NULL) {
// We may be indexing into a temporary that hasn't hit memory, so // We may be indexing into a temporary that hasn't hit memory, so
// get the full value and stuff it into temporary alloca'd space so // get the full value and stuff it into temporary alloca'd space so
// that we can index from there... // that we can index from there...
const Type *baseExprType = baseExpr->GetType();
llvm::Value *val = baseExpr->GetValue(ctx); llvm::Value *val = baseExpr->GetValue(ctx);
if (val == NULL) { if (baseExprType == NULL || val == NULL) {
Assert(m->errorCount > 0); Assert(m->errorCount > 0);
return NULL; return NULL;
} }
ctx->SetDebugPos(pos); ctx->SetDebugPos(pos);
llvm::Value *ptr = ctx->AllocaInst(baseExprType->LLVMType(g->ctx), llvm::Value *tmpPtr = ctx->AllocaInst(baseExprType->LLVMType(g->ctx),
"array_tmp"); "array_tmp");
ctx->StoreInst(val, ptr); ctx->StoreInst(val, tmpPtr);
lvalue = ctx->GetElementPtrInst(ptr, LLVMInt32(0), index->GetValue(ctx),
PointerType::GetUniform(baseExprType));
// Get a pointer type to the underlying elements
const SequentialType *st = const SequentialType *st =
dynamic_cast<const SequentialType *>(baseExprType); dynamic_cast<const SequentialType *>(baseExprType);
if (st == NULL) { Assert(st != NULL);
Assert(m->errorCount > 0);
return NULL;
}
lvalueType = PointerType::GetUniform(st->GetElementType()); lvalueType = PointerType::GetUniform(st->GetElementType());
lvalue = lAddVaryingOffsetsIfNeeded(ctx, lvalue, lvalueType); // And do the indexing calculation into the temporary array in memory
ptr = ctx->GetElementPtrInst(tmpPtr, LLVMInt32(0), index->GetValue(ctx),
PointerType::GetUniform(baseExprType));
ptr = lAddVaryingOffsetsIfNeeded(ctx, ptr, lvalueType);
mask = LLVMMaskAllOn; mask = LLVMMaskAllOn;
} }
@@ -3763,7 +3875,7 @@ IndexExpr::GetValue(FunctionEmitContext *ctx) const {
} }
ctx->SetDebugPos(pos); ctx->SetDebugPos(pos);
return ctx->LoadInst(lvalue, mask, lvalueType, "index"); return ctx->LoadInst(ptr, mask, lvalueType, "index");
} }
@@ -3790,12 +3902,23 @@ IndexExpr::GetType() const {
elementType = sequentialType->GetElementType(); elementType = sequentialType->GetElementType();
} }
if (indexType->IsUniformType() && baseExprType->IsUniformType()) // If we're indexing into a sequence of SOA types, the result type is
// If the index is uniform, the resulting type is just whatever the // actually the underlying type, as a uniform or varying. Get the
// element type is // uniform variant of it for starters, then below we'll make it varying
// if the index is varying.
// (If we ever provide a way to index into SOA types and get an entire
// SOA'd struct out of the array, then we won't want to do this in that
// case..)
if (elementType->IsSOAType())
elementType = elementType->GetAsUniformType();
// If either the index is varying or we're indexing into a varying
// pointer, then the result type is the varying variant of the indexed
// type.
if (indexType->IsUniformType() &&
(pointerType == NULL || pointerType->IsUniformType()))
return elementType; return elementType;
else else
// A varying index into even a uniform base type -> varying type
return elementType->GetAsVaryingType(); return elementType->GetAsVaryingType();
} }
@@ -3806,105 +3929,207 @@ IndexExpr::GetBaseSymbol() const {
} }
/** Utility routine that takes a regular pointer (either uniform or
varying) and returns a slice pointer with zero offsets.
*/
static llvm::Value *
lConvertToSlicePointer(FunctionEmitContext *ctx, llvm::Value *ptr,
const PointerType *slicePtrType) {
LLVM_TYPE_CONST llvm::Type *llvmSlicePtrType =
slicePtrType->LLVMType(g->ctx);
LLVM_TYPE_CONST llvm::StructType *sliceStructType =
llvm::dyn_cast<LLVM_TYPE_CONST llvm::StructType>(llvmSlicePtrType);
Assert(sliceStructType != NULL &&
sliceStructType->getElementType(0) == ptr->getType());
// Get a null-initialized struct to take care of having zeros for the
// offsets
llvm::Value *result = llvm::Constant::getNullValue(sliceStructType);
// And replace the pointer in the struct with the given pointer
return ctx->InsertInst(result, ptr, 0);
}
/** If the given array index is a compile time constant, check to see if it
value/values don't go past the end of the array; issue a warning if
so.
*/
static void
lCheckIndicesVersusBounds(const Type *baseExprType, Expr *index) {
const SequentialType *seqType =
dynamic_cast<const SequentialType *>(baseExprType);
if (seqType == NULL)
return;
int nElements = seqType->GetElementCount();
if (nElements == 0)
// Unsized array...
return;
// If it's an array of soa<> items, then the number of elements to
// worry about w.r.t. index values is the product of the array size and
// the soa width.
int soaWidth = seqType->GetElementType()->GetSOAWidth();
if (soaWidth > 0)
nElements *= soaWidth;
ConstExpr *ce = dynamic_cast<ConstExpr *>(index);
if (ce == NULL)
return;
int32_t indices[ISPC_MAX_NVEC];
int count = ce->AsInt32(indices);
for (int i = 0; i < count; ++i) {
if (indices[i] < 0 || indices[i] >= nElements)
Warning(index->pos, "Array index \"%d\" may be out of bounds for %d "
"element array.", indices[i], nElements);
}
}
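Because each soa<> element packs soaWidth logical values, the index range the warning checks against is the array's element count times the SOA width. A small standalone illustration of that bound computation (checkIndex is an invented helper, not the function above):

#include <cstdio>

// Illustrative only: warn if a constant index is out of bounds for an
// array of arrayCount soa<soaWidth> elements, which together cover
// arrayCount * soaWidth logical values (soaWidth == 0 meaning "not SOA").
static void checkIndex(int index, int arrayCount, int soaWidth) {
    int nElements = arrayCount;
    if (soaWidth > 0)
        nElements *= soaWidth;
    if (index < 0 || index >= nElements)
        std::printf("warning: index %d may be out of bounds for %d element array\n",
                    index, nElements);
}

int main() {
    checkIndex(35, 4, 8);   // 4 soa<8> elements -> 32 logical values: warns
    checkIndex(31, 4, 8);   // in range: silent
    return 0;
}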
/** Converts the given pointer value to a slice pointer if the pointer
points to SOA'ed data.
*/
static llvm::Value *
lConvertPtrToSliceIfNeeded(FunctionEmitContext *ctx,
llvm::Value *ptr,
const Type **type) {
Assert(*type != NULL);
const PointerType *ptrType = dynamic_cast<const PointerType *>(*type);
bool convertToSlice = (ptrType->GetBaseType()->IsSOAType() &&
ptrType->IsSlice() == false);
if (convertToSlice == false)
return ptr;
*type = ptrType->GetAsSlice();
return lConvertToSlicePointer(ctx, ptr, ptrType->GetAsSlice());
}
llvm::Value * llvm::Value *
IndexExpr::GetLValue(FunctionEmitContext *ctx) const { IndexExpr::GetLValue(FunctionEmitContext *ctx) const {
const Type *baseExprType; const Type *baseExprType;
if (baseExpr == NULL || index == NULL || if (baseExpr == NULL || index == NULL ||
((baseExprType = baseExpr->GetType()) == NULL)) ((baseExprType = baseExpr->GetType()) == NULL)) {
Assert(m->errorCount > 0);
return NULL; return NULL;
}
ctx->SetDebugPos(pos);
llvm::Value *indexValue = index->GetValue(ctx);
if (indexValue == NULL) {
Assert(m->errorCount > 0);
return NULL;
}
ctx->SetDebugPos(pos); ctx->SetDebugPos(pos);
if (dynamic_cast<const PointerType *>(baseExprType) != NULL) { if (dynamic_cast<const PointerType *>(baseExprType) != NULL) {
// We're indexing off of a base pointer // We're indexing off of a pointer
llvm::Value *baseValue = baseExpr->GetValue(ctx); llvm::Value *basePtrValue = baseExpr->GetValue(ctx);
llvm::Value *indexValue = index->GetValue(ctx); if (basePtrValue == NULL) {
if (baseValue == NULL || indexValue == NULL) Assert(m->errorCount > 0);
return NULL; return NULL;
}
ctx->SetDebugPos(pos); ctx->SetDebugPos(pos);
llvm::Value *ptr = ctx->GetElementPtrInst(baseValue, indexValue,
// Convert to a slice pointer if we're indexing into SOA data
basePtrValue = lConvertPtrToSliceIfNeeded(ctx, basePtrValue,
&baseExprType);
llvm::Value *ptr = ctx->GetElementPtrInst(basePtrValue, indexValue,
baseExprType, "ptr_offset"); baseExprType, "ptr_offset");
ptr = lAddVaryingOffsetsIfNeeded(ctx, ptr, GetLValueType()); return lAddVaryingOffsetsIfNeeded(ctx, ptr, GetLValueType());
return ptr;
} }
// Otherwise it's an array or vector // Not a pointer: we must be indexing an array or vector (and possibly
// a reference to one.)
llvm::Value *basePtr = NULL; llvm::Value *basePtr = NULL;
const Type *basePtrType = NULL; const PointerType *basePtrType = NULL;
if (dynamic_cast<const ArrayType *>(baseExprType) || if (dynamic_cast<const ArrayType *>(baseExprType) ||
dynamic_cast<const VectorType *>(baseExprType)) { dynamic_cast<const VectorType *>(baseExprType)) {
basePtr = baseExpr->GetLValue(ctx); basePtr = baseExpr->GetLValue(ctx);
basePtrType = baseExpr->GetLValueType(); basePtrType = dynamic_cast<const PointerType *>(baseExpr->GetLValueType());
if (baseExpr->GetLValueType()) Assert(basePtrType != NULL);
} }
else { else {
baseExprType = baseExprType->GetReferenceTarget(); baseExprType = baseExprType->GetReferenceTarget();
Assert(dynamic_cast<const ArrayType *>(baseExprType) || Assert(dynamic_cast<const ArrayType *>(baseExprType) ||
dynamic_cast<const VectorType *>(baseExprType)); dynamic_cast<const VectorType *>(baseExprType));
basePtr = baseExpr->GetValue(ctx); basePtr = baseExpr->GetValue(ctx);
basePtrType = baseExpr->GetType(); basePtrType = PointerType::GetUniform(baseExprType);
} }
if (!basePtr) if (!basePtr)
return NULL; return NULL;
// If the array index is a compile time constant, check to see if it // If possible, check the index value(s) against the size of the array
// may lead to an out-of-bounds access. lCheckIndicesVersusBounds(baseExprType, index);
ConstExpr *ce = dynamic_cast<ConstExpr *>(index);
const SequentialType *seqType = // Convert to a slice pointer if indexing into SOA data
dynamic_cast<const SequentialType *>(baseExprType); basePtr = lConvertPtrToSliceIfNeeded(ctx, basePtr,
if (seqType != NULL) { (const Type **)&basePtrType);
int nElements = seqType->GetElementCount();
if (ce != NULL && nElements > 0) {
int32_t indices[ISPC_MAX_NVEC];
int count = ce->AsInt32(indices);
for (int i = 0; i < count; ++i) {
if (indices[i] < 0 || indices[i] >= nElements)
Warning(index->pos, "Array index \"%d\" may be out of bounds for "
"%d element array.", indices[i], nElements);
}
}
}
ctx->SetDebugPos(pos); ctx->SetDebugPos(pos);
// And do the actual indexing calculation..
llvm::Value *ptr = llvm::Value *ptr =
ctx->GetElementPtrInst(basePtr, LLVMInt32(0), index->GetValue(ctx), ctx->GetElementPtrInst(basePtr, LLVMInt32(0), indexValue,
basePtrType); basePtrType);
ptr = lAddVaryingOffsetsIfNeeded(ctx, ptr, GetLValueType()); return lAddVaryingOffsetsIfNeeded(ctx, ptr, GetLValueType());
return ptr;
} }
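The slice-pointer machinery above is what lets a single logical index into an array of soa<> elements resolve automatically in two stages: first to an soa<> instance, then to a lane within its widened leaf arrays. A standalone C++ illustration of the equivalent layout and address arithmetic, using an invented soa<4> example type (Particle_SOA4):

#include <cassert>
#include <cstdio>

// Invented example of what soa<4> data for a two-member struct looks
// like in memory: each leaf member widened out to a 4-wide array.
struct Particle_SOA4 {
    float px[4];
    float py[4];
};

// Two-stage indexing for logical index i into Particle_SOA4 a[]: first
// select the soa<4> instance, then the lane inside its leaf arrays.
// (The division becomes a shift and the modulo a mask, since the SOA
// width is required to be a power of two.)
static float loadPx(const Particle_SOA4 *a, int i) {
    int major = i / 4;
    int minor = i % 4;
    return a[major].px[minor];
}

int main() {
    Particle_SOA4 a[2] = {};
    a[1].px[2] = 42.0f;              // logical element 1*4 + 2 == 6
    assert(loadPx(a, 6) == 42.0f);
    std::printf("logical element 6: %g\n", (double)loadPx(a, 6));
    return 0;
}

In the compiler this split is carried implicitly by the slice pointer rather than recomputed at every access.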
const Type * const Type *
IndexExpr::GetLValueType() const { IndexExpr::GetLValueType() const {
const Type *baseExprLValueType, *indexType; const Type *baseExprType, *baseExprLValueType, *indexType;
if (baseExpr == NULL || index == NULL || if (baseExpr == NULL || index == NULL ||
((baseExprType = baseExpr->GetType()) == NULL) ||
((baseExprLValueType = baseExpr->GetLValueType()) == NULL) || ((baseExprLValueType = baseExpr->GetLValueType()) == NULL) ||
((indexType = index->GetType()) == NULL)) ((indexType = index->GetType()) == NULL))
return NULL; return NULL;
if (dynamic_cast<const ReferenceType *>(baseExprLValueType) != NULL) // regularize to a PointerType
baseExprLValueType = PointerType::GetUniform(baseExprLValueType->GetReferenceTarget()); if (dynamic_cast<const ReferenceType *>(baseExprLValueType) != NULL) {
const Type *refTarget = baseExprLValueType->GetReferenceTarget();
baseExprLValueType = PointerType::GetUniform(refTarget);
}
Assert(dynamic_cast<const PointerType *>(baseExprLValueType) != NULL); Assert(dynamic_cast<const PointerType *>(baseExprLValueType) != NULL);
// FIXME: can we do something in the type system that unifies the // Find the type of thing that we're indexing into
// concept of a sequential type's element type and a pointer type's const Type *elementType;
// base type? The code below is identical but for handling that
// difference. IndexableType?
const SequentialType *st = const SequentialType *st =
dynamic_cast<const SequentialType *>(baseExprLValueType->GetBaseType()); dynamic_cast<const SequentialType *>(baseExprLValueType->GetBaseType());
if (st != NULL) { if (st != NULL)
if (baseExprLValueType->IsUniformType() && indexType->IsUniformType()) elementType = st->GetElementType();
return PointerType::GetUniform(st->GetElementType()); else {
else
return PointerType::GetVarying(st->GetElementType());
}
const PointerType *pt = const PointerType *pt =
dynamic_cast<const PointerType *>(baseExprLValueType->GetBaseType()); dynamic_cast<const PointerType *>(baseExprLValueType->GetBaseType());
Assert(pt != NULL); Assert(pt != NULL);
if (baseExprLValueType->IsUniformType() && indexType->IsUniformType() && elementType = pt->GetBaseType();
pt->IsVaryingType() == false) }
return PointerType::GetUniform(pt->GetBaseType());
// Are we indexing into a varying type, or are we indexing with a
// varying pointer?
bool baseVarying;
if (dynamic_cast<const PointerType *>(baseExprType) != NULL)
baseVarying = baseExprType->IsVaryingType();
else else
return PointerType::GetVarying(pt->GetBaseType()); baseVarying = baseExprLValueType->IsVaryingType();
// The return type is uniform iff. the base is a uniform pointer / a
// collection of uniform typed elements and the index is uniform.
const PointerType *retType;
if (baseVarying == false && indexType->IsUniformType())
retType = PointerType::GetUniform(elementType);
else
retType = PointerType::GetVarying(elementType);
// Finally, if we're indexing into an SOA type, then the resulting
// pointer must (currently) be a slice pointer; we don't allow indexing
// the soa-width-wide structs directly.
if (elementType->IsSOAType())
retType = retType->GetAsSlice();
return retType;
} }
@@ -3918,8 +4143,12 @@ IndexExpr::Optimize() {
Expr * Expr *
IndexExpr::TypeCheck() { IndexExpr::TypeCheck() {
if (baseExpr == NULL || index == NULL || index->GetType() == NULL) const Type *indexType;
if (baseExpr == NULL || index == NULL ||
((indexType = index->GetType()) == NULL)) {
Assert(m->errorCount > 0);
return NULL; return NULL;
}
const Type *baseExprType = baseExpr->GetType(); const Type *baseExprType = baseExpr->GetType();
if (baseExprType == NULL) { if (baseExprType == NULL) {
@@ -3936,11 +4165,20 @@ IndexExpr::TypeCheck() {
bool isUniform = (index->GetType()->IsUniformType() && bool isUniform = (index->GetType()->IsUniformType() &&
!g->opt.disableUniformMemoryOptimizations); !g->opt.disableUniformMemoryOptimizations);
// Unless we have an explicit 64-bit index and are compiling to a
// 64-bit target with 64-bit addressing, convert the index to an int32
// type.
if (Type::EqualIgnoringConst(indexType->GetAsUniformType(),
AtomicType::UniformInt64) == false ||
g->target.is32Bit ||
g->opt.force32BitAddressing) {
const Type *indexType = isUniform ? AtomicType::UniformInt32 : const Type *indexType = isUniform ? AtomicType::UniformInt32 :
AtomicType::VaryingInt32; AtomicType::VaryingInt32;
index = TypeConvertExpr(index, indexType, "array index"); index = TypeConvertExpr(index, indexType, "array index");
if (index == NULL) if (index == NULL)
return NULL; return NULL;
}
return this; return this;
} }
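The conversion above reduces to a small decision: an array index stays 64-bit only when it was explicitly a 64-bit type and the target really does 64-bit addressing (not a 32-bit target, not forced to 32-bit addressing); otherwise it is narrowed to int32. A hedged sketch of that predicate, with placeholder flag names standing in for the compiler's target and optimization settings:

#include <cstdio>

// Placeholder flags standing in for the settings the real code consults.
struct IndexContext {
    bool indexIsInt64;          // was the index expression explicitly 64-bit?
    bool targetIs32Bit;         // compiling for a 32-bit target?
    bool force32BitAddressing;  // user asked for 32-bit addressing?
};

// Bit width the array index is converted to before the addressing math.
static int indexBitWidth(const IndexContext &c) {
    if (c.indexIsInt64 && !c.targetIs32Bit && !c.force32BitAddressing)
        return 64;   // keep the explicit 64-bit index
    return 32;       // otherwise do index math in 32 bits
}

int main() {
    IndexContext keep64   = { true, false, false };
    IndexContext forced32 = { true, false, true };
    IndexContext plainInt = { false, false, false };
    std::printf("%d %d %d\n", indexBitWidth(keep64), indexBitWidth(forced32),
                indexBitWidth(plainInt));   // 64 32 32
    return 0;
}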
@@ -4036,18 +4274,16 @@ const Type *
StructMemberExpr::GetType() const { StructMemberExpr::GetType() const {
// It's a struct, and the result type is the element type, possibly // It's a struct, and the result type is the element type, possibly
// promoted to varying if the struct type / lvalue is varying. // promoted to varying if the struct type / lvalue is varying.
const Type *exprType; const Type *exprType, *lvalueType;
const StructType *structType; const StructType *structType;
if (expr == NULL || if (expr == NULL ||
((exprType = expr->GetType()) == NULL) || ((exprType = expr->GetType()) == NULL) ||
((structType = getStructType()) == NULL)) { ((structType = getStructType()) == NULL) ||
((lvalueType = GetLValueType()) == NULL)) {
Assert(m->errorCount > 0); Assert(m->errorCount > 0);
return NULL; return NULL;
} }
if (exprType->IsVaryingType())
structType = structType->GetAsVaryingType();
const Type *elementType = structType->GetElementType(identifier); const Type *elementType = structType->GetElementType(identifier);
if (elementType == NULL) { if (elementType == NULL) {
Error(identifierPos, Error(identifierPos,
@@ -4056,11 +4292,26 @@ StructMemberExpr::GetType() const {
getCandidateNearMatches().c_str()); getCandidateNearMatches().c_str());
return NULL; return NULL;
} }
Assert(Type::Equal(lvalueType->GetBaseType(), elementType));
// If the expression we're getting the member of has an lvalue that is bool isSlice = (dynamic_cast<const PointerType *>(lvalueType) &&
// a varying pointer type, then the result type must be the varying dynamic_cast<const PointerType *>(lvalueType)->IsSlice());
// version of the element type. if (isSlice) {
if (GetLValueType()->IsVaryingType()) // FIXME: not true if we allow bound unif/varying for soa<>
// structs?...
Assert(elementType->IsSOAType());
// If we're accessing a member of an soa structure via a uniform
// slice pointer, then the result type is the uniform variant of
// the element type.
if (lvalueType->IsUniformType())
elementType = elementType->GetAsUniformType();
}
if (lvalueType->IsVaryingType())
// If the expression we're getting the member of has an lvalue that
// is a varying pointer type (be it slice or non-slice), then the
// result type must be the varying version of the element type.
elementType = elementType->GetAsVaryingType(); elementType = elementType->GetAsVaryingType();
return elementType; return elementType;
@@ -4081,10 +4332,23 @@ StructMemberExpr::GetLValueType() const {
return NULL; return NULL;
} }
return (exprLValueType->IsUniformType() || // The pointer type is varying if the lvalue type of the expression is
// varying (and otherwise uniform)
const PointerType *ptrType =
(exprLValueType->IsUniformType() ||
dynamic_cast<const ReferenceType *>(exprLValueType) != NULL) ? dynamic_cast<const ReferenceType *>(exprLValueType) != NULL) ?
PointerType::GetUniform(getElementType()) : PointerType::GetUniform(getElementType()) :
PointerType::GetVarying(getElementType()); PointerType::GetVarying(getElementType());
// If the struct pointer is a slice pointer, the resulting member
// pointer needs to be a frozen slice pointer--i.e. any further
// indexing with the result shouldn't modify the minor slice offset,
// which should instead be left unchanged until we get to a leaf SOA value.
if (dynamic_cast<const PointerType *>(exprLValueType) &&
dynamic_cast<const PointerType *>(exprLValueType)->IsSlice())
ptrType = ptrType->GetAsFrozenSlice();
return ptrType;
} }
@@ -4195,8 +4459,19 @@ VectorMemberExpr::GetType() const {
(const Type *)memberType; (const Type *)memberType;
const Type *lvalueType = GetLValueType(); const Type *lvalueType = GetLValueType();
if (lvalueType != NULL && lvalueType->IsVaryingType()) if (lvalueType != NULL) {
bool isSlice = (dynamic_cast<const PointerType *>(lvalueType) &&
dynamic_cast<const PointerType *>(lvalueType)->IsSlice());
if (isSlice) {
//CO Assert(type->IsSOAType());
if (lvalueType->IsUniformType())
type = type->GetAsUniformType();
}
if (lvalueType->IsVaryingType())
type = type->GetAsVaryingType(); type = type->GetAsVaryingType();
}
return type; return type;
} }
@@ -4236,10 +4511,16 @@ VectorMemberExpr::GetLValueType() const {
const Type *elementType = vt->GetElementType(); const Type *elementType = vt->GetElementType();
if (dynamic_cast<const ReferenceType *>(exprLValueType) != NULL) if (dynamic_cast<const ReferenceType *>(exprLValueType) != NULL)
return new ReferenceType(elementType); return new ReferenceType(elementType);
else else {
return exprLValueType->IsUniformType() ? const PointerType *ptrType = exprLValueType->IsUniformType() ?
PointerType::GetUniform(elementType) : PointerType::GetUniform(elementType) :
PointerType::GetVarying(elementType); PointerType::GetVarying(elementType);
// FIXME: replicated logic with structmemberexpr....
if (dynamic_cast<const PointerType *>(exprLValueType) &&
dynamic_cast<const PointerType *>(exprLValueType)->IsSlice())
ptrType = ptrType->GetAsFrozenSlice();
return ptrType;
}
} }
else else
return NULL; return NULL;
@@ -6006,6 +6287,23 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const {
if (value == NULL) if (value == NULL)
return NULL; return NULL;
if (fromPointerType->IsSlice() == false &&
toPointerType->IsSlice() == true) {
// Convert from a non-slice pointer to a slice pointer by
// creating a slice pointer structure with zero offsets.
if (fromPointerType->IsUniformType())
value = ctx->MakeSlicePointer(value, LLVMInt32(0));
else
value = ctx->MakeSlicePointer(value, LLVMInt32Vector(0));
// FIXME: avoid error from unnecessary bitcast when all we
// need to do is the slice conversion and don't need to
// also do unif->varying conversions. But this is really
// ugly logic.
if (value->getType() == toType->LLVMType(g->ctx))
return value;
}
if (fromType->IsUniformType() && toType->IsUniformType()) if (fromType->IsUniformType() && toType->IsUniformType())
// bitcast to the actual pointer type // bitcast to the actual pointer type
return ctx->BitCastInst(value, toType->LLVMType(g->ctx)); return ctx->BitCastInst(value, toType->LLVMType(g->ctx));
@@ -6015,11 +6313,27 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const {
return value; return value;
} }
else { else {
// Uniform -> varying pointer conversion
Assert(fromType->IsUniformType() && toType->IsVaryingType()); Assert(fromType->IsUniformType() && toType->IsVaryingType());
if (fromPointerType->IsSlice()) {
// For slice pointers, we need to smear out both the
// pointer and the offset vector
Assert(toPointerType->IsSlice());
llvm::Value *ptr = ctx->ExtractInst(value, 0);
llvm::Value *offset = ctx->ExtractInst(value, 1);
ptr = ctx->PtrToIntInst(ptr);
ptr = ctx->SmearUniform(ptr);
offset = ctx->SmearUniform(offset);
return ctx->MakeSlicePointer(ptr, offset);
}
else {
// Otherwise we just bitcast it to an int and smear it
// out to a vector
value = ctx->PtrToIntInst(value); value = ctx->PtrToIntInst(value);
return ctx->SmearUniform(value); return ctx->SmearUniform(value);
} }
} }
}
else { else {
Assert(dynamic_cast<const AtomicType *>(toType) != NULL); Assert(dynamic_cast<const AtomicType *>(toType) != NULL);
if (toType->IsBoolType()) { if (toType->IsBoolType()) {
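For the uniform-to-varying pointer conversion just above: when the source is a slice pointer, both of its components--the pointer and the offset--are replicated ("smeared") across the program instances. A toy C++ model of that, using fixed-size arrays in place of LLVM vectors (the width of 4 and the struct names are illustrative assumptions):

#include <stdint.h>
#include <cstdio>

static const int kWidth = 4;  // stand-in for the program-instance count

// Toy models of a uniform and a varying slice pointer; in the real code
// the varying form is a pair of LLVM vectors, with pointers held as ints.
struct UniformSlice { uint64_t ptr; int32_t offset; };
struct VaryingSlice { uint64_t ptr[kWidth]; int32_t offset[kWidth]; };

// "Smearing" a uniform slice pointer out to varying: replicate both the
// pointer and the offset across all program instances.
static VaryingSlice smear(const UniformSlice &u) {
    VaryingSlice v;
    for (int i = 0; i < kWidth; ++i) {
        v.ptr[i] = u.ptr;
        v.offset[i] = u.offset;
    }
    return v;
}

int main() {
    UniformSlice u = { 0x1000, 3 };
    VaryingSlice v = smear(u);
    std::printf("lane 2: ptr=%llx offset=%d\n",
                (unsigned long long)v.ptr[2], (int)v.offset[2]);
    return 0;
}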

ispc.h

@@ -107,6 +107,7 @@ class ExprList;
class Function; class Function;
class FunctionType; class FunctionType;
class Module; class Module;
class PointerType;
class Stmt; class Stmt;
class Symbol; class Symbol;
class SymbolTable; class SymbolTable;

module.cpp

@@ -328,25 +328,39 @@ Module::AddGlobalVariable(Symbol *sym, Expr *initExpr, bool isConst) {
} }
/** Given an arbitrary type, see if it or any of the leaf types contained
    in it has a type that's illegal to have exported to C/C++
    code--specifically, that it has a varying value in memory, or a pointer
    to SOA data (which has a different representation than a regular
    pointer).

    (Note that it's fine for the original struct or a contained struct to
    be varying, so long as all of its members have bound 'uniform'
    variability.)

    This function returns true if any illegal types are found (the caller
    is then responsible for issuing an error) and false otherwise.
*/
static bool static bool
lRecursiveCheckVarying(const Type *t) { lRecursiveCheckValidParamType(const Type *t) {
t = t->GetBaseType(); t = t->GetBaseType();
const StructType *st = dynamic_cast<const StructType *>(t); const StructType *st = dynamic_cast<const StructType *>(t);
if (st != NULL) { if (st != NULL) {
for (int i = 0; i < st->GetElementCount(); ++i) for (int i = 0; i < st->GetElementCount(); ++i)
if (lRecursiveCheckVarying(st->GetElementType(i))) if (lRecursiveCheckValidParamType(st->GetElementType(i)))
return true; return true;
return false; return false;
} }
else {
if (t->IsVaryingType())
return true;
const PointerType *pt = dynamic_cast<const PointerType *>(t);
if (pt != NULL && pt->IsSlice())
return true;
else else
return t->IsVaryingType(); return false;
}
} }
@@ -358,7 +372,7 @@ lRecursiveCheckVarying(const Type *t) {
static void static void
lCheckForVaryingParameter(const Type *type, const std::string &name, lCheckForVaryingParameter(const Type *type, const std::string &name,
SourcePos pos) { SourcePos pos) {
if (lRecursiveCheckVarying(type)) { if (lRecursiveCheckValidParamType(type)) {
const Type *t = type->GetBaseType(); const Type *t = type->GetBaseType();
if (dynamic_cast<const StructType *>(t)) if (dynamic_cast<const StructType *>(t))
Error(pos, "Struct parameter \"%s\" with varying member(s) is illegal " Error(pos, "Struct parameter \"%s\" with varying member(s) is illegal "
@@ -371,10 +385,8 @@ lCheckForVaryingParameter(const Type *type, const std::string &name,
/** Given a function type, loop through the function parameters and see if
    any are StructTypes. If so, issue an error; this is currently broken
    (https://github.com/ispc/ispc/issues/3).

    @todo Fix passing structs from C/C++ to ispc functions.
*/
static void static void
lCheckForStructParameters(const FunctionType *ftype, SourcePos pos) { lCheckForStructParameters(const FunctionType *ftype, SourcePos pos) {
@@ -511,7 +523,7 @@ Module::AddFunctionDeclaration(Symbol *funSym, bool isInline) {
// Make sure that the return type isn't 'varying' if the function is // Make sure that the return type isn't 'varying' if the function is
// 'export'ed. // 'export'ed.
if (funSym->storageClass == SC_EXPORT && if (funSym->storageClass == SC_EXPORT &&
lRecursiveCheckVarying(functionType->GetReturnType())) lRecursiveCheckValidParamType(functionType->GetReturnType()))
Error(funSym->pos, "Illegal to return a \"varying\" type from exported " Error(funSym->pos, "Illegal to return a \"varying\" type from exported "
"function \"%s\"", funSym->name.c_str()); "function \"%s\"", funSym->name.c_str());
@@ -826,7 +838,13 @@ lEmitStructDecls(std::vector<const StructType *> &structTypes, FILE *file) {
// sorted ones in order. // sorted ones in order.
for (unsigned int i = 0; i < sortedTypes.size(); ++i) { for (unsigned int i = 0; i < sortedTypes.size(); ++i) {
const StructType *st = sortedTypes[i]; const StructType *st = sortedTypes[i];
fprintf(file, "struct %s {\n", st->GetStructName().c_str()); fprintf(file, "struct %s", st->GetStructName().c_str());
if (st->GetSOAWidth() > 0)
// This has to match the naming scheme in
// StructType::GetCDeclaration().
fprintf(file, "_SOA%d", st->GetSOAWidth());
fprintf(file, " {\n");
for (int j = 0; j < st->GetElementCount(); ++j) { for (int j = 0; j < st->GetElementCount(); ++j) {
const Type *type = st->GetElementType(j)->GetAsNonConstType(); const Type *type = st->GetElementType(j)->GetAsNonConstType();
std::string d = type->GetCDeclaration(st->GetElementName(j)); std::string d = type->GetCDeclaration(st->GetElementName(j));
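To make the naming scheme concrete: an exported soa<> struct is emitted into the generated header under its name plus an _SOA<width> suffix, with each leaf member widened into an array of the SOA width. A hypothetical example of what such a declaration could look like for an invented soa<4> struct named Particle (the member names and types are made up for illustration):

#include <stdint.h>
#include <cstdio>

// Hypothetical result of exporting an soa<4> struct Particle whose
// (invented) members are float px, py and an int32 id: the header struct
// gets the _SOA4 suffix and every leaf member is widened to an array.
struct Particle_SOA4 {
    float   px[4];
    float   py[4];
    int32_t id[4];
};

int main() {
    std::printf("sizeof(Particle_SOA4) = %zu bytes\n", sizeof(Particle_SOA4));
    return 0;
}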
@@ -1004,9 +1022,10 @@ static void
lPrintExternGlobals(FILE *file, const std::vector<Symbol *> &externGlobals) { lPrintExternGlobals(FILE *file, const std::vector<Symbol *> &externGlobals) {
for (unsigned int i = 0; i < externGlobals.size(); ++i) { for (unsigned int i = 0; i < externGlobals.size(); ++i) {
Symbol *sym = externGlobals[i]; Symbol *sym = externGlobals[i];
if (lRecursiveCheckVarying(sym->type)) if (lRecursiveCheckValidParamType(sym->type))
Warning(sym->pos, "Not emitting declaration for symbol \"%s\" into generated " Warning(sym->pos, "Not emitting declaration for symbol \"%s\" into "
"header file since it (or some of its members) are varying.", "generated header file since it (or some of its members) "
"has types that are illegal in exported symbols.",
sym->name.c_str()); sym->name.c_str());
else else
fprintf(file, "extern %s;\n", sym->type->GetCDeclaration(sym->name).c_str()); fprintf(file, "extern %s;\n", sym->type->GetCDeclaration(sym->name).c_str());

parse.yy

@@ -1,5 +1,5 @@
/* /*
Copyright (c) 2010-2011, Intel Corporation Copyright (c) 2010-2012, Intel Corporation
All rights reserved. All rights reserved.
Redistribution and use in source and binary forms, with or without Redistribution and use in source and binary forms, with or without
@@ -224,7 +224,7 @@ struct ForeachDimension {
%type <enumType> enum_specifier %type <enumType> enum_specifier
%type <type> specifier_qualifier_list struct_or_union_specifier %type <type> specifier_qualifier_list struct_or_union_specifier
%type <type> type_specifier type_name rate_qualified_new_type %type <type> type_specifier type_name rate_qualified_type_specifier
%type <type> short_vec_specifier %type <type> short_vec_specifier
%type <atomicType> atomic_var_type_specifier %type <atomicType> atomic_var_type_specifier
@@ -476,7 +476,7 @@ rate_qualified_new
| TOKEN_VARYING TOKEN_NEW { $$ = TYPEQUAL_VARYING; } | TOKEN_VARYING TOKEN_NEW { $$ = TYPEQUAL_VARYING; }
; ;
rate_qualified_new_type rate_qualified_type_specifier
: type_specifier { $$ = $1; } : type_specifier { $$ = $1; }
| TOKEN_UNIFORM type_specifier | TOKEN_UNIFORM type_specifier
{ {
@@ -500,19 +500,40 @@ rate_qualified_new_type
else else
$$ = $2->GetAsVaryingType(); $$ = $2->GetAsVaryingType();
} }
| soa_width_specifier type_specifier
{
if ($2 == NULL)
$$ = NULL;
else {
int soaWidth = $1;
const StructType *st = dynamic_cast<const StructType *>($2);
if (st == NULL) {
Error(@1, "\"soa\" qualifier is illegal with non-struct type \"%s\".",
$2->GetString().c_str());
$$ = NULL;
}
else if (soaWidth <= 0 || (soaWidth & (soaWidth - 1)) != 0) {
Error(@1, "soa<%d> width illegal. Value must be positive power "
"of two.", soaWidth);
$$ = NULL;
}
else
$$ = st->GetAsSOAType(soaWidth);
}
}
; ;
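The width check in the grammar action above accepts only positive powers of two, which is the usual single-bit-set test. A standalone sketch of that validation (isValidSoaWidth is an invented name):

#include <cstdio>

// A positive power of two has exactly one bit set, so w & (w - 1) is zero.
static bool isValidSoaWidth(int w) {
    return w > 0 && (w & (w - 1)) == 0;
}

int main() {
    const int widths[] = { 1, 2, 6, 8, 0, -4, 16 };
    for (int w : widths)
        std::printf("soa<%d>: %s\n", w, isValidSoaWidth(w) ? "ok" : "rejected");
    return 0;
}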
new_expression new_expression
: conditional_expression : conditional_expression
| rate_qualified_new rate_qualified_new_type | rate_qualified_new rate_qualified_type_specifier
{ {
$$ = new NewExpr((int32_t)$1, $2, NULL, NULL, @1, Union(@1, @2)); $$ = new NewExpr((int32_t)$1, $2, NULL, NULL, @1, Union(@1, @2));
} }
| rate_qualified_new rate_qualified_new_type '(' initializer_list ')' | rate_qualified_new rate_qualified_type_specifier '(' initializer_list ')'
{ {
$$ = new NewExpr((int32_t)$1, $2, $4, NULL, @1, Union(@1, @2)); $$ = new NewExpr((int32_t)$1, $2, $4, NULL, @1, Union(@1, @2));
} }
| rate_qualified_new rate_qualified_new_type '[' expression ']' | rate_qualified_new rate_qualified_type_specifier '[' expression ']'
{ {
$$ = new NewExpr((int32_t)$1, $2, NULL, $4, @1, Union(@1, @4)); $$ = new NewExpr((int32_t)$1, $2, NULL, $4, @1, Union(@1, @4));
} }

type.cpp

@@ -1,5 +1,5 @@
/* /*
Copyright (c) 2010-2011, Intel Corporation Copyright (c) 2010-2012, Intel Corporation
All rights reserved. All rights reserved.
Redistribution and use in source and binary forms, with or without Redistribution and use in source and binary forms, with or without
@@ -66,6 +66,34 @@ lShouldPrintName(const std::string &name) {
} }
/** Utility routine to create an llvm DIArray type with the given number
    of elements of the given element type. */
static llvm::DIType
lCreateDIArray(llvm::DIType eltType, int count) {
int lowerBound = 0, upperBound = count-1;
if (count == 0) {
// unsized array -> indicate with low > high
lowerBound = 1;
upperBound = 0;
}
llvm::Value *sub = m->diBuilder->getOrCreateSubrange(lowerBound, upperBound);
std::vector<llvm::Value *> subs;
subs.push_back(sub);
#ifdef LLVM_2_9
llvm::DIArray subArray = m->diBuilder->getOrCreateArray(&subs[0], subs.size());
#else
llvm::DIArray subArray = m->diBuilder->getOrCreateArray(subs);
#endif
uint64_t size = eltType.getSizeInBits() * count;
uint64_t align = eltType.getAlignInBits();
return m->diBuilder->createArrayType(size, align, eltType, subArray);
}
/////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////
// Variability // Variability
@@ -275,8 +303,15 @@ AtomicType::GetAsUnboundVariabilityType() const {
if (variability == Variability::Unbound) if (variability == Variability::Unbound)
return this; return this;
return new AtomicType(basicType, Variability::Unbound, isConst); return new AtomicType(basicType, Variability::Unbound, isConst);
}
const AtomicType *
AtomicType::GetAsSOAType(int width) const {
Assert(this != AtomicType::Void);
if (variability == Variability(Variability::SOA, width))
return this; return this;
return new AtomicType(basicType, Unbound, isConst); return new AtomicType(basicType, Variability(Variability::SOA, width), isConst);
} }
@@ -372,6 +407,13 @@ AtomicType::GetCDeclaration(const std::string &name) const {
ret += " "; ret += " ";
ret += name; ret += name;
} }
if (variability == Variability::SOA) {
char buf[32];
sprintf(buf, "[%d]", variability.soaWidth);
ret += buf;
}
return ret; return ret;
} }
@@ -380,7 +422,9 @@ LLVM_TYPE_CONST llvm::Type *
AtomicType::LLVMType(llvm::LLVMContext *ctx) const { AtomicType::LLVMType(llvm::LLVMContext *ctx) const {
Assert(variability.type != Variability::Unbound); Assert(variability.type != Variability::Unbound);
bool isUniform = (variability == Variability::Uniform); bool isUniform = (variability == Variability::Uniform);
bool isVarying = (variability == Variability::Varying);
if (isUniform || isVarying) {
switch (basicType) { switch (basicType) {
case TYPE_VOID: case TYPE_VOID:
return llvm::Type::getVoidTy(*ctx); return llvm::Type::getVoidTy(*ctx);
@@ -407,6 +451,11 @@ AtomicType::LLVMType(llvm::LLVMContext *ctx) const {
return NULL; return NULL;
} }
} }
else {
ArrayType at(GetAsUniformType(), variability.soaWidth);
return at.LLVMType(ctx);
}
}
llvm::DIType llvm::DIType
@@ -466,7 +515,7 @@ AtomicType::GetDIType(llvm::DIDescriptor scope) const {
return llvm::DIType(); return llvm::DIType();
} }
} }
else { else if (variability == Variability::Varying) {
llvm::DIType unifType = GetAsUniformType()->GetDIType(scope); llvm::DIType unifType = GetAsUniformType()->GetDIType(scope);
llvm::Value *sub = m->diBuilder->getOrCreateSubrange(0, g->target.vectorWidth-1); llvm::Value *sub = m->diBuilder->getOrCreateSubrange(0, g->target.vectorWidth-1);
#ifdef LLVM_2_9 #ifdef LLVM_2_9
@@ -479,6 +528,11 @@ AtomicType::GetDIType(llvm::DIDescriptor scope) const {
uint64_t align = unifType.getAlignInBits() * g->target.vectorWidth; uint64_t align = unifType.getAlignInBits() * g->target.vectorWidth;
return m->diBuilder->createVectorType(size, align, unifType, subArray); return m->diBuilder->createVectorType(size, align, unifType, subArray);
} }
else {
Assert(variability == Variability::SOA);
ArrayType at(GetAsUniformType(), variability.soaWidth);
return at.GetDIType(scope);
}
} }
@@ -590,6 +644,18 @@ EnumType::GetAsUnboundVariabilityType() const {
} }
const EnumType *
EnumType::GetAsSOAType(int width) const {
if (GetSOAWidth() == width)
return this;
else {
EnumType *enumType = new EnumType(*this);
enumType->variability = Variability(Variability::SOA, width);
return enumType;
}
}
const EnumType * const EnumType *
EnumType::GetAsConstType() const { EnumType::GetAsConstType() const {
if (isConst) if (isConst)
@@ -657,6 +723,13 @@ EnumType::GetCDeclaration(const std::string &varName) const {
ret += " "; ret += " ";
ret += varName; ret += varName;
} }
if (variability == Variability::SOA) {
char buf[32];
sprintf(buf, "[%d]", variability.soaWidth);
ret += buf;
}
return ret; return ret;
} }
@@ -670,6 +743,10 @@ EnumType::LLVMType(llvm::LLVMContext *ctx) const {
return LLVMTypes::Int32Type; return LLVMTypes::Int32Type;
case Variability::Varying: case Variability::Varying:
return LLVMTypes::Int32VectorType; return LLVMTypes::Int32VectorType;
case Variability::SOA: {
ArrayType at(AtomicType::UniformInt32, variability.soaWidth);
return at.LLVMType(ctx);
}
default: default:
FATAL("Unexpected variability in EnumType::LLVMType()"); FATAL("Unexpected variability in EnumType::LLVMType()");
return NULL; return NULL;
@@ -705,9 +782,12 @@ EnumType::GetDIType(llvm::DIDescriptor scope) const {
32 /* size in bits */, 32 /* size in bits */,
32 /* align in bits */, 32 /* align in bits */,
elementArray); elementArray);
if (IsUniformType())
return diType;
switch (variability.type) {
case Variability::Uniform:
return diType;
case Variability::Varying: {
llvm::Value *sub = m->diBuilder->getOrCreateSubrange(0, g->target.vectorWidth-1); llvm::Value *sub = m->diBuilder->getOrCreateSubrange(0, g->target.vectorWidth-1);
#ifdef LLVM_2_9 #ifdef LLVM_2_9
llvm::Value *suba[] = { sub }; llvm::Value *suba[] = { sub };
@@ -719,6 +799,14 @@ EnumType::GetDIType(llvm::DIDescriptor scope) const {
uint64_t align = diType.getAlignInBits() * g->target.vectorWidth; uint64_t align = diType.getAlignInBits() * g->target.vectorWidth;
return m->diBuilder->createVectorType(size, align, diType, subArray); return m->diBuilder->createVectorType(size, align, diType, subArray);
} }
case Variability::SOA: {
return lCreateDIArray(diType, variability.soaWidth);
}
default:
FATAL("Unexpected variability in EnumType::GetDIType()");
return llvm::DIType();
}
}
void void
@@ -746,8 +834,9 @@ PointerType *PointerType::Void =
new PointerType(AtomicType::Void, Variability(Variability::Uniform), false); new PointerType(AtomicType::Void, Variability(Variability::Uniform), false);
PointerType::PointerType(const Type *t, Variability v, bool ic) PointerType::PointerType(const Type *t, Variability v, bool ic, bool is,
: variability(v), isConst(ic) { bool fr)
: variability(v), isConst(ic), isSlice(is), isFrozen(fr) {
baseType = t; baseType = t;
} }
@@ -819,6 +908,7 @@ PointerType::GetAsVaryingType() const {
return this; return this;
else else
return new PointerType(baseType, Variability(Variability::Varying), return new PointerType(baseType, Variability(Variability::Varying),
isConst, isSlice, isFrozen);
} }
@@ -828,6 +918,7 @@ PointerType::GetAsUniformType() const {
return this; return this;
else else
return new PointerType(baseType, Variability(Variability::Uniform), return new PointerType(baseType, Variability(Variability::Uniform),
isConst, isSlice, isFrozen);
} }
@@ -837,6 +928,77 @@ PointerType::GetAsUnboundVariabilityType() const {
return this; return this;
else else
return new PointerType(baseType, Variability(Variability::Unbound), return new PointerType(baseType, Variability(Variability::Unbound),
isConst, isSlice, isFrozen);
}
const PointerType *
PointerType::GetAsSOAType(int width) const {
if (GetSOAWidth() == width)
return this;
else
return new PointerType(baseType, Variability(Variability::SOA, width),
isConst, isSlice, isFrozen);
}
const PointerType *
PointerType::GetAsSlice() const {
if (isSlice)
return this;
return new PointerType(baseType, variability, isConst, true);
}
const PointerType *
PointerType::GetAsNonSlice() const {
if (isSlice == false)
return this;
return new PointerType(baseType, variability, isConst, false);
}
const PointerType *
PointerType::GetAsFrozenSlice() const {
if (isFrozen)
return this;
return new PointerType(baseType, variability, isConst, true, true);
}
/** Returns a structure corresponding to the pointer representation for
slice pointers; the first member of this structure is a uniform or
varying pointer, and the second element is either a uniform or varying
int32.
*/
const StructType *
PointerType::GetSliceStructType() const {
Assert(isSlice == true);
std::vector<const Type *> eltTypes;
eltTypes.push_back(GetAsNonSlice());
switch (variability.type) {
case Variability::Uniform:
eltTypes.push_back(AtomicType::UniformInt32);
break;
case Variability::Varying:
eltTypes.push_back(AtomicType::VaryingInt32);
break;
default:
FATAL("Unexpected variability in PointerType::GetSliceStructType()");
}
std::vector<std::string> eltNames;
std::vector<SourcePos> eltPos;
eltNames.push_back("ptr");
eltNames.push_back("offset");
eltPos.push_back(SourcePos());
eltPos.push_back(SourcePos());
return new StructType("__ptr_slice_tmp", eltTypes, eltNames, eltPos, isConst,
Variability::Uniform, SourcePos());
} }
@@ -853,6 +1015,7 @@ PointerType::ResolveUnboundVariability(Variability v) const {
const Type *resolvedBaseType = const Type *resolvedBaseType =
baseType->ResolveUnboundVariability(Variability::Uniform); baseType->ResolveUnboundVariability(Variability::Uniform);
return new PointerType(resolvedBaseType, ptrVariability, isConst, isSlice, return new PointerType(resolvedBaseType, ptrVariability, isConst, isSlice,
isFrozen);
} }
@@ -861,7 +1024,7 @@ PointerType::GetAsConstType() const {
if (isConst == true) if (isConst == true)
return this; return this;
else else
return new PointerType(baseType, variability, true); return new PointerType(baseType, variability, true, isSlice);
} }
@@ -870,7 +1033,7 @@ PointerType::GetAsNonConstType() const {
if (isConst == false) if (isConst == false)
return this; return this;
else else
return new PointerType(baseType, variability, false); return new PointerType(baseType, variability, false, isSlice);
} }
@@ -885,6 +1048,8 @@ PointerType::GetString() const {
ret += std::string(" * "); ret += std::string(" * ");
if (isConst) ret += "const "; if (isConst) ret += "const ";
if (isSlice) ret += "slice ";
if (isFrozen) ret += "/*frozen*/ ";
ret += variability.GetString(); ret += variability.GetString();
return ret; return ret;
@@ -900,13 +1065,19 @@ PointerType::Mangle() const {
} }
std::string ret = variability.MangleString() + std::string("<"); std::string ret = variability.MangleString() + std::string("<");
if (isSlice || isFrozen) ret += "-";
if (isSlice) ret += "s";
if (isFrozen) ret += "f";
if (isSlice || isFrozen) ret += "-";
return ret + baseType->Mangle() + std::string(">"); return ret + baseType->Mangle() + std::string(">");
} }
std::string std::string
PointerType::GetCDeclaration(const std::string &name) const { PointerType::GetCDeclaration(const std::string &name) const {
if (variability != Uniform) { if (isSlice ||
(variability != Variability::Uniform &&
variability != Variability::SOA)) {
Assert(m->errorCount > 0); Assert(m->errorCount > 0);
return ""; return "";
} }
@@ -921,6 +1092,13 @@ PointerType::GetCDeclaration(const std::string &name) const {
if (isConst) ret += " const"; if (isConst) ret += " const";
ret += std::string(" "); ret += std::string(" ");
ret += name; ret += name;
if (variability == Variability::SOA) {
char buf[32];
sprintf(buf, "[%d]", variability.soaWidth);
ret += buf;
}
return ret; return ret;
} }
@@ -932,11 +1110,14 @@ PointerType::LLVMType(llvm::LLVMContext *ctx) const {
return NULL; return NULL;
} }
if (variability == Varying) if (isSlice)
// always the same, since we currently use int vectors for varying // Slice pointers are represented as a structure with a pointer and
// pointers // an integer offset; the corresponding ispc type is returned by
return LLVMTypes::VoidPointerVectorType; // GetSliceStructType().
return GetSliceStructType()->LLVMType(ctx);
switch (variability.type) {
case Variability::Uniform: {
LLVM_TYPE_CONST llvm::Type *ptype = NULL; LLVM_TYPE_CONST llvm::Type *ptype = NULL;
const FunctionType *ftype = dynamic_cast<const FunctionType *>(baseType); const FunctionType *ftype = dynamic_cast<const FunctionType *>(baseType);
if (ftype != NULL) if (ftype != NULL)
@@ -945,39 +1126,25 @@ PointerType::LLVMType(llvm::LLVMContext *ctx) const {
// exported functions. // exported functions.
ptype = llvm::PointerType::get(ftype->LLVMFunctionType(ctx, true), 0); ptype = llvm::PointerType::get(ftype->LLVMFunctionType(ctx, true), 0);
else { else {
if (Type::Equal(baseType, AtomicType::Void)) if (baseType == AtomicType::Void)
ptype = LLVMTypes::VoidPointerType; ptype = LLVMTypes::VoidPointerType;
else else
ptype = llvm::PointerType::get(baseType->LLVMType(ctx), 0); ptype = llvm::PointerType::get(baseType->LLVMType(ctx), 0);
} }
return ptype; return ptype;
} }
case Variability::Varying:
// always the same, since we currently use int vectors for varying
static llvm::DIType // pointers
lCreateDIArray(llvm::DIType eltType, int count) { return LLVMTypes::VoidPointerVectorType;
int lowerBound = 0, upperBound = count-1; case Variability::SOA: {
ArrayType at(GetAsUniformType(), variability.soaWidth);
if (count == 0) { return at.LLVMType(ctx);
// unsized array -> indicate with low > high }
lowerBound = 1; default:
upperBound = 0; FATAL("Unexpected variability in PointerType::LLVMType()");
return NULL;
} }
llvm::Value *sub = m->diBuilder->getOrCreateSubrange(lowerBound, upperBound);
std::vector<llvm::Value *> subs;
subs.push_back(sub);
#ifdef LLVM_2_9
llvm::DIArray subArray = m->diBuilder->getOrCreateArray(&subs[0], subs.size());
#else
llvm::DIArray subArray = m->diBuilder->getOrCreateArray(subs);
#endif
uint64_t size = eltType.getSizeInBits() * count;
uint64_t align = eltType.getAlignInBits();
return m->diBuilder->createArrayType(size, align, eltType, subArray);
} }
@@ -999,6 +1166,10 @@ PointerType::GetDIType(llvm::DIDescriptor scope) const {
bitsSize); bitsSize);
return lCreateDIArray(eltType, g->target.vectorWidth); return lCreateDIArray(eltType, g->target.vectorWidth);
} }
case Variability::SOA: {
ArrayType at(GetAsUniformType(), variability.soaWidth);
return at.GetDIType(scope);
}
default: default:
FATAL("Unexpected variability in PointerType::GetDIType()"); FATAL("Unexpected variability in PointerType::GetDIType()");
return llvm::DIType(); return llvm::DIType();
@@ -1120,6 +1291,16 @@ ArrayType::GetAsUnboundVariabilityType() const {
} }
const ArrayType *
ArrayType::GetAsSOAType(int width) const {
if (child == NULL) {
Assert(m->errorCount > 0);
return NULL;
}
return new ArrayType(child->GetAsSOAType(width), numElements);
}
const ArrayType * const ArrayType *
ArrayType::ResolveUnboundVariability(Variability v) const { ArrayType::ResolveUnboundVariability(Variability v) const {
if (child == NULL) { if (child == NULL) {
@@ -1220,6 +1401,10 @@ ArrayType::GetCDeclaration(const std::string &name) const {
Assert(m->errorCount > 0); Assert(m->errorCount > 0);
return ""; return "";
} }
int soaWidth = base->GetSOAWidth();
base = base->GetAsUniformType();
std::string s = base->GetCDeclaration(name); std::string s = base->GetCDeclaration(name);
const ArrayType *at = this; const ArrayType *at = this;
@@ -1232,6 +1417,13 @@ ArrayType::GetCDeclaration(const std::string &name) const {
s += std::string("[") + std::string(buf) + std::string("]"); s += std::string("[") + std::string(buf) + std::string("]");
at = dynamic_cast<const ArrayType *>(at->child); at = dynamic_cast<const ArrayType *>(at->child);
} }
if (soaWidth > 0) {
char buf[16];
sprintf(buf, "[%d]", soaWidth);
s += buf;
}
return s; return s;
} }
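A concrete instance of the declaration logic above: an array of ten soa<8> floats comes out in the generated header as a two-dimensional C array, with the array count as the outer dimension and the SOA width as the inner one. The equivalent C/C++ layout (the variable name is arbitrary):

#include <cstdio>

// What "soa<8> float a[10]" corresponds to on the C/C++ side: ten soa<8>
// elements, each holding eight contiguous floats.
float a[10][8];

int main() {
    // Logical element 23 lands at a[23 / 8][23 % 8], i.e. a[2][7].
    a[2][7] = 1.0f;
    std::printf("total size: %zu bytes\n", sizeof(a));  // 10 * 8 * sizeof(float)
    return 0;
}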
@@ -1393,6 +1585,12 @@ VectorType::GetAsUnboundVariabilityType() const {
} }
const VectorType *
VectorType::GetAsSOAType(int width) const {
return new VectorType(base->GetAsSOAType(width), numElements);
}
const VectorType * const VectorType *
VectorType::ResolveUnboundVariability(Variability v) const { VectorType::ResolveUnboundVariability(Variability v) const {
return new VectorType(base->ResolveUnboundVariability(v), numElements); return new VectorType(base->ResolveUnboundVariability(v), numElements);
@@ -1452,6 +1650,11 @@ VectorType::GetElementType() const {
LLVM_TYPE_CONST llvm::Type * LLVM_TYPE_CONST llvm::Type *
VectorType::LLVMType(llvm::LLVMContext *ctx) const { VectorType::LLVMType(llvm::LLVMContext *ctx) const {
if (base == NULL) {
Assert(m->errorCount > 0);
return NULL;
}
LLVM_TYPE_CONST llvm::Type *bt = base->LLVMType(ctx); LLVM_TYPE_CONST llvm::Type *bt = base->LLVMType(ctx);
if (!bt) if (!bt)
return NULL; return NULL;
@@ -1464,10 +1667,16 @@ VectorType::LLVMType(llvm::LLVMContext *ctx) const {
// registers so that e.g. if we want to add two uniform 4 float // registers so that e.g. if we want to add two uniform 4 float
// vectors, that is turned into a single addps on SSE. // vectors, that is turned into a single addps on SSE.
return llvm::VectorType::get(bt, getVectorMemoryCount()); return llvm::VectorType::get(bt, getVectorMemoryCount());
else else if (base->IsVaryingType())
// varying types are already laid out to fill HW vector registers, // varying types are already laid out to fill HW vector registers,
// so a vector type here is just expanded out as an llvm array. // so a vector type here is just expanded out as an llvm array.
return llvm::ArrayType::get(bt, getVectorMemoryCount()); return llvm::ArrayType::get(bt, getVectorMemoryCount());
else if (base->IsSOAType())
return llvm::ArrayType::get(bt, numElements);
else {
FATAL("Unexpected variability in VectorType::LLVMType()");
return NULL;
}
} }
@@ -1491,7 +1700,16 @@ VectorType::GetDIType(llvm::DIDescriptor scope) const {
if (IsUniformType()) if (IsUniformType())
align = 4 * g->target.nativeVectorWidth; align = 4 * g->target.nativeVectorWidth;
if (IsUniformType() || IsVaryingType())
return m->diBuilder->createVectorType(sizeBits, align, eltType, subArray); return m->diBuilder->createVectorType(sizeBits, align, eltType, subArray);
else if (IsSOAType()) {
ArrayType at(base, numElements);
return at.GetDIType(scope);
}
else {
FATAL("Unexpected variability in VectorType::GetDIType()");
return llvm::DIType();
}
} }
@@ -1499,7 +1717,7 @@ int
VectorType::getVectorMemoryCount() const { VectorType::getVectorMemoryCount() const {
if (base->IsVaryingType()) if (base->IsVaryingType())
return numElements; return numElements;
else { else if (base->IsUniformType()) {
int nativeWidth = g->target.nativeVectorWidth; int nativeWidth = g->target.nativeVectorWidth;
if (Type::Equal(base->GetAsUniformType(), AtomicType::UniformInt64) || if (Type::Equal(base->GetAsUniformType(), AtomicType::UniformInt64) ||
Type::Equal(base->GetAsUniformType(), AtomicType::UniformUInt64) || Type::Equal(base->GetAsUniformType(), AtomicType::UniformUInt64) ||
@@ -1512,6 +1730,14 @@ VectorType::getVectorMemoryCount() const {
// nativeWidth // nativeWidth
return (numElements + (nativeWidth - 1)) & ~(nativeWidth-1); return (numElements + (nativeWidth - 1)) & ~(nativeWidth-1);
} }
else if (base->IsSOAType()) {
FATAL("VectorType SOA getVectorMemoryCount");
return -1;
}
else {
FATAL("Unexpected variability in VectorType::getVectorMemoryCount()");
return -1;
}
} }
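The expression returned above for uniform short vectors is the standard round-up-to-a-multiple trick, padding the element count to the native vector width (64-bit element types are special-cased) so the vector fills hardware registers. A small worked example of the rounding itself, with made-up widths:

#include <cstdio>

// Round n up to the next multiple of width, where width is a power of two.
static int roundUp(int n, int width) {
    return (n + (width - 1)) & ~(width - 1);
}

int main() {
    // e.g. a uniform 3-wide short vector stored on a 4-wide native vector
    // occupies 4 element slots.
    std::printf("%d %d %d\n", roundUp(3, 4), roundUp(5, 4), roundUp(8, 4));  // 4 8 8
    return 0;
}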
@@ -1599,6 +1825,19 @@ StructType::GetAsUnboundVariabilityType() const {
} }
const StructType *
StructType::GetAsSOAType(int width) const {
if (GetSOAWidth() == width)
return this;
if (checkIfCanBeSOA(this) == false)
return NULL;
return new StructType(name, elementTypes, elementNames, elementPositions,
isConst, Variability(Variability::SOA, width), pos);
}
const StructType * const StructType *
StructType::ResolveUnboundVariability(Variability v) const { StructType::ResolveUnboundVariability(Variability v) const {
Assert(v != Variability::Unbound); Assert(v != Variability::Unbound);
@@ -1686,6 +1925,15 @@ StructType::GetCDeclaration(const std::string &n) const {
ret += std::string("struct ") + name; ret += std::string("struct ") + name;
if (lShouldPrintName(n)) if (lShouldPrintName(n))
ret += std::string(" ") + n; ret += std::string(" ") + n;
if (variability.soaWidth > 0) {
char buf[32];
// This has to match the naming scheme used in lEmitStructDecls()
// in module.cpp
sprintf(buf, "_SOA%d", variability.soaWidth);
ret += buf;
}
return ret; return ret;
} }
@@ -1793,6 +2041,35 @@ StructType::GetElementNumber(const std::string &n) const {
} }
bool
StructType::checkIfCanBeSOA(const StructType *st) {
bool ok = true;
for (int i = 0; i < (int)st->elementTypes.size(); ++i) {
const Type *eltType = st->elementTypes[i];
const StructType *childStructType =
dynamic_cast<const StructType *>(eltType);
if (childStructType != NULL)
ok &= checkIfCanBeSOA(childStructType);
else if (eltType->HasUnboundVariability() == false) {
Error(st->elementPositions[i], "Unable to apply SOA conversion to "
"struct due to \"%s\" member \"%s\" with bound \"%s\" "
"variability.", eltType->GetString().c_str(),
st->elementNames[i].c_str(),
eltType->IsUniformType() ? "uniform" : "varying");
ok = false;
}
else if (dynamic_cast<const ReferenceType *>(eltType)) {
Error(st->elementPositions[i], "Unable to apply SOA conversion to "
"struct due to member \"%s\" with reference type \"%s\".",
st->elementNames[i].c_str(), eltType->GetString().c_str());
ok = false;
}
}
return ok;
}
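The rule enforced above: soa<> conversion is only applied to structs whose members were declared without an explicit uniform or varying qualifier (i.e. still have unbound variability), and reference-typed members are rejected outright. A toy standalone model of that check, with an invented Member/Variab representation in place of the real type system:

#include <cstdio>
#include <string>
#include <vector>

// Invented, simplified stand-in for a member's declared variability.
enum class Variab { Unbound, Uniform, Varying, Reference };

struct Member {
    std::string name;
    Variab      v;
};

// Mirrors the spirit of the check: every member must still have unbound
// variability, and reference-typed members are never allowed.
static bool canBeSOA(const std::vector<Member> &members) {
    bool ok = true;
    for (const Member &m : members) {
        if (m.v == Variab::Reference) {
            std::printf("error: member \"%s\" has reference type\n", m.name.c_str());
            ok = false;
        }
        else if (m.v != Variab::Unbound) {
            std::printf("error: member \"%s\" has bound variability\n", m.name.c_str());
            ok = false;
        }
    }
    return ok;
}

int main() {
    std::vector<Member> good = { { "px", Variab::Unbound }, { "py", Variab::Unbound } };
    std::vector<Member> bad  = { { "px", Variab::Uniform }, { "r",  Variab::Reference } };
    bool g = canBeSOA(good);
    bool b = canBeSOA(bad);
    std::printf("good struct ok: %d, bad struct ok: %d\n", (int)g, (int)b);
    return 0;
}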
/////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////
// ReferenceType // ReferenceType
@@ -1913,6 +2190,13 @@ ReferenceType::GetAsUnboundVariabilityType() const {
} }
const Type *
ReferenceType::GetAsSOAType(int width) const {
// FIXME: is this right?
return new ArrayType(this, width);
}
const ReferenceType * const ReferenceType *
ReferenceType::ResolveUnboundVariability(Variability v) const { ReferenceType::ResolveUnboundVariability(Variability v) const {
if (targetType == NULL) { if (targetType == NULL) {
@@ -2126,6 +2410,13 @@ FunctionType::GetAsUnboundVariabilityType() const {
} }
const Type *
FunctionType::GetAsSOAType(int width) const {
FATAL("FunctionType::GetAsSOAType() shouldn't be called");
return NULL;
}
const FunctionType * const FunctionType *
FunctionType::ResolveUnboundVariability(Variability v) const { FunctionType::ResolveUnboundVariability(Variability v) const {
if (returnType == NULL) { if (returnType == NULL) {
@@ -2527,6 +2818,14 @@ Type::MoreGeneralType(const Type *t0, const Type *t1, SourcePos pos, const char
} }
bool
Type::IsBasicType(const Type *type) {
return (dynamic_cast<const AtomicType *>(type) != NULL ||
dynamic_cast<const EnumType *>(type) != NULL ||
dynamic_cast<const PointerType *>(type) != NULL);
}
static bool static bool
lCheckTypeEquality(const Type *a, const Type *b, bool ignoreConst) { lCheckTypeEquality(const Type *a, const Type *b, bool ignoreConst) {
if (a == NULL || b == NULL) if (a == NULL || b == NULL)
@@ -2579,13 +2878,26 @@ lCheckTypeEquality(const Type *a, const Type *b, bool ignoreConst) {
return false; return false;
if (sta->GetStructName() != stb->GetStructName()) if (sta->GetStructName() != stb->GetStructName())
return false; return false;
if (sta->GetVariability() != stb->GetVariability())
return false;
for (int i = 0; i < sta->GetElementCount(); ++i) for (int i = 0; i < sta->GetElementCount(); ++i)
// FIXME: is this redundant now?
if (!lCheckTypeEquality(sta->GetElementType(i), stb->GetElementType(i), if (!lCheckTypeEquality(sta->GetElementType(i), stb->GetElementType(i),
ignoreConst)) ignoreConst))
return false; return false;
return true; return true;
} }
const PointerType *pta = dynamic_cast<const PointerType *>(a);
const PointerType *ptb = dynamic_cast<const PointerType *>(b);
if (pta != NULL && ptb != NULL)
return (pta->IsUniformType() == ptb->IsUniformType() &&
pta->IsSlice() == ptb->IsSlice() &&
pta->IsFrozenSlice() == ptb->IsFrozenSlice() &&
lCheckTypeEquality(pta->GetBaseType(), ptb->GetBaseType(),
ignoreConst));
const ReferenceType *rta = dynamic_cast<const ReferenceType *>(a); const ReferenceType *rta = dynamic_cast<const ReferenceType *>(a);
const ReferenceType *rtb = dynamic_cast<const ReferenceType *>(b); const ReferenceType *rtb = dynamic_cast<const ReferenceType *>(b);
if (rta != NULL && rtb != NULL) if (rta != NULL && rtb != NULL)
@@ -2617,14 +2929,6 @@ lCheckTypeEquality(const Type *a, const Type *b, bool ignoreConst) {
return true; return true;
} }
const PointerType *pta = dynamic_cast<const PointerType *>(a);
const PointerType *ptb = dynamic_cast<const PointerType *>(b);
if (pta != NULL && ptb != NULL)
return (pta->IsConstType() == ptb->IsConstType() &&
pta->IsUniformType() == ptb->IsUniformType() &&
lCheckTypeEquality(pta->GetBaseType(), ptb->GetBaseType(),
ignoreConst));
return false; return false;
} }

type.h

@@ -1,5 +1,5 @@
/* /*
Copyright (c) 2010-2011, Intel Corporation Copyright (c) 2010-2012, Intel Corporation
All rights reserved. All rights reserved.
Redistribution and use in source and binary forms, with or without Redistribution and use in source and binary forms, with or without
@@ -148,6 +148,8 @@ public:
/** Get an instance of the type with unbound variability. */ /** Get an instance of the type with unbound variability. */
virtual const Type *GetAsUnboundVariabilityType() const = 0; virtual const Type *GetAsUnboundVariabilityType() const = 0;
virtual const Type *GetAsSOAType(int width) const = 0;
/** If this is a signed integer type, return the unsigned version of /** If this is a signed integer type, return the unsigned version of
the type. Otherwise, return the original type. */ the type. Otherwise, return the original type. */
virtual const Type *GetAsUnsignedType() const; virtual const Type *GetAsUnsignedType() const;
@@ -224,6 +226,11 @@ public:
static const Type *MoreGeneralType(const Type *type0, const Type *type1, static const Type *MoreGeneralType(const Type *type0, const Type *type1,
SourcePos pos, const char *reason, SourcePos pos, const char *reason,
bool forceVarying = false, int vecSize = 0); bool forceVarying = false, int vecSize = 0);
/** Returns true if the given type is an atomic, enum, or pointer type
(i.e. not an aggregation of multiple instances of a type or
types.) */
static bool IsBasicType(const Type *type);
}; };
@@ -251,6 +258,8 @@ public:
const AtomicType *GetAsUniformType() const; const AtomicType *GetAsUniformType() const;
const AtomicType *GetAsVaryingType() const; const AtomicType *GetAsVaryingType() const;
const AtomicType *GetAsUnboundVariabilityType() const; const AtomicType *GetAsUnboundVariabilityType() const;
const AtomicType *GetAsSOAType(int width) const;
const AtomicType *ResolveUnboundVariability(Variability v) const; const AtomicType *ResolveUnboundVariability(Variability v) const;
const AtomicType *GetAsUnsignedType() const; const AtomicType *GetAsUnsignedType() const;
const AtomicType *GetAsConstType() const; const AtomicType *GetAsConstType() const;
@@ -324,6 +333,8 @@ public:
const EnumType *GetAsVaryingType() const; const EnumType *GetAsVaryingType() const;
const EnumType *GetAsUniformType() const; const EnumType *GetAsUniformType() const;
const EnumType *GetAsUnboundVariabilityType() const; const EnumType *GetAsUnboundVariabilityType() const;
const EnumType *GetAsSOAType(int width) const;
const EnumType *ResolveUnboundVariability(Variability v) const; const EnumType *ResolveUnboundVariability(Variability v) const;
const EnumType *GetAsConstType() const; const EnumType *GetAsConstType() const;
const EnumType *GetAsNonConstType() const; const EnumType *GetAsNonConstType() const;
@@ -353,10 +364,29 @@ private:
/** @brief Type implementation for pointers to other types /** @brief Type implementation for pointers to other types
Pointers can have two additional properties beyond their variability
and the type of object that they are pointing to. Both of these
properties are used for internal bookkeeping and aren't directly
accessible from the language.
- Slice: pointers that point to data with SOA layout have this
property--it indicates that the pointer has two components, where the
first (major) component is a regular pointer that points to an
instance of the soa<> type being indexed, and where the second
(minor) component is an integer that indicates which of the soa
slices in that instance the pointer points to.
- Frozen: only slice pointers may have this property--it indicates that
any further indexing calculations should only be applied to the major
pointer, and the value of the minor offset should be left unchanged.
Pointers to lvalues from structure member access have the frozen
property; see discussion in comments in the StructMemberExpr class.
*/ */
class PointerType : public Type { class PointerType : public Type {
public: public:
PointerType(const Type *t, Variability v, bool isConst); PointerType(const Type *t, Variability v, bool isConst,
bool isSlice = false, bool frozen = false);
/** Helper method to return a uniform pointer to the given type. */ /** Helper method to return a uniform pointer to the given type. */
static PointerType *GetUniform(const Type *t); static PointerType *GetUniform(const Type *t);
@@ -374,10 +404,19 @@ public:
bool IsUnsignedType() const; bool IsUnsignedType() const;
bool IsConstType() const; bool IsConstType() const;
bool IsSlice() const { return isSlice; }
bool IsFrozenSlice() const { return isFrozen; }
const PointerType *GetAsSlice() const;
const PointerType *GetAsNonSlice() const;
const PointerType *GetAsFrozenSlice() const;
const StructType *GetSliceStructType() const;
const Type *GetBaseType() const; const Type *GetBaseType() const;
const PointerType *GetAsVaryingType() const; const PointerType *GetAsVaryingType() const;
const PointerType *GetAsUniformType() const; const PointerType *GetAsUniformType() const;
const PointerType *GetAsUnboundVariabilityType() const; const PointerType *GetAsUnboundVariabilityType() const;
const PointerType *GetAsSOAType(int width) const;
const PointerType *ResolveUnboundVariability(Variability v) const; const PointerType *ResolveUnboundVariability(Variability v) const;
const PointerType *GetAsConstType() const; const PointerType *GetAsConstType() const;
const PointerType *GetAsNonConstType() const; const PointerType *GetAsNonConstType() const;
@@ -394,6 +433,7 @@ public:
private: private:
const Variability variability; const Variability variability;
const bool isConst; const bool isConst;
const bool isSlice, isFrozen;
const Type *baseType; const Type *baseType;
}; };
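Tying the two components of the slice/frozen description above together: a leaf access through a slice pointer uses the major pointer (plus any member offset) to reach the right soa<> instance, and the minor offset to pick the lane inside the widened leaf array; "frozen" just means that member access in between must leave that minor offset alone. A hedged C++ illustration of the resulting address math, reusing the invented Particle_SOA4 example layout:

#include <cassert>
#include <cstdio>

// Invented soa<4> example layout with two float members.
struct Particle_SOA4 {
    float px[4];
    float py[4];
};

// Model of a slice pointer into Particle_SOA4 data: a major pointer to one
// soa<> instance plus a minor lane offset into its leaf arrays.
struct Slice {
    Particle_SOA4 *major;
    int            minor;
};

// Member access only moves the major part (it selects py rather than px);
// the minor lane offset stays frozen and is applied at the leaf load.
static float loadPy(Slice s) {
    return s.major->py[s.minor];
}

int main() {
    Particle_SOA4 data[3] = {};
    data[2].py[1] = 7.0f;
    Slice s = { &data[2], 1 };   // logical element 2*4 + 1 == 9
    assert(loadPy(s) == 7.0f);
    std::printf("py at logical element 9: %g\n", (double)loadPy(s));
    return 0;
}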
@@ -471,6 +511,7 @@ public:
const ArrayType *GetAsVaryingType() const; const ArrayType *GetAsVaryingType() const;
const ArrayType *GetAsUniformType() const; const ArrayType *GetAsUniformType() const;
const ArrayType *GetAsUnboundVariabilityType() const; const ArrayType *GetAsUnboundVariabilityType() const;
const ArrayType *GetAsSOAType(int width) const;
const ArrayType *ResolveUnboundVariability(Variability v) const; const ArrayType *ResolveUnboundVariability(Variability v) const;
const ArrayType *GetAsUnsignedType() const; const ArrayType *GetAsUnsignedType() const;
@@ -538,6 +579,7 @@ public:
const VectorType *GetAsVaryingType() const; const VectorType *GetAsVaryingType() const;
const VectorType *GetAsUniformType() const; const VectorType *GetAsUniformType() const;
const VectorType *GetAsUnboundVariabilityType() const; const VectorType *GetAsUnboundVariabilityType() const;
const VectorType *GetAsSOAType(int width) const;
const VectorType *ResolveUnboundVariability(Variability v) const; const VectorType *ResolveUnboundVariability(Variability v) const;
const VectorType *GetAsConstType() const; const VectorType *GetAsConstType() const;
@@ -587,6 +629,7 @@ public:
const StructType *GetAsVaryingType() const; const StructType *GetAsVaryingType() const;
const StructType *GetAsUniformType() const; const StructType *GetAsUniformType() const;
const StructType *GetAsUnboundVariabilityType() const; const StructType *GetAsUnboundVariabilityType() const;
const StructType *GetAsSOAType(int width) const;
const StructType *ResolveUnboundVariability(Variability v) const; const StructType *ResolveUnboundVariability(Variability v) const;
const StructType *GetAsConstType() const; const StructType *GetAsConstType() const;
@@ -623,6 +666,8 @@ public:
const std::string &GetStructName() const { return name; } const std::string &GetStructName() const { return name; }
private: private:
static bool checkIfCanBeSOA(const StructType *st);
const std::string name; const std::string name;
/** The types of the struct elements. Note that we store these with /** The types of the struct elements. Note that we store these with
uniform/varying exactly as they were declared in the source file. uniform/varying exactly as they were declared in the source file.
@@ -664,6 +709,7 @@ public:
const ReferenceType *GetAsVaryingType() const; const ReferenceType *GetAsVaryingType() const;
const ReferenceType *GetAsUniformType() const; const ReferenceType *GetAsUniformType() const;
const ReferenceType *GetAsUnboundVariabilityType() const; const ReferenceType *GetAsUnboundVariabilityType() const;
const Type *GetAsSOAType(int width) const;
const ReferenceType *ResolveUnboundVariability(Variability v) const; const ReferenceType *ResolveUnboundVariability(Variability v) const;
const ReferenceType *GetAsConstType() const; const ReferenceType *GetAsConstType() const;
@@ -715,6 +761,7 @@ public:
const Type *GetAsVaryingType() const; const Type *GetAsVaryingType() const;
const Type *GetAsUniformType() const; const Type *GetAsUniformType() const;
const Type *GetAsUnboundVariabilityType() const; const Type *GetAsUnboundVariabilityType() const;
const Type *GetAsSOAType(int width) const;
const FunctionType *ResolveUnboundVariability(Variability v) const; const FunctionType *ResolveUnboundVariability(Variability v) const;
const Type *GetAsConstType() const; const Type *GetAsConstType() const;