From 8d1b77b2355bf28a0bc2f83597dbc9e910779cd7 Mon Sep 17 00:00:00 2001 From: Matt Pharr Date: Thu, 15 Dec 2011 11:11:07 -0800 Subject: [PATCH] Have assertion macro and FATAL() text ask user to file a bug, provide URL to do so. Switch to Assert() from assert() to make it clear it's not the C stdlib one we're using any more. --- builtins.cpp | 12 +-- ctx.cpp | 190 ++++++++++++++++++++-------------------- decl.cpp | 14 +-- expr.cpp | 226 ++++++++++++++++++++++++------------------------ func.cpp | 30 +++---- ispc.cpp | 10 +-- ispc.h | 14 ++- lex.ll | 20 ++--- llvmutil.cpp | 4 +- main.cpp | 2 + module.cpp | 49 +++++------ opt.cpp | 62 ++++++------- parse.yy | 20 ++--- stmt.cpp | 38 ++++---- stmt.h | 2 +- sym.cpp | 12 +-- test_static.cpp | 2 +- type.cpp | 58 ++++++------- util.cpp | 15 ++-- 19 files changed, 398 insertions(+), 382 deletions(-) diff --git a/builtins.cpp b/builtins.cpp index 443b3beb..5358e789 100644 --- a/builtins.cpp +++ b/builtins.cpp @@ -257,7 +257,7 @@ static void lAddModuleSymbols(llvm::Module *module, SymbolTable *symbolTable) { #if 0 // FIXME: handle globals? - assert(module->global_empty()); + Assert(module->global_empty()); #endif llvm::Module::iterator iter; @@ -287,11 +287,11 @@ lCheckModuleIntrinsics(llvm::Module *module) { // check the llvm.x86.* intrinsics for now... if (!strncmp(funcName.c_str(), "llvm.x86.", 9)) { llvm::Intrinsic::ID id = (llvm::Intrinsic::ID)func->getIntrinsicID(); - assert(id != 0); + Assert(id != 0); LLVM_TYPE_CONST llvm::Type *intrinsicType = llvm::Intrinsic::getType(*g->ctx, id); intrinsicType = llvm::PointerType::get(intrinsicType, 0); - assert(func->getType() == intrinsicType); + Assert(func->getType() == intrinsicType); } } } @@ -591,9 +591,9 @@ AddBitcodeToModule(const unsigned char *bitcode, int length, // linking together modules with incompatible target triples.. llvm::Triple mTriple(m->module->getTargetTriple()); llvm::Triple bcTriple(bcModule->getTargetTriple()); - assert(bcTriple.getArch() == llvm::Triple::UnknownArch || + Assert(bcTriple.getArch() == llvm::Triple::UnknownArch || mTriple.getArch() == bcTriple.getArch()); - assert(bcTriple.getVendor() == llvm::Triple::UnknownVendor || + Assert(bcTriple.getVendor() == llvm::Triple::UnknownVendor || mTriple.getVendor() == bcTriple.getVendor()); bcModule->setTargetTriple(mTriple.str()); @@ -639,7 +639,7 @@ lDefineConstantIntFunc(const char *name, int val, llvm::Module *module, Symbol *sym = new Symbol(name, SourcePos(), ft, SC_STATIC); llvm::Function *func = module->getFunction(name); - assert(func != NULL); // it should be declared already... + Assert(func != NULL); // it should be declared already... 
func->addFnAttr(llvm::Attribute::AlwaysInline); llvm::BasicBlock *bblock = llvm::BasicBlock::Create(*g->ctx, "entry", func, 0); llvm::ReturnInst::Create(*g->ctx, LLVMInt32(val), bblock); diff --git a/ctx.cpp b/ctx.cpp index 46568e2e..043f7acc 100644 --- a/ctx.cpp +++ b/ctx.cpp @@ -89,7 +89,7 @@ struct CFInfo { private: CFInfo(CFType t, bool uniformIf, llvm::Value *sm) { - assert(t == If); + Assert(t == If); type = t; isUniform = uniformIf; savedBreakTarget = savedContinueTarget = NULL; @@ -99,7 +99,7 @@ private: CFInfo(CFType t, bool iu, llvm::BasicBlock *bt, llvm::BasicBlock *ct, llvm::Value *sb, llvm::Value *sc, llvm::Value *sm, llvm::Value *lm) { - assert(t == Loop); + Assert(t == Loop); type = t; isUniform = iu; savedBreakTarget = bt; @@ -112,7 +112,7 @@ private: CFInfo(CFType t, llvm::BasicBlock *bt, llvm::BasicBlock *ct, llvm::Value *sb, llvm::Value *sc, llvm::Value *sm, llvm::Value *lm) { - assert(t == Foreach); + Assert(t == Foreach); type = t; isUniform = false; savedBreakTarget = bt; @@ -226,7 +226,7 @@ FunctionEmitContext::FunctionEmitContext(Function *func, Symbol *funSym, llvm::Constant *offFunc = m->module->getOrInsertFunction(buf, LLVMTypes::VoidType, NULL); - assert(llvm::isa(offFunc)); + Assert(llvm::isa(offFunc)); llvm::BasicBlock *offBB = llvm::BasicBlock::Create(*g->ctx, "entry", (llvm::Function *)offFunc, 0); @@ -260,7 +260,7 @@ FunctionEmitContext::FunctionEmitContext(Function *func, Symbol *funSym, llvm::DIFile file = funcStartPos.GetDIFile(); Symbol *programIndexSymbol = m->symbolTable->LookupVariable("programIndex"); - assert(programIndexSymbol && programIndexSymbol->storagePtr); + Assert(programIndexSymbol && programIndexSymbol->storagePtr); m->diBuilder->createGlobalVariable(programIndexSymbol->name, file, funcStartPos.first_line, @@ -269,7 +269,7 @@ FunctionEmitContext::FunctionEmitContext(Function *func, Symbol *funSym, programIndexSymbol->storagePtr); Symbol *programCountSymbol = m->symbolTable->LookupVariable("programCount"); - assert(programCountSymbol); + Assert(programCountSymbol); m->diBuilder->createGlobalVariable(programCountSymbol->name, file, funcStartPos.first_line, @@ -281,8 +281,8 @@ FunctionEmitContext::FunctionEmitContext(Function *func, Symbol *funSym, FunctionEmitContext::~FunctionEmitContext() { - assert(controlFlowInfo.size() == 0); - assert(debugScopes.size() == (m->diBuilder ? 1 : 0)); + Assert(controlFlowInfo.size() == 0); + Assert(debugScopes.size() == (m->diBuilder ? 
1 : 0)); } @@ -376,7 +376,7 @@ FunctionEmitContext::SetInternalMaskAndNot(llvm::Value *oldMask, llvm::Value *te void FunctionEmitContext::BranchIfMaskAny(llvm::BasicBlock *btrue, llvm::BasicBlock *bfalse) { - assert(bblock != NULL); + Assert(bblock != NULL); llvm::Value *any = Any(GetFullMask()); BranchInst(btrue, bfalse, any); // It's illegal to add any additional instructions to the basic block @@ -387,7 +387,7 @@ FunctionEmitContext::BranchIfMaskAny(llvm::BasicBlock *btrue, llvm::BasicBlock * void FunctionEmitContext::BranchIfMaskAll(llvm::BasicBlock *btrue, llvm::BasicBlock *bfalse) { - assert(bblock != NULL); + Assert(bblock != NULL); llvm::Value *all = All(GetFullMask()); BranchInst(btrue, bfalse, all); // It's illegal to add any additional instructions to the basic block @@ -398,7 +398,7 @@ FunctionEmitContext::BranchIfMaskAll(llvm::BasicBlock *btrue, llvm::BasicBlock * void FunctionEmitContext::BranchIfMaskNone(llvm::BasicBlock *btrue, llvm::BasicBlock *bfalse) { - assert(bblock != NULL); + Assert(bblock != NULL); // switch sense of true/false bblocks BranchIfMaskAny(bfalse, btrue); // It's illegal to add any additional instructions to the basic block @@ -422,7 +422,7 @@ FunctionEmitContext::StartVaryingIf(llvm::Value *oldMask) { void FunctionEmitContext::EndIf() { // Make sure we match up with a Start{Uniform,Varying}If(). - assert(controlFlowInfo.size() > 0 && controlFlowInfo.back()->IsIf()); + Assert(controlFlowInfo.size() > 0 && controlFlowInfo.back()->IsIf()); CFInfo *ci = controlFlowInfo.back(); controlFlowInfo.pop_back(); @@ -501,7 +501,7 @@ FunctionEmitContext::StartLoop(llvm::BasicBlock *bt, llvm::BasicBlock *ct, void FunctionEmitContext::EndLoop() { - assert(controlFlowInfo.size() && controlFlowInfo.back()->IsLoop()); + Assert(controlFlowInfo.size() && controlFlowInfo.back()->IsLoop()); CFInfo *ci = controlFlowInfo.back(); controlFlowInfo.pop_back(); @@ -544,7 +544,7 @@ FunctionEmitContext::StartForeach(llvm::BasicBlock *ct) { void FunctionEmitContext::EndForeach() { - assert(controlFlowInfo.size() && controlFlowInfo.back()->IsForeach()); + Assert(controlFlowInfo.size() && controlFlowInfo.back()->IsForeach()); CFInfo *ci = controlFlowInfo.back(); controlFlowInfo.pop_back(); @@ -598,7 +598,7 @@ FunctionEmitContext::Break(bool doCoherenceCheck) { // Otherwise we need to update the mask of the lanes that have // executed a 'break' statement: // breakLanes = breakLanes | mask - assert(breakLanesPtr != NULL); + Assert(breakLanesPtr != NULL); llvm::Value *mask = GetInternalMask(); llvm::Value *breakMask = LoadInst(breakLanesPtr, "break_mask"); @@ -648,7 +648,7 @@ FunctionEmitContext::Continue(bool doCoherenceCheck) { else { // Otherwise update the stored value of which lanes have 'continue'd. // continueLanes = continueLanes | mask - assert(continueLanesPtr); + Assert(continueLanesPtr); llvm::Value *mask = GetInternalMask(); llvm::Value *continueMask = LoadInst(continueLanesPtr, "continue_mask"); @@ -675,7 +675,7 @@ FunctionEmitContext::Continue(bool doCoherenceCheck) { */ bool FunctionEmitContext::ifsInLoopAllUniform() const { - assert(controlFlowInfo.size() > 0); + Assert(controlFlowInfo.size() > 0); // Go backwards through controlFlowInfo, since we add new nested scopes // to the back. Stop once we come to the first enclosing loop. int i = controlFlowInfo.size() - 1; @@ -685,7 +685,7 @@ FunctionEmitContext::ifsInLoopAllUniform() const { return false; --i; } - assert(i >= 0); // else we didn't find a loop! + Assert(i >= 0); // else we didn't find a loop! 
return true; } @@ -693,7 +693,7 @@ FunctionEmitContext::ifsInLoopAllUniform() const { void FunctionEmitContext::jumpIfAllLoopLanesAreDone(llvm::BasicBlock *target) { llvm::Value *allDone = NULL; - assert(continueLanesPtr != NULL); + Assert(continueLanesPtr != NULL); if (breakLanesPtr == NULL) { // In a foreach loop, break and return are illegal, and // breakLanesPtr is NULL. In this case, the mask is guaranteed to @@ -876,7 +876,7 @@ FunctionEmitContext::LaneMask(llvm::Value *v) { std::vector mm; m->symbolTable->LookupFunction("__movmsk", &mm); // There should be one with signed int signature, one unsigned int. - assert(mm.size() == 2); + Assert(mm.size() == 2); // We can actually call either one, since both are i32s as far as // LLVM's type system is concerned... llvm::Function *fmm = mm[0]->function; @@ -925,7 +925,7 @@ FunctionEmitContext::CreateBasicBlock(const char *name) { llvm::Value * FunctionEmitContext::I1VecToBoolVec(llvm::Value *b) { if (b == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -971,7 +971,7 @@ lGetStringAsValue(llvm::BasicBlock *bblock, const char *s) { void FunctionEmitContext::AddInstrumentationPoint(const char *note) { - assert(note != NULL); + Assert(note != NULL); if (!g->emitInstrumentation) return; @@ -1039,7 +1039,7 @@ FunctionEmitContext::StartScope() { void FunctionEmitContext::EndScope() { if (m->diBuilder != NULL) { - assert(debugScopes.size() > 0); + Assert(debugScopes.size() > 0); debugScopes.pop_back(); } } @@ -1047,7 +1047,7 @@ FunctionEmitContext::EndScope() { llvm::DIScope FunctionEmitContext::GetDIScope() const { - assert(debugScopes.size() > 0); + Assert(debugScopes.size() > 0); return debugScopes.back(); } @@ -1108,7 +1108,7 @@ lArrayVectorWidth(LLVM_TYPE_CONST llvm::Type *t) { // to things like FunctionEmitContext::BinaryOperator() as operands. 
LLVM_TYPE_CONST llvm::VectorType *vectorElementType = llvm::dyn_cast(arrayType->getElementType()); - assert((vectorElementType != NULL && + Assert((vectorElementType != NULL && (int)vectorElementType->getNumElements() == g->target.vectorWidth)); return (int)arrayType->getNumElements(); @@ -1120,11 +1120,11 @@ FunctionEmitContext::BinaryOperator(llvm::Instruction::BinaryOps inst, llvm::Value *v0, llvm::Value *v1, const char *name) { if (v0 == NULL || v1 == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } - assert(v0->getType() == v1->getType()); + Assert(v0->getType() == v1->getType()); LLVM_TYPE_CONST llvm::Type *type = v0->getType(); int arraySize = lArrayVectorWidth(type); if (arraySize == 0) { @@ -1152,7 +1152,7 @@ FunctionEmitContext::BinaryOperator(llvm::Instruction::BinaryOps inst, llvm::Value * FunctionEmitContext::NotOperator(llvm::Value *v, const char *name) { if (v == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -1188,12 +1188,12 @@ static LLVM_TYPE_CONST llvm::Type * lGetMatchingBoolVectorType(LLVM_TYPE_CONST llvm::Type *type) { LLVM_TYPE_CONST llvm::ArrayType *arrayType = llvm::dyn_cast(type); - assert(arrayType != NULL); + Assert(arrayType != NULL); LLVM_TYPE_CONST llvm::VectorType *vectorElementType = llvm::dyn_cast(arrayType->getElementType()); - assert(vectorElementType != NULL); - assert((int)vectorElementType->getNumElements() == g->target.vectorWidth); + Assert(vectorElementType != NULL); + Assert((int)vectorElementType->getNumElements() == g->target.vectorWidth); LLVM_TYPE_CONST llvm::Type *base = llvm::VectorType::get(LLVMTypes::BoolType, g->target.vectorWidth); @@ -1207,11 +1207,11 @@ FunctionEmitContext::CmpInst(llvm::Instruction::OtherOps inst, llvm::Value *v0, llvm::Value *v1, const char *name) { if (v0 == NULL || v1 == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } - assert(v0->getType() == v1->getType()); + Assert(v0->getType() == v1->getType()); LLVM_TYPE_CONST llvm::Type *type = v0->getType(); int arraySize = lArrayVectorWidth(type); if (arraySize == 0) { @@ -1238,7 +1238,7 @@ FunctionEmitContext::CmpInst(llvm::Instruction::OtherOps inst, llvm::Value * FunctionEmitContext::SmearUniform(llvm::Value *value, const char *name) { if (value == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -1273,7 +1273,7 @@ FunctionEmitContext::BitCastInst(llvm::Value *value, LLVM_TYPE_CONST llvm::Type *type, const char *name) { if (value == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -1287,7 +1287,7 @@ FunctionEmitContext::BitCastInst(llvm::Value *value, llvm::Value * FunctionEmitContext::PtrToIntInst(llvm::Value *value, const char *name) { if (value == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -1308,7 +1308,7 @@ FunctionEmitContext::PtrToIntInst(llvm::Value *value, LLVM_TYPE_CONST llvm::Type *toType, const char *name) { if (value == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -1321,7 +1321,7 @@ FunctionEmitContext::PtrToIntInst(llvm::Value *value, else if (fromType->getScalarSizeInBits() > toType->getScalarSizeInBits()) return TruncInst(value, toType, "ptr_to_int"); else { - assert(fromType->getScalarSizeInBits() < + Assert(fromType->getScalarSizeInBits() < toType->getScalarSizeInBits()); return ZExtInst(value, toType, "ptr_to_int"); } @@ -1339,7 +1339,7 @@ FunctionEmitContext::IntToPtrInst(llvm::Value *value, 
LLVM_TYPE_CONST llvm::Type *toType, const char *name) { if (value == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -1352,7 +1352,7 @@ FunctionEmitContext::IntToPtrInst(llvm::Value *value, else if (fromType->getScalarSizeInBits() > toType->getScalarSizeInBits()) return TruncInst(value, toType, "int_to_ptr"); else { - assert(fromType->getScalarSizeInBits() < + Assert(fromType->getScalarSizeInBits() < toType->getScalarSizeInBits()); return ZExtInst(value, toType, "int_to_ptr"); } @@ -1369,7 +1369,7 @@ llvm::Instruction * FunctionEmitContext::TruncInst(llvm::Value *value, LLVM_TYPE_CONST llvm::Type *type, const char *name) { if (value == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -1386,7 +1386,7 @@ llvm::Instruction * FunctionEmitContext::CastInst(llvm::Instruction::CastOps op, llvm::Value *value, LLVM_TYPE_CONST llvm::Type *type, const char *name) { if (value == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -1403,7 +1403,7 @@ llvm::Instruction * FunctionEmitContext::FPCastInst(llvm::Value *value, LLVM_TYPE_CONST llvm::Type *type, const char *name) { if (value == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -1420,7 +1420,7 @@ llvm::Instruction * FunctionEmitContext::SExtInst(llvm::Value *value, LLVM_TYPE_CONST llvm::Type *type, const char *name) { if (value == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -1437,7 +1437,7 @@ llvm::Instruction * FunctionEmitContext::ZExtInst(llvm::Value *value, LLVM_TYPE_CONST llvm::Type *type, const char *name) { if (value == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -1510,7 +1510,7 @@ FunctionEmitContext::applyVaryingGEP(llvm::Value *basePtr, llvm::Value *index, // index must be varying for this method to be called. bool baseIsUniform = (llvm::isa(basePtr->getType())); - assert(baseIsUniform == false || indexIsVarying == true); + Assert(baseIsUniform == false || indexIsVarying == true); llvm::Value *varyingPtr = baseIsUniform ? SmearUniform(basePtr, "ptr_smear") : basePtr; @@ -1523,13 +1523,13 @@ llvm::Value * FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index, const Type *ptrType, const char *name) { if (basePtr == NULL || index == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } if (dynamic_cast(ptrType) != NULL) ptrType = PointerType::GetUniform(ptrType->GetReferenceTarget()); - assert(dynamic_cast(ptrType) != NULL); + Assert(dynamic_cast(ptrType) != NULL); bool indexIsVaryingType = llvm::isa(index->getType()); @@ -1561,13 +1561,13 @@ FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index0 llvm::Value *index1, const Type *ptrType, const char *name) { if (basePtr == NULL || index0 == NULL || index1 == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } if (dynamic_cast(ptrType) != NULL) ptrType = PointerType::GetUniform(ptrType->GetReferenceTarget()); - assert(dynamic_cast(ptrType) != NULL); + Assert(dynamic_cast(ptrType) != NULL); bool index0IsVaryingType = llvm::isa(index0->getType()); @@ -1600,7 +1600,7 @@ FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index0 // out the type of ptr0. 
const Type *baseType = ptrType->GetBaseType(); const SequentialType *st = dynamic_cast(baseType); - assert(st != NULL); + Assert(st != NULL); bool ptr0IsUniform = llvm::isa(ptr0->getType()); @@ -1635,7 +1635,7 @@ FunctionEmitContext::AddElementOffset(llvm::Value *basePtr, int elementNum, if (dynamic_cast(ptrType) != NULL) ptrType = PointerType::GetUniform(ptrType->GetReferenceTarget()); - assert(dynamic_cast(ptrType) != NULL); + Assert(dynamic_cast(ptrType) != NULL); // Otherwise do the math to find the offset and add it to the given // varying pointers @@ -1652,7 +1652,7 @@ FunctionEmitContext::AddElementOffset(llvm::Value *basePtr, int elementNum, // type of the vector. const SequentialType *st = dynamic_cast(ptrType->GetBaseType()); - assert(st != NULL); + Assert(st != NULL); llvm::Value *size = g->target.SizeOf(st->GetElementType()->LLVMType(g->ctx)); llvm::Value *scale = (g->target.is32Bit || g->opt.force32BitAddressing) ? @@ -1676,13 +1676,13 @@ FunctionEmitContext::AddElementOffset(llvm::Value *basePtr, int elementNum, llvm::Value * FunctionEmitContext::LoadInst(llvm::Value *ptr, const char *name) { if (ptr == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } LLVM_TYPE_CONST llvm::PointerType *pt = llvm::dyn_cast(ptr->getType()); - assert(pt != NULL); + Assert(pt != NULL); // FIXME: it's not clear to me that we generate unaligned vector loads // of varying stuff out of the front-end any more. (Only by the @@ -1703,16 +1703,16 @@ llvm::Value * FunctionEmitContext::LoadInst(llvm::Value *ptr, llvm::Value *mask, const Type *ptrType, const char *name) { if (ptr == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } - assert(ptrType != NULL && mask != NULL); + Assert(ptrType != NULL && mask != NULL); if (dynamic_cast(ptrType) != NULL) ptrType = PointerType::GetUniform(ptrType->GetReferenceTarget()); - assert(dynamic_cast(ptrType) != NULL); + Assert(dynamic_cast(ptrType) != NULL); if (ptrType->IsUniformType()) { // FIXME: same issue as above load inst regarding alignment... @@ -1749,7 +1749,7 @@ llvm::Value * FunctionEmitContext::gather(llvm::Value *ptr, const Type *ptrType, llvm::Value *mask, const char *name) { // We should have a varying lvalue if we get here... - assert(ptrType->IsVaryingType() && + Assert(ptrType->IsVaryingType() && ptr->getType() == LLVMTypes::VoidPointerVectorType); const Type *returnType = ptrType->GetBaseType()->GetAsVaryingType(); @@ -1798,13 +1798,13 @@ FunctionEmitContext::gather(llvm::Value *ptr, const Type *ptrType, funcName = g->target.is32Bit ? "__pseudo_gather32_16" : "__pseudo_gather64_16"; else { - assert(llvmReturnType == LLVMTypes::Int8VectorType); + Assert(llvmReturnType == LLVMTypes::Int8VectorType); funcName = g->target.is32Bit ? 
"__pseudo_gather32_8" : "__pseudo_gather64_8"; } llvm::Function *gatherFunc = m->module->getFunction(funcName); - assert(gatherFunc != NULL); + Assert(gatherFunc != NULL); llvm::Value *call = CallInst(gatherFunc, NULL, ptr, mask, name); @@ -1854,7 +1854,7 @@ FunctionEmitContext::AllocaInst(LLVM_TYPE_CONST llvm::Type *llvmType, const char *name, int align, bool atEntryBlock) { if (llvmType == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -1863,7 +1863,7 @@ FunctionEmitContext::AllocaInst(LLVM_TYPE_CONST llvm::Type *llvmType, // We usually insert it right before the jump instruction at the // end of allocaBlock llvm::Instruction *retInst = allocaBlock->getTerminator(); - assert(retInst); + Assert(retInst); inst = new llvm::AllocaInst(llvmType, name ? name : "", retInst); } else @@ -1899,12 +1899,12 @@ void FunctionEmitContext::maskedStore(llvm::Value *value, llvm::Value *ptr, const Type *ptrType, llvm::Value *mask) { if (value == NULL || ptr == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return; } - assert(dynamic_cast(ptrType) != NULL); - assert(ptrType->IsUniformType()); + Assert(dynamic_cast(ptrType) != NULL); + Assert(ptrType->IsUniformType()); const Type *valueType = ptrType->GetBaseType(); const CollectionType *collectionType = @@ -1926,7 +1926,7 @@ FunctionEmitContext::maskedStore(llvm::Value *value, llvm::Value *ptr, // We must have a regular atomic, enumerator, or pointer type at this // point. - assert(dynamic_cast(valueType) != NULL || + Assert(dynamic_cast(valueType) != NULL || dynamic_cast(valueType) != NULL || dynamic_cast(valueType) != NULL); valueType = valueType->GetAsNonConstType(); @@ -1972,7 +1972,7 @@ FunctionEmitContext::maskedStore(llvm::Value *value, llvm::Value *ptr, ptr = BitCastInst(ptr, LLVMTypes::Int8VectorPointerType, "ptr_to_int8vecptr"); } - assert(maskedStoreFunc != NULL); + Assert(maskedStoreFunc != NULL); std::vector args; args.push_back(ptr); @@ -1992,13 +1992,13 @@ FunctionEmitContext::maskedStore(llvm::Value *value, llvm::Value *ptr, void FunctionEmitContext::scatter(llvm::Value *value, llvm::Value *ptr, const Type *ptrType, llvm::Value *mask) { - assert(dynamic_cast(ptrType) != NULL); - assert(ptrType->IsVaryingType()); + Assert(dynamic_cast(ptrType) != NULL); + Assert(ptrType->IsVaryingType()); const Type *valueType = ptrType->GetBaseType(); // I think this should be impossible - assert(dynamic_cast(valueType) == NULL); + Assert(dynamic_cast(valueType) == NULL); const CollectionType *collectionType = dynamic_cast(valueType); if (collectionType != NULL) { @@ -2017,7 +2017,7 @@ FunctionEmitContext::scatter(llvm::Value *value, llvm::Value *ptr, const PointerType *pt = dynamic_cast(valueType); // And everything should be a pointer or atomic from here on out... 
- assert(pt != NULL || + Assert(pt != NULL || dynamic_cast(valueType) != NULL); LLVM_TYPE_CONST llvm::Type *type = value->getType(); @@ -2045,7 +2045,7 @@ FunctionEmitContext::scatter(llvm::Value *value, llvm::Value *ptr, "__pseudo_scatter64_8"; llvm::Function *scatterFunc = m->module->getFunction(funcName); - assert(scatterFunc != NULL); + Assert(scatterFunc != NULL); AddInstrumentationPoint("scatter"); @@ -2062,7 +2062,7 @@ void FunctionEmitContext::StoreInst(llvm::Value *value, llvm::Value *ptr) { if (value == NULL || ptr == NULL) { // may happen due to error elsewhere - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return; } @@ -2086,7 +2086,7 @@ FunctionEmitContext::StoreInst(llvm::Value *value, llvm::Value *ptr, llvm::Value *mask, const Type *ptrType) { if (value == NULL || ptr == NULL) { // may happen due to error elsewhere - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return; } @@ -2106,7 +2106,7 @@ FunctionEmitContext::StoreInst(llvm::Value *value, llvm::Value *ptr, maskedStore(value, ptr, ptrType, mask); } else { - assert(ptrType->IsVaryingType()); + Assert(ptrType->IsVaryingType()); // We have a varying ptr (an array of pointers), so it's time to // scatter scatter(value, ptr, ptrType, GetFullMask()); @@ -2126,7 +2126,7 @@ FunctionEmitContext::BranchInst(llvm::BasicBlock *trueBlock, llvm::BasicBlock *falseBlock, llvm::Value *test) { if (test == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return; } @@ -2139,7 +2139,7 @@ FunctionEmitContext::BranchInst(llvm::BasicBlock *trueBlock, llvm::Value * FunctionEmitContext::ExtractInst(llvm::Value *v, int elt, const char *name) { if (v == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -2159,7 +2159,7 @@ llvm::Value * FunctionEmitContext::InsertInst(llvm::Value *v, llvm::Value *eltVal, int elt, const char *name) { if (v == NULL || eltVal == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -2192,7 +2192,7 @@ llvm::Instruction * FunctionEmitContext::SelectInst(llvm::Value *test, llvm::Value *val0, llvm::Value *val1, const char *name) { if (test == NULL || val0 == NULL || val1 == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -2223,7 +2223,7 @@ lCalleeArgCount(llvm::Value *callee, const FunctionType *funcType) { ft = llvm::dyn_cast(pt->getElementType()); } - assert(ft != NULL); + Assert(ft != NULL); return ft->getNumParams(); } @@ -2233,7 +2233,7 @@ FunctionEmitContext::CallInst(llvm::Value *func, const FunctionType *funcType, const std::vector &args, const char *name) { if (func == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -2242,7 +2242,7 @@ FunctionEmitContext::CallInst(llvm::Value *func, const FunctionType *funcType, // isn't the case for things like intrinsics, builtins, and extern "C" // functions from the application. Add the mask if it's needed. 
unsigned int calleeArgCount = lCalleeArgCount(func, funcType); - assert(argVals.size() + 1 == calleeArgCount || + Assert(argVals.size() + 1 == calleeArgCount || argVals.size() == calleeArgCount); if (argVals.size() + 1 == calleeArgCount) argVals.push_back(GetFullMask()); @@ -2313,7 +2313,7 @@ FunctionEmitContext::CallInst(llvm::Value *func, const FunctionType *funcType, llvm::Value *currentMask = LoadInst(maskPtr); llvm::Function *cttz = m->module->getFunction("__count_trailing_zeros_i32"); - assert(cttz != NULL); + Assert(cttz != NULL); llvm::Value *firstLane = CallInst(cttz, NULL, LaneMask(currentMask), "first_lane"); @@ -2360,12 +2360,12 @@ FunctionEmitContext::CallInst(llvm::Value *func, const FunctionType *funcType, // Now, do a masked store into the memory allocated to // accumulate the result using the call mask. if (callResult != NULL) { - assert(resultPtr != NULL); + Assert(resultPtr != NULL); StoreInst(callResult, resultPtr, callMask, PointerType::GetUniform(returnType)); } else - assert(resultPtr == NULL); + Assert(resultPtr == NULL); // Update the mask to turn off the program instances for which // we just called the function. @@ -2425,7 +2425,7 @@ FunctionEmitContext::ReturnInst() { rinst = llvm::ReturnInst::Create(*g->ctx, retVal, bblock); } else { - assert(function->GetReturnType() == AtomicType::Void); + Assert(function->GetReturnType() == AtomicType::Void); rinst = llvm::ReturnInst::Create(*g->ctx, bblock); } @@ -2440,25 +2440,25 @@ FunctionEmitContext::LaunchInst(llvm::Value *callee, std::vector &argVals, llvm::Value *launchCount) { if (callee == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } launchedTasks = true; - assert(llvm::isa(callee)); + Assert(llvm::isa(callee)); LLVM_TYPE_CONST llvm::Type *argType = (llvm::dyn_cast(callee))->arg_begin()->getType(); - assert(llvm::PointerType::classof(argType)); + Assert(llvm::PointerType::classof(argType)); LLVM_TYPE_CONST llvm::PointerType *pt = llvm::dyn_cast(argType); - assert(llvm::StructType::classof(pt->getElementType())); + Assert(llvm::StructType::classof(pt->getElementType())); LLVM_TYPE_CONST llvm::StructType *argStructType = static_cast(pt->getElementType()); - assert(argStructType->getNumElements() == argVals.size() + 1); + Assert(argStructType->getNumElements() == argVals.size() + 1); llvm::Function *falloc = m->module->getFunction("ISPCAlloc"); - assert(falloc != NULL); + Assert(falloc != NULL); llvm::Value *structSize = g->target.SizeOf(argStructType); if (structSize->getType() != LLVMTypes::Int64Type) // ISPCAlloc expects the size as an uint64_t, but on 32-bit @@ -2493,7 +2493,7 @@ FunctionEmitContext::LaunchInst(llvm::Value *callee, // argument block we just filled in llvm::Value *fptr = BitCastInst(callee, LLVMTypes::VoidPointerType); llvm::Function *flaunch = m->module->getFunction("ISPCLaunch"); - assert(flaunch != NULL); + Assert(flaunch != NULL); std::vector args; args.push_back(launchGroupHandlePtr); args.push_back(fptr); @@ -2542,7 +2542,7 @@ FunctionEmitContext::addVaryingOffsetsIfNeeded(llvm::Value *ptr, const Type *ptrType) { // This should only be called for varying pointers const PointerType *pt = dynamic_cast(ptrType); - assert(pt && pt->IsVaryingType()); + Assert(pt && pt->IsVaryingType()); const Type *baseType = ptrType->GetBaseType(); if (dynamic_cast(baseType) == NULL && diff --git a/decl.cpp b/decl.cpp index 67ed419d..21736691 100644 --- a/decl.cpp +++ b/decl.cpp @@ -216,7 +216,7 @@ Declarator::GetFunctionInfo(DeclSpecs *ds, std::vector *funArgs) { return NULL; 
Symbol *declSym = GetSymbol(); - assert(declSym != NULL); + Assert(declSym != NULL); // Get the symbol for the function from the symbol table. (It should // already have been added to the symbol table by AddGlobal() by the @@ -232,11 +232,11 @@ Declarator::GetFunctionInfo(DeclSpecs *ds, std::vector *funArgs) { Declarator *d = this; while (d != NULL && d->kind != DK_FUNCTION) d = d->child; - assert(d != NULL); + Assert(d != NULL); for (unsigned int i = 0; i < d->functionParams.size(); ++i) { Declaration *pdecl = d->functionParams[i]; - assert(pdecl->declarators.size() == 1); + Assert(pdecl->declarators.size() == 1); funArgs->push_back(pdecl->declarators[0]->GetSymbol()); } @@ -263,8 +263,8 @@ Declarator::GetType(const Type *base, DeclSpecs *ds) const { case DK_BASE: // All of the type qualifiers should be in the DeclSpecs for the // base declarator - assert(typeQualifiers == 0); - assert(child == NULL); + Assert(typeQualifiers == 0); + Assert(child == NULL); return type; case DK_POINTER: @@ -376,7 +376,7 @@ Declarator::GetType(const Type *base, DeclSpecs *ds) const { // it lives down to the base declarator. Declarator *decl = d->declarators[0]; while (decl->child != NULL) { - assert(decl->initExpr == NULL); + Assert(decl->initExpr == NULL); decl = decl->child; } @@ -485,7 +485,7 @@ Declaration::Declaration(DeclSpecs *ds, Declarator *d) { std::vector Declaration::GetVariableDeclarations() const { - assert(declSpecs->storageClass != SC_TYPEDEF); + Assert(declSpecs->storageClass != SC_TYPEDEF); std::vector vars; for (unsigned int i = 0; i < declarators.size(); ++i) { diff --git a/expr.cpp b/expr.cpp index 0db4b5f1..b36e08db 100644 --- a/expr.cpp +++ b/expr.cpp @@ -137,15 +137,15 @@ lMaybeIssuePrecisionWarning(const AtomicType *toAtomicType, static Expr * lArrayToPointer(Expr *expr) { - assert(expr && dynamic_cast(expr->GetType())); + Assert(expr && dynamic_cast(expr->GetType())); Expr *zero = new ConstExpr(AtomicType::UniformInt32, 0, expr->pos); Expr *index = new IndexExpr(expr, zero, expr->pos); Expr *addr = new AddressOfExpr(index, expr->pos); addr = addr->TypeCheck(); - assert(addr != NULL); + Assert(addr != NULL); addr = addr->Optimize(); - assert(addr != NULL); + Assert(addr != NULL); return addr; } @@ -178,7 +178,7 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr, bool failureOk, const char *errorMsgBase, SourcePos pos) { /* This function is way too long and complex. Is type conversion stuff always this messy, or can this be cleaned up somehow? */ - assert(failureOk || errorMsgBase != NULL); + Assert(failureOk || errorMsgBase != NULL); if (toType == NULL || fromType == NULL) return false; @@ -380,7 +380,7 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr, fromArrayType->GetElementType())) { // the case of different element counts should have returned // successfully earlier, yes?? - assert(toArrayType->GetElementCount() != fromArrayType->GetElementCount()); + Assert(toArrayType->GetElementCount() != fromArrayType->GetElementCount()); goto typecast_ok; } else if (Type::Equal(toArrayType->GetElementType(), @@ -436,7 +436,7 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr, // enum -> atomic (integer, generally...) 
is always ok if (fromEnumType != NULL) { - assert(toAtomicType != NULL || toVectorType != NULL); + Assert(toAtomicType != NULL || toVectorType != NULL); goto typecast_ok; } @@ -509,7 +509,7 @@ lMatchingBoolType(const Type *type) { if (vt != NULL) return new VectorType(boolBase, vt->GetElementCount()); else { - assert(dynamic_cast(type) != NULL || + Assert(dynamic_cast(type) != NULL || dynamic_cast(type) != NULL); return boolBase; } @@ -527,7 +527,7 @@ lLLVMConstantValue(const Type *type, llvm::LLVMContext *ctx, double value) { // This function is only called with, and only works for atomic, enum, // and vector types. - assert(atomicType != NULL || enumType != NULL || vectorType != NULL || + Assert(atomicType != NULL || enumType != NULL || vectorType != NULL || pointerType != NULL); if (atomicType != NULL || enumType != NULL) { @@ -549,7 +549,7 @@ lLLVMConstantValue(const Type *type, llvm::LLVMContext *ctx, double value) { return LLVMBoolVector(value != 0.); case AtomicType::TYPE_INT8: { int i = (int)value; - assert((double)i == value); + Assert((double)i == value); return isUniform ? LLVMInt8(i) : LLVMInt8Vector(i); } case AtomicType::TYPE_UINT8: { @@ -558,7 +558,7 @@ lLLVMConstantValue(const Type *type, llvm::LLVMContext *ctx, double value) { } case AtomicType::TYPE_INT16: { int i = (int)value; - assert((double)i == value); + Assert((double)i == value); return isUniform ? LLVMInt16(i) : LLVMInt16Vector(i); } case AtomicType::TYPE_UINT16: { @@ -567,7 +567,7 @@ lLLVMConstantValue(const Type *type, llvm::LLVMContext *ctx, double value) { } case AtomicType::TYPE_INT32: { int i = (int)value; - assert((double)i == value); + Assert((double)i == value); return isUniform ? LLVMInt32(i) : LLVMInt32Vector(i); } case AtomicType::TYPE_UINT32: { @@ -579,12 +579,12 @@ lLLVMConstantValue(const Type *type, llvm::LLVMContext *ctx, double value) { LLVMFloatVector((float)value); case AtomicType::TYPE_UINT64: { uint64_t i = (uint64_t)value; - assert(value == (int64_t)i); + Assert(value == (int64_t)i); return isUniform ? LLVMUInt64(i) : LLVMUInt64Vector(i); } case AtomicType::TYPE_INT64: { int64_t i = (int64_t)value; - assert((double)i == value); + Assert((double)i == value); return isUniform ? 
LLVMInt64(i) : LLVMInt64Vector(i); } case AtomicType::TYPE_DOUBLE: @@ -595,7 +595,7 @@ lLLVMConstantValue(const Type *type, llvm::LLVMContext *ctx, double value) { } } else if (pointerType != NULL) { - assert(value == 0); + Assert(value == 0); if (pointerType->IsUniformType()) return llvm::Constant::getNullValue(LLVMTypes::VoidPointerType); else @@ -617,7 +617,7 @@ lLLVMConstantValue(const Type *type, llvm::LLVMContext *ctx, double value) { if (baseType->IsUniformType()) { LLVM_TYPE_CONST llvm::VectorType *lvt = llvm::dyn_cast(llvmVectorType); - assert(lvt != NULL); + Assert(lvt != NULL); std::vector vals; for (unsigned int i = 0; i < lvt->getNumElements(); ++i) vals.push_back(constElement); @@ -626,7 +626,7 @@ lLLVMConstantValue(const Type *type, llvm::LLVMContext *ctx, double value) { else { LLVM_TYPE_CONST llvm::ArrayType *lat = llvm::dyn_cast(llvmVectorType); - assert(lat != NULL); + Assert(lat != NULL); std::vector vals; for (unsigned int i = 0; i < lat->getNumElements(); ++i) vals.push_back(constElement); @@ -658,7 +658,7 @@ lMaskForSymbol(Symbol *baseSym, FunctionEmitContext *ctx) { static void lStoreAssignResult(llvm::Value *value, llvm::Value *ptr, const Type *ptrType, FunctionEmitContext *ctx, Symbol *baseSym) { - assert(baseSym != NULL && + Assert(baseSym != NULL && baseSym->varyingCFDepth <= ctx->VaryingCFDepth()); if (!g->opt.disableMaskedStoreToStore && !g->opt.disableMaskAllOnOptimizations && @@ -769,7 +769,7 @@ lEmitNegate(Expr *arg, SourcePos pos, FunctionEmitContext *ctx) { return ctx->BinaryOperator(llvm::Instruction::FSub, zero, argVal, "fnegate"); else { - assert(type->IsIntType()); + Assert(type->IsIntType()); return ctx->BinaryOperator(llvm::Instruction::Sub, zero, argVal, "inegate"); } @@ -913,7 +913,7 @@ UnaryExpr::Optimize() { FATAL("unexpected type in UnaryExpr::Optimize() / BitNot case"); } case LogicalNot: { - assert(type == AtomicType::UniformBool || + Assert(type == AtomicType::UniformBool || type == AtomicType::VaryingBool || type == AtomicType::UniformConstBool || type == AtomicType::VaryingConstBool); @@ -1155,7 +1155,7 @@ lEmitBinaryArith(BinaryExpr::Op op, llvm::Value *value0, llvm::Value *value1, } } else { - assert(Type::EqualIgnoringConst(type0, type1)); + Assert(Type::EqualIgnoringConst(type0, type1)); llvm::Instruction::BinaryOps inst; bool isFloatOp = type0->IsFloatType(); @@ -1318,7 +1318,7 @@ BinaryExpr::GetType() const { // and will fail type checking and (int + ptr) should be canonicalized // into (ptr + int) by type checking. if (op == Add) - assert(dynamic_cast(type1) == NULL); + Assert(dynamic_cast(type1) == NULL); if (op == Comma) return arg1->GetType(); @@ -1343,14 +1343,14 @@ BinaryExpr::GetType() const { } // otherwise fall through for these... - assert(op == Lt || op == Gt || op == Le || op == Ge || + Assert(op == Lt || op == Gt || op == Le || op == Ge || op == Equal || op == NotEqual); } const Type *exprType = Type::MoreGeneralType(type0, type1, pos, lOpString(op)); // I don't think that MoreGeneralType should be able to fail after the // checks done in BinaryExpr::TypeCheck(). 
- assert(exprType != NULL); + Assert(exprType != NULL); switch (op) { case Add: @@ -1534,7 +1534,7 @@ BinaryExpr::Optimize() { std::vector rcpFuns; m->symbolTable->LookupFunction("rcp", &rcpFuns); if (rcpFuns.size() > 0) { - assert(rcpFuns.size() == 2); + Assert(rcpFuns.size() == 2); Expr *rcpSymExpr = new FunctionSymbolExpr("rcp", rcpFuns, pos); ExprList *args = new ExprList(arg1, arg1->pos); Expr *rcpCall = new FunctionCallExpr(rcpSymExpr, args, @@ -1564,7 +1564,7 @@ BinaryExpr::Optimize() { if (constArg0 == NULL || constArg1 == NULL) return this; - assert(Type::EqualIgnoringConst(arg0->GetType(), arg1->GetType())); + Assert(Type::EqualIgnoringConst(arg0->GetType(), arg1->GetType())); const Type *type = arg0->GetType()->GetAsNonConstType(); if (type == AtomicType::UniformFloat || type == AtomicType::VaryingFloat) { float v0[ISPC_MAX_NVEC], v1[ISPC_MAX_NVEC]; @@ -1653,12 +1653,12 @@ BinaryExpr::TypeCheck() { if (dynamic_cast(type0) != NULL) { arg0 = new DereferenceExpr(arg0, arg0->pos); type0 = arg0->GetType(); - assert(type0 != NULL); + Assert(type0 != NULL); } if (dynamic_cast(type1) != NULL) { arg1 = new DereferenceExpr(arg1, arg1->pos); type1 = arg1->GetType(); - assert(type1 != NULL); + Assert(type1 != NULL); } // Convert arrays to pointers to their first elements @@ -1711,7 +1711,7 @@ BinaryExpr::TypeCheck() { std::swap(pt0, pt1); } - assert(pt0 != NULL); + Assert(pt0 != NULL); if (PointerType::IsVoidPointer(pt0)) { Error(pos, "Illegal to perform pointer arithmetic " @@ -1726,7 +1726,7 @@ BinaryExpr::TypeCheck() { if (type1->IsVaryingType()) { arg0 = TypeConvertExpr(arg0, type0->GetAsVaryingType(), "pointer addition"); - assert(arg0 != NULL); + Assert(arg0 != NULL); } arg1 = TypeConvertExpr(arg1, offsetType, lOpString(op)); @@ -2044,18 +2044,18 @@ AssignExpr::GetValue(FunctionEmitContext *ctx) const { case Assign: { llvm::Value *lv = lvalue->GetLValue(ctx); if (lv == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } const Type *lvalueType = lvalue->GetLValueType(); if (lvalueType == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } llvm::Value *rv = rvalue->GetValue(ctx); if (rv == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -2076,7 +2076,7 @@ AssignExpr::GetValue(FunctionEmitContext *ctx) const { case XorAssign: case OrAssign: { // This should be caught during type checking - assert(!dynamic_cast(type) && + Assert(!dynamic_cast(type) && !dynamic_cast(type)); return lEmitOpAssign(op, lvalue, rvalue, type, baseSym, pos, ctx); } @@ -2281,7 +2281,7 @@ lEmitVaryingSelect(FunctionEmitContext *ctx, llvm::Value *test, // Don't need to worry about masking here ctx->StoreInst(expr2, resultPtr); // Use masking to conditionally store the expr1 values - assert(resultPtr->getType() == + Assert(resultPtr->getType() == PointerType::GetUniform(type)->LLVMType(g->ctx)); ctx->StoreInst(expr1, resultPtr, test, PointerType::GetUniform(type)); return ctx->LoadInst(resultPtr, "selectexpr_final"); @@ -2297,7 +2297,7 @@ SelectExpr::GetValue(FunctionEmitContext *ctx) const { const Type *testType = test->GetType()->GetAsNonConstType(); // This should be taken care of during typechecking - assert(testType->GetBaseType() == AtomicType::UniformBool || + Assert(testType->GetBaseType() == AtomicType::UniformBool || testType->GetBaseType() == AtomicType::VaryingBool); const Type *type = expr1->GetType(); @@ -2339,7 +2339,7 @@ SelectExpr::GetValue(FunctionEmitContext *ctx) const { // value expressions with the mask 
set appropriately and then do an // element-wise select to get the result llvm::Value *testVal = test->GetValue(ctx); - assert(testVal->getType() == LLVMTypes::MaskType); + Assert(testVal->getType() == LLVMTypes::MaskType); llvm::Value *oldMask = ctx->GetInternalMask(); ctx->SetInternalMaskAnd(oldMask, testVal); llvm::Value *expr1Val = expr1->GetValue(ctx); @@ -2360,8 +2360,8 @@ SelectExpr::GetValue(FunctionEmitContext *ctx) const { ctx->SetDebugPos(pos); const VectorType *vt = dynamic_cast(type); // Things that typechecking should have caught - assert(vt != NULL); - assert(dynamic_cast(testType) != NULL && + Assert(vt != NULL); + Assert(dynamic_cast(testType) != NULL && (dynamic_cast(testType)->GetElementCount() == vt->GetElementCount())); @@ -2402,7 +2402,7 @@ SelectExpr::GetType() const { dynamic_cast(testType)->GetElementCount() : 0; int expr1VecSize = dynamic_cast(expr1Type) != NULL ? dynamic_cast(expr1Type)->GetElementCount() : 0; - assert(!(testVecSize != 0 && expr1VecSize != 0 && testVecSize != expr1VecSize)); + Assert(!(testVecSize != 0 && expr1VecSize != 0 && testVecSize != expr1VecSize)); int vectorSize = std::max(testVecSize, expr1VecSize); return Type::MoreGeneralType(expr1Type, expr2Type, Union(expr1->pos, expr2->pos), @@ -2540,12 +2540,12 @@ FunctionCallExpr::GetValue(FunctionEmitContext *ctx) const { llvm::Value *callee = func->GetValue(ctx); if (callee == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } const FunctionType *ft = lGetFunctionType(func); - assert(ft != NULL); + Assert(ft != NULL); bool isVoidFunc = (ft->GetReturnType() == AtomicType::Void); // Automatically convert function call args to references if needed. @@ -2558,7 +2558,7 @@ FunctionCallExpr::GetValue(FunctionEmitContext *ctx) const { // Specifically, this can happen if there's an error earlier during // overload resolution. if ((int)callargs.size() > ft->GetNumParameters()) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -2625,7 +2625,7 @@ FunctionCallExpr::GetValue(FunctionEmitContext *ctx) const { llvm::Value *retVal = NULL; ctx->SetDebugPos(pos); if (ft->isTask) { - assert(launchCountExpr != NULL); + Assert(launchCountExpr != NULL); llvm::Value *launchCount = launchCountExpr->GetValue(ctx); if (launchCount != NULL) ctx->LaunchInst(callee, argVals, launchCount); @@ -2723,7 +2723,7 @@ FunctionCallExpr::TypeCheck() { if (isLaunch) Error(pos, "\"launch\" expression illegal with non-\"task\"-" "qualified function."); - assert(launchCountExpr == NULL); + Assert(launchCountExpr == NULL); } } else { @@ -2732,7 +2732,7 @@ FunctionCallExpr::TypeCheck() { if (fptrType == NULL) return NULL; - assert(dynamic_cast(fptrType) != NULL); + Assert(dynamic_cast(fptrType) != NULL); const FunctionType *funcType = dynamic_cast(fptrType->GetBaseType()); if (funcType == NULL) { @@ -2779,7 +2779,7 @@ FunctionCallExpr::TypeCheck() { // Otherwise the parameter default saves us. It should // be there for sure, given the check right above the // for loop. 
- assert(funcType->GetParameterDefault(i) != NULL); + Assert(funcType->GetParameterDefault(i) != NULL); } if (fptrType->IsVaryingType() && @@ -2809,7 +2809,7 @@ FunctionCallExpr::EstimateCost() const { // it's going through a function pointer const Type *fpType = func->GetType(); if (fpType != NULL) { - assert(dynamic_cast(fpType) != NULL); + Assert(dynamic_cast(fpType) != NULL); if (fpType->IsUniformType()) callCost = COST_FUNPTR_UNIFORM; else @@ -2923,7 +2923,7 @@ ExprList::GetConstant(const Type *type) const { #else LLVM_TYPE_CONST llvm::StructType *llvmStructType = llvm::dyn_cast(collectionType->LLVMType(g->ctx)); - assert(llvmStructType != NULL); + Assert(llvmStructType != NULL); return llvm::ConstantStruct::get(llvmStructType, cv); #endif } @@ -2934,7 +2934,7 @@ ExprList::GetConstant(const Type *type) const { // FIXME: should the assert below validly fail for uniform vectors // now? Need a test case to reproduce it and then to be sure we // have the right fix; leave the assert until we can hit it... - assert(lat != NULL); + Assert(lat != NULL); return llvm::ConstantArray::get(lat, cv); } return NULL; @@ -3001,7 +3001,7 @@ lAddVaryingOffsetsIfNeeded(FunctionEmitContext *ctx, llvm::Value *ptr, // References are uniform pointers, so no offsetting is needed return ptr; - assert(dynamic_cast(ptrType) != NULL); + Assert(dynamic_cast(ptrType) != NULL); if (ptrType->IsUniformType()) return ptr; @@ -3050,7 +3050,7 @@ IndexExpr::GetValue(FunctionEmitContext *ctx) const { // that we can index from there... llvm::Value *val = baseExpr->GetValue(ctx); if (val == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } ctx->SetDebugPos(pos); @@ -3064,7 +3064,7 @@ IndexExpr::GetValue(FunctionEmitContext *ctx) const { const SequentialType *st = dynamic_cast(baseExprType); if (st == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } lvalueType = PointerType::GetUniform(st->GetElementType()); @@ -3075,7 +3075,7 @@ IndexExpr::GetValue(FunctionEmitContext *ctx) const { } else { Symbol *baseSym = GetBaseSymbol(); - assert(baseSym != NULL); + Assert(baseSym != NULL); mask = lMaskForSymbol(baseSym, ctx); } @@ -3103,7 +3103,7 @@ IndexExpr::GetType() const { const SequentialType *sequentialType = dynamic_cast(baseExprType->GetReferenceTarget()); // Typechecking should have caught this... 
- assert(sequentialType != NULL); + Assert(sequentialType != NULL); elementType = sequentialType->GetElementType(); } @@ -3152,7 +3152,7 @@ IndexExpr::GetLValue(FunctionEmitContext *ctx) const { } else { baseExprType = baseExprType->GetReferenceTarget(); - assert(dynamic_cast(baseExprType) || + Assert(dynamic_cast(baseExprType) || dynamic_cast(baseExprType)); basePtr = baseExpr->GetValue(ctx); basePtrType = baseExpr->GetType(); @@ -3197,7 +3197,7 @@ IndexExpr::GetLValueType() const { if (dynamic_cast(baseExprLValueType) != NULL) baseExprLValueType = PointerType::GetUniform(baseExprLValueType->GetReferenceTarget()); - assert(dynamic_cast(baseExprLValueType) != NULL); + Assert(dynamic_cast(baseExprLValueType) != NULL); // FIXME: can we do something in the type system that unifies the // concept of a sequential type's element type and a pointer type's @@ -3214,7 +3214,7 @@ IndexExpr::GetLValueType() const { const PointerType *pt = dynamic_cast(baseExprLValueType->GetBaseType()); - assert(pt != NULL); + Assert(pt != NULL); if (baseExprLValueType->IsUniformType() && indexType->IsUniformType()) return PointerType::GetUniform(pt->GetBaseType()); else @@ -3419,10 +3419,10 @@ StructMemberExpr::getStructType() const { else { const ReferenceType *rt = dynamic_cast(exprType); - assert(rt != NULL); + Assert(rt != NULL); structType = dynamic_cast(rt->GetReferenceTarget()); } - assert(structType != NULL); + Assert(structType != NULL); } return structType; } @@ -3461,11 +3461,11 @@ VectorMemberExpr::VectorMemberExpr(Expr *e, const char *id, SourcePos p, if (pt != NULL) exprVectorType = dynamic_cast(pt->GetBaseType()); else { - assert(dynamic_cast(exprType) != NULL); + Assert(dynamic_cast(exprType) != NULL); exprVectorType = dynamic_cast(exprType->GetReferenceTarget()); } - assert(exprVectorType != NULL); + Assert(exprVectorType != NULL); } memberType = new VectorType(exprVectorType->GetElementType(), identifier.length()); @@ -3515,7 +3515,7 @@ VectorMemberExpr::GetLValueType() const { vt = dynamic_cast(exprLValueType->GetReferenceTarget()); else vt = dynamic_cast(exprLValueType->GetBaseType()); - assert(vt != NULL); + Assert(vt != NULL); // we don't want to report that it's e.g. a pointer to a float<1>, // but ta pointer to a float, etc. @@ -3562,7 +3562,7 @@ VectorMemberExpr::GetValue(FunctionEmitContext *ctx) const { } if (basePtr == NULL || basePtrType == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } @@ -3624,7 +3624,7 @@ MemberExpr::create(Expr *e, const char *id, SourcePos p, SourcePos idpos, if (referenceType != NULL) { e = new DereferenceExpr(e, e->pos); exprType = e->GetType(); - assert(exprType != NULL); + Assert(exprType != NULL); } const PointerType *pointerType = dynamic_cast(exprType); @@ -3686,7 +3686,7 @@ MemberExpr::GetValue(FunctionEmitContext *ctx) const { // so that we can index from there... 
llvm::Value *val = expr->GetValue(ctx); if (!val) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return NULL; } ctx->SetDebugPos(pos); @@ -3706,7 +3706,7 @@ MemberExpr::GetValue(FunctionEmitContext *ctx) const { } else { Symbol *baseSym = GetBaseSymbol(); - assert(baseSym != NULL); + Assert(baseSym != NULL); mask = lMaskForSymbol(baseSym, ctx); } @@ -3850,7 +3850,7 @@ ConstExpr::ConstExpr(const Type *t, int8_t i, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstInt8); + Assert(type == AtomicType::UniformConstInt8); int8Val[0] = i; } @@ -3859,7 +3859,7 @@ ConstExpr::ConstExpr(const Type *t, int8_t *i, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstInt8 || + Assert(type == AtomicType::UniformConstInt8 || type == AtomicType::VaryingConstInt8); for (int j = 0; j < Count(); ++j) int8Val[j] = i[j]; @@ -3870,7 +3870,7 @@ ConstExpr::ConstExpr(const Type *t, uint8_t u, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstUInt8); + Assert(type == AtomicType::UniformConstUInt8); uint8Val[0] = u; } @@ -3879,7 +3879,7 @@ ConstExpr::ConstExpr(const Type *t, uint8_t *u, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstUInt8 || + Assert(type == AtomicType::UniformConstUInt8 || type == AtomicType::VaryingConstUInt8); for (int j = 0; j < Count(); ++j) uint8Val[j] = u[j]; @@ -3890,7 +3890,7 @@ ConstExpr::ConstExpr(const Type *t, int16_t i, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstInt16); + Assert(type == AtomicType::UniformConstInt16); int16Val[0] = i; } @@ -3899,7 +3899,7 @@ ConstExpr::ConstExpr(const Type *t, int16_t *i, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstInt16 || + Assert(type == AtomicType::UniformConstInt16 || type == AtomicType::VaryingConstInt16); for (int j = 0; j < Count(); ++j) int16Val[j] = i[j]; @@ -3910,7 +3910,7 @@ ConstExpr::ConstExpr(const Type *t, uint16_t u, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstUInt16); + Assert(type == AtomicType::UniformConstUInt16); uint16Val[0] = u; } @@ -3919,7 +3919,7 @@ ConstExpr::ConstExpr(const Type *t, uint16_t *u, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstUInt16 || + Assert(type == AtomicType::UniformConstUInt16 || type == AtomicType::VaryingConstUInt16); for (int j = 0; j < Count(); ++j) uint16Val[j] = u[j]; @@ -3930,7 +3930,7 @@ ConstExpr::ConstExpr(const Type *t, int32_t i, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstInt32); + Assert(type == AtomicType::UniformConstInt32); int32Val[0] = i; } @@ -3939,7 +3939,7 @@ ConstExpr::ConstExpr(const Type *t, int32_t *i, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstInt32 || + Assert(type == AtomicType::UniformConstInt32 || type == AtomicType::VaryingConstInt32); for (int j = 0; j < Count(); ++j) int32Val[j] = i[j]; @@ -3950,7 +3950,7 @@ ConstExpr::ConstExpr(const Type *t, uint32_t u, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstUInt32 || + Assert(type == AtomicType::UniformConstUInt32 || (dynamic_cast(type) != NULL && 
type->IsUniformType())); uint32Val[0] = u; @@ -3961,7 +3961,7 @@ ConstExpr::ConstExpr(const Type *t, uint32_t *u, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstUInt32 || + Assert(type == AtomicType::UniformConstUInt32 || type == AtomicType::VaryingConstUInt32 || (dynamic_cast(type) != NULL)); for (int j = 0; j < Count(); ++j) @@ -3973,7 +3973,7 @@ ConstExpr::ConstExpr(const Type *t, float f, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstFloat); + Assert(type == AtomicType::UniformConstFloat); floatVal[0] = f; } @@ -3982,7 +3982,7 @@ ConstExpr::ConstExpr(const Type *t, float *f, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstFloat || + Assert(type == AtomicType::UniformConstFloat || type == AtomicType::VaryingConstFloat); for (int j = 0; j < Count(); ++j) floatVal[j] = f[j]; @@ -3993,7 +3993,7 @@ ConstExpr::ConstExpr(const Type *t, int64_t i, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstInt64); + Assert(type == AtomicType::UniformConstInt64); int64Val[0] = i; } @@ -4002,7 +4002,7 @@ ConstExpr::ConstExpr(const Type *t, int64_t *i, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstInt64 || + Assert(type == AtomicType::UniformConstInt64 || type == AtomicType::VaryingConstInt64); for (int j = 0; j < Count(); ++j) int64Val[j] = i[j]; @@ -4013,7 +4013,7 @@ ConstExpr::ConstExpr(const Type *t, uint64_t u, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstUInt64); + Assert(type == AtomicType::UniformConstUInt64); uint64Val[0] = u; } @@ -4022,7 +4022,7 @@ ConstExpr::ConstExpr(const Type *t, uint64_t *u, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstUInt64 || + Assert(type == AtomicType::UniformConstUInt64 || type == AtomicType::VaryingConstUInt64); for (int j = 0; j < Count(); ++j) uint64Val[j] = u[j]; @@ -4033,7 +4033,7 @@ ConstExpr::ConstExpr(const Type *t, double f, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstDouble); + Assert(type == AtomicType::UniformConstDouble); doubleVal[0] = f; } @@ -4042,7 +4042,7 @@ ConstExpr::ConstExpr(const Type *t, double *f, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstDouble || + Assert(type == AtomicType::UniformConstDouble || type == AtomicType::VaryingConstDouble); for (int j = 0; j < Count(); ++j) doubleVal[j] = f[j]; @@ -4053,7 +4053,7 @@ ConstExpr::ConstExpr(const Type *t, bool b, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstBool); + Assert(type == AtomicType::UniformConstBool); boolVal[0] = b; } @@ -4062,7 +4062,7 @@ ConstExpr::ConstExpr(const Type *t, bool *b, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - assert(type == AtomicType::UniformConstBool || + Assert(type == AtomicType::UniformConstBool || type == AtomicType::VaryingConstBool); for (int j = 0; j < Count(); ++j) boolVal[j] = b[j]; @@ -4129,7 +4129,7 @@ ConstExpr::getBasicType() const { if (at != NULL) return at->basicType; else { - assert(dynamic_cast(type) != NULL); + Assert(dynamic_cast(type) != NULL); return AtomicType::TYPE_UINT32; } } @@ -4481,7 +4481,7 @@ 
ConstExpr::GetConstant(const Type *type) const { // Caller shouldn't be trying to stuff a varying value here into a // constant type. if (type->IsUniformType()) - assert(Count() == 1); + Assert(Count() == 1); type = type->GetAsNonConstType(); if (type == AtomicType::UniformBool || type == AtomicType::VaryingBool) { @@ -5208,7 +5208,7 @@ lUniformValueToVarying(FunctionEmitContext *ctx, llvm::Value *value, // Otherwise we must have a uniform AtomicType, so smear its value // across the vector lanes. - assert(dynamic_cast(type) != NULL); + Assert(dynamic_cast(type) != NULL); return ctx->SmearUniform(value); } @@ -5247,13 +5247,13 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const { return value; } else { - assert(fromType->IsUniformType() && toType->IsVaryingType()); + Assert(fromType->IsUniformType() && toType->IsVaryingType()); value = ctx->PtrToIntInst(value); return ctx->SmearUniform(value); } } else { - assert(dynamic_cast(toType) != NULL); + Assert(dynamic_cast(toType) != NULL); if (toType->IsBoolType()) { // convert pointer to bool LLVM_TYPE_CONST llvm::Type *lfu = @@ -5305,20 +5305,20 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const { // implicit array to pointer to first element Expr *arrayAsPtr = lArrayToPointer(expr); if (Type::EqualIgnoringConst(arrayAsPtr->GetType(), toPointerType) == false) { - assert(Type::EqualIgnoringConst(arrayAsPtr->GetType()->GetAsVaryingType(), + Assert(Type::EqualIgnoringConst(arrayAsPtr->GetType()->GetAsVaryingType(), toPointerType) == true); arrayAsPtr = new TypeCastExpr(toPointerType, arrayAsPtr, false, pos); arrayAsPtr = arrayAsPtr->TypeCheck(); - assert(arrayAsPtr != NULL); + Assert(arrayAsPtr != NULL); arrayAsPtr = arrayAsPtr->Optimize(); - assert(arrayAsPtr != NULL); + Assert(arrayAsPtr != NULL); } - assert(Type::EqualIgnoringConst(arrayAsPtr->GetType(), toPointerType)); + Assert(Type::EqualIgnoringConst(arrayAsPtr->GetType(), toPointerType)); return arrayAsPtr->GetValue(ctx); } // This also should be caught during typechecking - assert(!(toType->IsUniformType() && fromType->IsVaryingType())); + Assert(!(toType->IsUniformType() && fromType->IsVaryingType())); if (toArrayType != NULL && fromArrayType != NULL) { // cast array pointer from [n x foo] to [0 x foo] if needed to be able @@ -5327,7 +5327,7 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const { (toArrayType->GetElementCount() != fromArrayType->GetElementCount())) Warning(pos, "Type-converting array of length %d to length %d", fromArrayType->GetElementCount(), toArrayType->GetElementCount()); - assert(Type::EqualIgnoringConst(toArrayType->GetBaseType(), + Assert(Type::EqualIgnoringConst(toArrayType->GetBaseType(), fromArrayType->GetBaseType())); llvm::Value *v = expr->GetValue(ctx); LLVM_TYPE_CONST llvm::Type *ptype = toType->LLVMType(g->ctx); @@ -5349,14 +5349,14 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const { (toArray->GetElementCount() != fromArray->GetElementCount())) Warning(pos, "Type-converting array of length %d to length %d", fromArray->GetElementCount(), toArray->GetElementCount()); - assert(Type::EqualIgnoringConst(toArray->GetBaseType(), + Assert(Type::EqualIgnoringConst(toArray->GetBaseType(), fromArray->GetBaseType())); llvm::Value *v = expr->GetValue(ctx); LLVM_TYPE_CONST llvm::Type *ptype = toType->LLVMType(g->ctx); return ctx->BitCastInst(v, ptype); //, "array_cast_0size"); } - assert(Type::Equal(toTarget, fromTarget) || + Assert(Type::Equal(toTarget, fromTarget) || Type::Equal(toTarget, fromTarget->GetAsConstType())); return 
expr->GetValue(ctx); } @@ -5366,7 +5366,7 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const { if (toStruct && fromStruct) { // The only legal type conversions for structs are to go from a // uniform to a varying instance of the same struct type. - assert(toStruct->IsVaryingType() && fromStruct->IsUniformType() && + Assert(toStruct->IsVaryingType() && fromStruct->IsUniformType() && Type::Equal(toStruct, fromStruct->GetAsVaryingType())); llvm::Value *origValue = expr->GetValue(ctx); @@ -5379,7 +5379,7 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const { const VectorType *fromVector = dynamic_cast(fromType); if (toVector && fromVector) { // this should be caught during typechecking - assert(toVector->GetElementCount() == fromVector->GetElementCount()); + Assert(toVector->GetElementCount() == fromVector->GetElementCount()); llvm::Value *exprVal = expr->GetValue(ctx); if (!exprVal) @@ -5421,7 +5421,7 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const { const AtomicType *fromAtomic = dynamic_cast(fromType); // at this point, coming from an atomic type is all that's left... - assert(fromAtomic != NULL); + Assert(fromAtomic != NULL); if (toVector) { // scalar -> short vector conversion @@ -5449,7 +5449,7 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const { else { const AtomicType *toAtomic = dynamic_cast(toType); // typechecking should ensure this is the case - assert(toAtomic != NULL); + Assert(toAtomic != NULL); return lTypeConvAtomic(ctx, exprVal, toAtomic, fromAtomic, pos); } @@ -5666,7 +5666,7 @@ TypeCastExpr::GetConstant(const Type *constType) const { // method called. Thus, the only case we do need to worry about here // is converting a uniform function pointer to a varying function // pointer of the same type. - assert(Type::Equal(constType, type)); + Assert(Type::Equal(constType, type)); const FunctionType *ft = NULL; if (dynamic_cast(type) == NULL || (ft = dynamic_cast(type->GetBaseType())) == NULL) @@ -5678,7 +5678,7 @@ TypeCastExpr::GetConstant(const Type *constType) const { ec = llvm::ConstantExpr::getPtrToInt(ec, LLVMTypes::PointerIntType); - assert(type->IsVaryingType()); + Assert(type->IsVaryingType()); std::vector smear; for (int i = 0; i < g->target.vectorWidth; ++i) smear.push_back(ec); @@ -5834,7 +5834,7 @@ DereferenceExpr::GetType() const { if (dynamic_cast(exprType) != NULL) return exprType->GetReferenceTarget(); else { - assert(dynamic_cast(exprType) != NULL); + Assert(dynamic_cast(exprType) != NULL); if (exprType->IsUniformType()) return exprType->GetBaseType(); else @@ -6095,7 +6095,7 @@ SymbolExpr::Optimize() { if (symbol == NULL) return NULL; else if (symbol->constValue != NULL) { - assert(GetType()->IsConstType()); + Assert(GetType()->IsConstType()); return symbol->constValue; } else @@ -6191,8 +6191,8 @@ FunctionSymbolExpr::Print() const { llvm::Constant * FunctionSymbolExpr::GetConstant(const Type *type) const { - assert(type->IsUniformType()); - assert(GetType()->IsUniformType()); + Assert(type->IsUniformType()); + Assert(GetType()->IsUniformType()); if (Type::EqualIgnoringConst(type, GetType()) == false) return NULL; @@ -6364,7 +6364,7 @@ lMatchWithTypeConv(const Type *callType, const Type *funcArgType) { */ static Symbol * lGetBestMatch(std::vector > &matches) { - assert(matches.size() > 0); + Assert(matches.size() > 0); int minCost = matches[0].first; for (unsigned int i = 1; i < matches.size(); ++i) @@ -6405,7 +6405,7 @@ FunctionSymbolExpr::tryResolve(int (*matchFunc)(const Type *, const Type *), Symbol *candidateFunction = *iter; 
const FunctionType *ft = dynamic_cast(candidateFunction->type); - assert(ft != NULL); + Assert(ft != NULL); // There's no way to match if the caller is passing more arguments // than this function instance takes. diff --git a/func.cpp b/func.cpp index 4da771ff..603a6641 100644 --- a/func.cpp +++ b/func.cpp @@ -72,7 +72,7 @@ Function::Function(Symbol *s, const std::vector &a, Stmt *c) { code = c; maskSymbol = m->symbolTable->LookupVariable("__mask"); - assert(maskSymbol != NULL); + Assert(maskSymbol != NULL); if (code != NULL) { if (g->debugPrint) { @@ -109,7 +109,7 @@ Function::Function(Symbol *s, const std::vector &a, Stmt *c) { } const FunctionType *type = dynamic_cast(sym->type); - assert(type != NULL); + Assert(type != NULL); for (unsigned int i = 0; i < args.size(); ++i) if (dynamic_cast(args[i]->type) == NULL) @@ -117,13 +117,13 @@ Function::Function(Symbol *s, const std::vector &a, Stmt *c) { if (type->isTask) { threadIndexSym = m->symbolTable->LookupVariable("threadIndex"); - assert(threadIndexSym); + Assert(threadIndexSym); threadCountSym = m->symbolTable->LookupVariable("threadCount"); - assert(threadCountSym); + Assert(threadCountSym); taskIndexSym = m->symbolTable->LookupVariable("taskIndex"); - assert(taskIndexSym); + Assert(taskIndexSym); taskCountSym = m->symbolTable->LookupVariable("taskCount"); - assert(taskCountSym); + Assert(taskCountSym); } else threadIndexSym = threadCountSym = taskIndexSym = taskCountSym = NULL; @@ -133,7 +133,7 @@ Function::Function(Symbol *s, const std::vector &a, Stmt *c) { const Type * Function::GetReturnType() const { const FunctionType *type = dynamic_cast(sym->type); - assert(type != NULL); + Assert(type != NULL); return type->GetReturnType(); } @@ -141,7 +141,7 @@ Function::GetReturnType() const { const FunctionType * Function::GetType() const { const FunctionType *type = dynamic_cast(sym->type); - assert(type != NULL); + Assert(type != NULL); return type; } @@ -157,9 +157,9 @@ lCopyInTaskParameter(int i, llvm::Value *structArgPtr, const std::vectorgetType(); - assert(llvm::isa(structArgType)); + Assert(llvm::isa(structArgType)); const llvm::PointerType *pt = llvm::dyn_cast(structArgType); - assert(llvm::isa(pt->getElementType())); + Assert(llvm::isa(pt->getElementType())); const llvm::StructType *argStructType = llvm::dyn_cast(pt->getElementType()); @@ -201,7 +201,7 @@ Function::emitCode(FunctionEmitContext *ctx, llvm::Function *function, llvm::BasicBlock *entryBBlock = ctx->GetCurrentBasicBlock(); #endif const FunctionType *type = dynamic_cast(sym->type); - assert(type != NULL); + Assert(type != NULL); if (type->isTask == true) { // For tasks, we there should always be three parmeters: the // pointer to the structure that holds all of the arguments, the @@ -266,9 +266,9 @@ Function::emitCode(FunctionEmitContext *ctx, llvm::Function *function, else { // Otherwise use the mask to set the entry mask value argIter->setName("__mask"); - assert(argIter->getType() == LLVMTypes::MaskType); + Assert(argIter->getType() == LLVMTypes::MaskType); ctx->SetFunctionMask(argIter); - assert(++argIter == function->arg_end()); + Assert(++argIter == function->arg_end()); } } @@ -372,7 +372,7 @@ Function::GenerateIR() { return; llvm::Function *function = sym->function; - assert(function != NULL); + Assert(function != NULL); // But if that function has a definition, we don't want to redefine it. 
if (function->empty() == false) { @@ -411,7 +411,7 @@ Function::GenerateIR() { // it without a mask parameter and without name mangling so that // the application can call it const FunctionType *type = dynamic_cast(sym->type); - assert(type != NULL); + Assert(type != NULL); if (type->isExported) { if (!type->isTask) { LLVM_TYPE_CONST llvm::FunctionType *ftype = diff --git a/ispc.cpp b/ispc.cpp index a3b26cb0..b573cdff 100644 --- a/ispc.cpp +++ b/ispc.cpp @@ -282,7 +282,7 @@ Target::GetTargetMachine() const { #endif // !ISPC_IS_WINDOWS #endif // LLVM_2_9 - assert(targetMachine != NULL); + Assert(targetMachine != NULL); targetMachine->setAsmVerbosityDefault(true); return targetMachine; @@ -310,7 +310,7 @@ Target::GetISAString() const { llvm::Value * Target::SizeOf(LLVM_TYPE_CONST llvm::Type *type) { const llvm::TargetData *td = GetTargetMachine()->getTargetData(); - assert(td != NULL); + Assert(td != NULL); uint64_t byteSize = td->getTypeSizeInBits(type) / 8; if (is32Bit || g->opt.force32BitAddressing) return LLVMInt32(byteSize); @@ -322,12 +322,12 @@ Target::SizeOf(LLVM_TYPE_CONST llvm::Type *type) { llvm::Value * Target::StructOffset(LLVM_TYPE_CONST llvm::Type *type, int element) { const llvm::TargetData *td = GetTargetMachine()->getTargetData(); - assert(td != NULL); + Assert(td != NULL); LLVM_TYPE_CONST llvm::StructType *structType = llvm::dyn_cast(type); - assert(structType != NULL); + Assert(structType != NULL); const llvm::StructLayout *sl = td->getStructLayout(structType); - assert(sl != NULL); + Assert(sl != NULL); uint64_t offset = sl->getElementOffset(element); if (is32Bit || g->opt.force32BitAddressing) diff --git a/ispc.h b/ispc.h index 1662b4b3..7211ff93 100644 --- a/ispc.h +++ b/ispc.h @@ -50,11 +50,21 @@ #define ISPC_IS_APPLE #endif -#include #include +#include #include #include +#define Assert(expr) \ + ((void)((expr) ? 0 : __Assert (#expr, __FILE__, __LINE__))) +#define __Assert(expr, file, line) \ + ((void)fprintf(stderr, "%s:%u: Assertion failed: \"%s\"\n" \ + "***\n*** Please file a bug report at " \ + "https://github.com/ispc/ispc/issues\n*** (Including as much " \ + "information as you can about how to reproduce this error).\n" \ + "*** You have apparently encountered a bug in the compiler that " \ + "we'd like to fix!\n***\n", file, line, expr), abort(), 0) + /** @def ISPC_MAX_NVEC maximum vector size of any of the compliation targets. */ @@ -247,7 +257,7 @@ struct Opt { */ bool force32BitAddressing; - /** Indicates whether assert() statements should be ignored (for + /** Indicates whether Assert() statements should be ignored (for performance in the generated code). */ bool disableAsserts; diff --git a/lex.ll b/lex.ll index 88ff0763..ca5dfc64 100644 --- a/lex.ll +++ b/lex.ll @@ -274,7 +274,7 @@ lParseBinary(const char *ptr, SourcePos pos) { while (*ptr != '\0') { /* if this hits, the regexp for 0b... constants is broken */ - assert(*ptr == '0' || *ptr == '1'); + Assert(*ptr == '0' || *ptr == '1'); if ((val & (((int64_t)1)<<63)) && warned == false) { // We're about to shift out a set bit @@ -329,7 +329,7 @@ static void lHandleCppHash(SourcePos *pos) { char *ptr, *src; // Advance past the opening stuff on the line. 
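Note on the Assert()/__Assert() pair added to ispc.h above: it behaves like the C library assert() but prints the bug-report request before calling abort(), and (as defined) it is not compiled out by NDEBUG. Below is a minimal standalone sketch of the two macros in use; the #defines are copied from the hunk above, while the includes, main(), and the failing condition are hypothetical and only for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    #define Assert(expr) \
        ((void)((expr) ? 0 : __Assert (#expr, __FILE__, __LINE__)))
    #define __Assert(expr, file, line) \
        ((void)fprintf(stderr, "%s:%u: Assertion failed: \"%s\"\n" \
                       "***\n*** Please file a bug report at " \
                       "https://github.com/ispc/ispc/issues\n*** (Including as much " \
                       "information as you can about how to reproduce this error).\n" \
                       "*** You have apparently encountered a bug in the compiler that " \
                       "we'd like to fix!\n***\n", file, line, expr), abort(), 0)

    int main() {
        int errorCount = 0;          /* hypothetical value */
        Assert(errorCount > 0);      /* fails: prints the banner, then abort()s */
        return 0;
    }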
- assert(yytext[0] == '#'); + Assert(yytext[0] == '#'); if (yytext[1] == ' ') // On Linux/OSX, the preprocessor gives us lines like // # 1234 "foo.c" @@ -337,7 +337,7 @@ static void lHandleCppHash(SourcePos *pos) { else { // On windows, cl.exe's preprocessor gives us lines of the form: // #line 1234 "foo.c" - assert(!strncmp(yytext+1, "line ", 5)); + Assert(!strncmp(yytext+1, "line ", 5)); ptr = yytext + 6; } @@ -347,13 +347,13 @@ static void lHandleCppHash(SourcePos *pos) { pos->last_column = 1; // Make sure that the character after the integer is a space and that // then we have open quotes - assert(src != ptr && src[0] == ' ' && src[1] == '"'); + Assert(src != ptr && src[0] == ' ' && src[1] == '"'); src += 2; // And the filename is everything up until the closing quotes std::string filename; while (*src != '"') { - assert(*src && *src != '\n'); + Assert(*src && *src != '\n'); filename.push_back(*src); ++src; } @@ -454,13 +454,13 @@ ipow2(int exponent) { */ static double lParseHexFloat(const char *ptr) { - assert(ptr != NULL); + Assert(ptr != NULL); - assert(ptr[0] == '0' && ptr[1] == 'x'); + Assert(ptr[0] == '0' && ptr[1] == 'x'); ptr += 2; // Start initializing the mantissa - assert(*ptr == '0' || *ptr == '1'); + Assert(*ptr == '0' || *ptr == '1'); double mantissa = (*ptr == '1') ? 1. : 0.; ++ptr; @@ -480,7 +480,7 @@ lParseHexFloat(const char *ptr) { else if (*ptr >= 'a' && *ptr <= 'f') digit = 10 + *ptr - 'a'; else { - assert(*ptr >= 'A' && *ptr <= 'F'); + Assert(*ptr >= 'A' && *ptr <= 'F'); digit = 10 + *ptr - 'A'; } @@ -493,7 +493,7 @@ lParseHexFloat(const char *ptr) { else // If there's not a '.', then we better be going straight to the // exponent - assert(*ptr == 'p'); + Assert(*ptr == 'p'); ++ptr; // skip the 'p' diff --git a/llvmutil.cpp b/llvmutil.cpp index 34e830d5..6c440a91 100644 --- a/llvmutil.cpp +++ b/llvmutil.cpp @@ -424,7 +424,7 @@ LLVMBoolVector(bool b) { v = llvm::ConstantInt::get(LLVMTypes::Int32Type, b ? 0xffffffff : 0, false /*unsigned*/); else { - assert(LLVMTypes::BoolVectorType->getElementType() == + Assert(LLVMTypes::BoolVectorType->getElementType() == llvm::Type::getInt1Ty(*g->ctx)); v = b ? LLVMTrue : LLVMFalse; } @@ -445,7 +445,7 @@ LLVMBoolVector(const bool *bvec) { v = llvm::ConstantInt::get(LLVMTypes::Int32Type, bvec[i] ? 0xffffffff : 0, false /*unsigned*/); else { - assert(LLVMTypes::BoolVectorType->getElementType() == + Assert(LLVMTypes::BoolVectorType->getElementType() == llvm::Type::getInt1Ty(*g->ctx)); v = bvec[i] ? 
LLVMTrue : LLVMFalse; } diff --git a/main.cpp b/main.cpp index 995420b7..1ab765c1 100644 --- a/main.cpp +++ b/main.cpp @@ -167,10 +167,12 @@ int main(int Argc, char *Argv[]) { char *argv[128]; lGetAllArgs(Argc, Argv, argc, argv); +#if 0 // Use LLVM's little utility function to print out nice stack traces if // we crash llvm::sys::PrintStackTraceOnErrorSignal(); llvm::PrettyStackTraceProgram X(argc, argv); +#endif // initialize available LLVM targets LLVMInitializeX86TargetInfo(); diff --git a/module.cpp b/module.cpp index 80aec28b..7fd806a4 100644 --- a/module.cpp +++ b/module.cpp @@ -49,7 +49,6 @@ #include "llvmutil.h" #include -#include #include #include #include @@ -224,7 +223,7 @@ Module::AddGlobalVariable(Symbol *sym, Expr *initExpr, bool isConst) { if (sym == NULL || sym->type == NULL) { // But if these are NULL and there haven't been any previous // errors, something surprising is going on - assert(errorCount > 0); + Assert(errorCount > 0); return; } @@ -391,7 +390,7 @@ void Module::AddFunctionDeclaration(Symbol *funSym, bool isInline) { const FunctionType *functionType = dynamic_cast(funSym->type); - assert(functionType != NULL); + Assert(functionType != NULL); // If a global variable with the same name has already been declared // issue an error. @@ -418,7 +417,7 @@ Module::AddFunctionDeclaration(Symbol *funSym, bool isInline) { // allowed. const FunctionType *ofType = dynamic_cast(overloadFunc->type); - assert(ofType != NULL); + Assert(ofType != NULL); if (ofType->GetNumParameters() == functionType->GetNumParameters()) { int i; for (i = 0; i < functionType->GetNumParameters(); ++i) { @@ -573,7 +572,7 @@ Module::AddFunctionDeclaration(Symbol *funSym, bool isInline) { // Finally, we know all is good and we can add the function to the // symbol table bool ok = symbolTable->AddFunction(funSym); - assert(ok); + Assert(ok); } @@ -731,7 +730,7 @@ static void lVisitNode(const StructType *structType, std::map &structToNode, std::vector &sortedTypes) { - assert(structToNode.find(structType) != structToNode.end()); + Assert(structToNode.find(structType) != structToNode.end()); // Get the node that encodes the structs that this one is immediately // dependent on. StructDAGNode *node = structToNode[structType]; @@ -795,7 +794,7 @@ lEmitStructDecls(std::vector &structTypes, FILE *file) { if (hasIncomingEdges.find(structType) == hasIncomingEdges.end()) lVisitNode(structType, structToNode, sortedTypes); } - assert(sortedTypes.size() == structTypes.size()); + Assert(sortedTypes.size() == structTypes.size()); // And finally we can emit the struct declarations by going through the // sorted ones in order. @@ -830,10 +829,10 @@ lEmitEnumDecls(const std::vector &enumTypes, FILE *file) { // Print the individual enumerators for (int j = 0; j < enumTypes[i]->GetEnumeratorCount(); ++j) { const Symbol *e = enumTypes[i]->GetEnumerator(j); - assert(e->constValue != NULL); + Assert(e->constValue != NULL); unsigned int enumValue; int count = e->constValue->AsUInt32(&enumValue); - assert(count == 1); + Assert(count == 1); // Always print an initializer to set the value. 
We could be // 'clever' here and detect whether the implicit value given by @@ -899,7 +898,7 @@ lAddTypeIfNew(const Type *type, std::vector *exportedTypes) { return; const T *castType = dynamic_cast(type); - assert(castType != NULL); + Assert(castType != NULL); exportedTypes->push_back(castType); } @@ -936,7 +935,7 @@ lGetExportedTypes(const Type *type, else if (dynamic_cast(type) != NULL) lAddTypeIfNew(type, exportedVectorTypes); else - assert(dynamic_cast(type) != NULL); + Assert(dynamic_cast(type) != NULL); } @@ -967,7 +966,7 @@ lPrintFunctionDeclarations(FILE *file, const std::vector &funcs) { fprintf(file, "#ifdef __cplusplus\nextern \"C\" {\n#endif // __cplusplus\n"); for (unsigned int i = 0; i < funcs.size(); ++i) { const FunctionType *ftype = dynamic_cast(funcs[i]->type); - assert(ftype); + Assert(ftype); std::string decl = ftype->GetCDeclaration(funcs[i]->name); fprintf(file, " extern %s;\n", decl.c_str()); } @@ -992,7 +991,7 @@ lPrintExternGlobals(FILE *file, const std::vector &externGlobals) { static bool lIsExported(const Symbol *sym) { const FunctionType *ft = dynamic_cast(sym->type); - assert(ft); + Assert(ft); return ft->isExported; } @@ -1000,7 +999,7 @@ lIsExported(const Symbol *sym) { static bool lIsExternC(const Symbol *sym) { const FunctionType *ft = dynamic_cast(sym->type); - assert(ft); + Assert(ft); return ft->isExternC; } @@ -1186,9 +1185,9 @@ Module::execPreprocessor(const char* infilename, llvm::raw_string_ostream* ostre if (g->includeStdlib) { if (g->opt.disableAsserts) - opts.addMacroDef("assert(x)="); + opts.addMacroDef("Assert(x)="); else - opts.addMacroDef("assert(x)=__assert(#x, x)"); + opts.addMacroDef("Assert(x)=__Assert(#x, x)"); } for (unsigned int i = 0; i < g->cppArgs.size(); ++i) { @@ -1327,7 +1326,7 @@ lExtractAndRewriteGlobals(llvm::Module *module, Symbol *sym = m->symbolTable->LookupVariable(gv->getName().str().c_str()); - assert(sym != NULL); + Assert(sym != NULL); globals->push_back(RewriteGlobalInfo(gv, init, sym->pos)); } } @@ -1376,9 +1375,9 @@ lAddExtractedGlobals(llvm::Module *module, if (globals[j].size() > 0) { // There should be the same number of globals in the other // vectors, in the same order. - assert(globals[firstActive].size() == globals[j].size()); + Assert(globals[firstActive].size() == globals[j].size()); llvm::GlobalVariable *gv2 = globals[j][i].gv; - assert(gv2->getName() == gv->getName()); + Assert(gv2->getName() == gv->getName()); // It is possible that the types may not match, though--for // example, this happens with varying globals if we compile @@ -1432,7 +1431,7 @@ lCreateDispatchFunction(llvm::Module *module, llvm::Function *setISAFunc, // Grab the type of the function as well. if (ftype != NULL) - assert(ftype == funcs.func[i]->getFunctionType()); + Assert(ftype == funcs.func[i]->getFunctionType()); else ftype = funcs.func[i]->getFunctionType(); @@ -1520,7 +1519,7 @@ lCreateDispatchFunction(llvm::Module *module, llvm::Function *setISAFunc, // or some such, but we don't want to start imposing too much of a // runtime library requirement either... 
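The two addMacroDef() calls above determine how an Assert() written in ispc source is expanded by the preprocessor when the standard library is included. A sketch of the resulting expansions, using a hypothetical input statement:

    // Hypothetical line in an ispc program:
    Assert(index < count);

    // With g->opt.disableAsserts set, the definition "Assert(x)=" removes it entirely,
    // leaving only an empty statement:
    ;

    // Otherwise, "Assert(x)=__Assert(#x, x)" stringizes the condition and passes both
    // the text and the value through to the stdlib routine:
    __Assert("index < count", index < count);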
llvm::Function *abortFunc = module->getFunction("abort"); - assert(abortFunc); + Assert(abortFunc); llvm::CallInst::Create(abortFunc, "", bblock); // Return an undef value from the function here; we won't get to this @@ -1552,10 +1551,10 @@ lCreateDispatchModule(std::map &functions) // Get pointers to things we need below llvm::Function *setFunc = module->getFunction("__set_system_isa"); - assert(setFunc != NULL); + Assert(setFunc != NULL); llvm::Value *systemBestISAPtr = module->getGlobalVariable("__system_best_isa", true); - assert(systemBestISAPtr != NULL); + Assert(systemBestISAPtr != NULL); // For each exported function, create the dispatch function std::map::iterator iter; @@ -1601,7 +1600,7 @@ Module::CompileAndOutput(const char *srcFile, const char *arch, const char *cpu, else { // The user supplied multiple targets std::vector targets = lExtractTargets(target); - assert(targets.size() > 1); + Assert(targets.size() > 1); if (outFileName != NULL && strcmp(outFileName, "-") == 0) { Error(SourcePos(), "Multi-target compilation can't generate output " @@ -1678,7 +1677,7 @@ Module::CompileAndOutput(const char *srcFile, const char *arch, const char *cpu, int i = 1; while (i < Target::NUM_ISAS && firstTargetMachine == NULL) firstTargetMachine = targetMachines[i++]; - assert(firstTargetMachine != NULL); + Assert(firstTargetMachine != NULL); if (outFileName != NULL) { if (outputType == Bitcode) diff --git a/opt.cpp b/opt.cpp index f20badf1..f83e3766 100644 --- a/opt.cpp +++ b/opt.cpp @@ -152,19 +152,19 @@ lGetSourcePosFromMetadata(const llvm::Instruction *inst, SourcePos *pos) { // All of these asserts are things that FunctionEmitContext::addGSMetadata() is // expected to have done in its operation llvm::MDString *str = llvm::dyn_cast(filename->getOperand(0)); - assert(str); + Assert(str); llvm::ConstantInt *first_lnum = llvm::dyn_cast(first_line->getOperand(0)); - assert(first_lnum); + Assert(first_lnum); llvm::ConstantInt *first_colnum = llvm::dyn_cast(first_column->getOperand(0)); - assert(first_column); + Assert(first_column); llvm::ConstantInt *last_lnum = llvm::dyn_cast(last_line->getOperand(0)); - assert(last_lnum); + Assert(last_lnum); llvm::ConstantInt *last_colnum = llvm::dyn_cast(last_column->getOperand(0)); - assert(last_column); + Assert(last_column); *pos = SourcePos(str->getString().data(), (int)first_lnum->getZExtValue(), (int)first_colnum->getZExtValue(), (int)last_lnum->getZExtValue(), @@ -440,7 +440,7 @@ IntrinsicsOpt::IntrinsicsOpt() #if defined(LLVM_3_0) || defined(LLVM_3_0svn) || defined(LLVM_3_1svn) llvm::Function *avxMovmsk = llvm::Intrinsic::getDeclaration(m->module, llvm::Intrinsic::x86_avx_movmsk_ps_256); - assert(avxMovmsk != NULL); + Assert(avxMovmsk != NULL); maskInstructions.push_back(avxMovmsk); #endif @@ -486,7 +486,7 @@ lGetMask(llvm::Value *factor) { else { // Otherwise get it as an int llvm::ConstantInt *ci = llvm::dyn_cast(elements[i]); - assert(ci != NULL); // vs return -1 if NULL? + Assert(ci != NULL); // vs return -1 if NULL? intMaskValue = ci->getValue(); } // Is the high-bit set? If so, OR in the appropriate bit in @@ -509,7 +509,7 @@ lGetMask(llvm::Value *factor) { factor = c; } // else we should be able to handle it above... 
- assert(!llvm::isa(factor)); + Assert(!llvm::isa(factor)); #endif return -1; } @@ -549,8 +549,8 @@ IntrinsicsOpt::runOnBasicBlock(llvm::BasicBlock &bb) { llvm::Intrinsic::getDeclaration(m->module, llvm::Intrinsic::x86_avx_maskstore_ps_256); llvm::Function *avxMaskedStore64 = llvm::Intrinsic::getDeclaration(m->module, llvm::Intrinsic::x86_avx_maskstore_pd_256); - assert(avxMaskedLoad32 != NULL && avxMaskedStore32 != NULL); - assert(avxMaskedLoad64 != NULL && avxMaskedStore64 != NULL); + Assert(avxMaskedLoad32 != NULL && avxMaskedStore32 != NULL); + Assert(avxMaskedLoad64 != NULL && avxMaskedStore64 != NULL); #endif bool modifiedAny = false; @@ -631,7 +631,7 @@ IntrinsicsOpt::runOnBasicBlock(llvm::BasicBlock &bb) { if (mask == 0) { // nothing being loaded, replace with undef value llvm::Type *returnType = callInst->getType(); - assert(llvm::isa(returnType)); + Assert(llvm::isa(returnType)); llvm::Value *undefValue = llvm::UndefValue::get(returnType); llvm::ReplaceInstWithValue(iter->getParent()->getInstList(), iter, undefValue); @@ -641,7 +641,7 @@ IntrinsicsOpt::runOnBasicBlock(llvm::BasicBlock &bb) { else if (mask == 0xff) { // all lanes active; replace with a regular load llvm::Type *returnType = callInst->getType(); - assert(llvm::isa(returnType)); + Assert(llvm::isa(returnType)); // cast the i8 * to the appropriate type llvm::Value *castPtr = new llvm::BitCastInst(callInst->getArgOperand(0), @@ -755,7 +755,7 @@ llvm::RegisterPass gsf("gs-flatten", "Gather/Scatter Fl static int64_t lGetIntValue(llvm::Value *offset) { llvm::ConstantInt *intOffset = llvm::dyn_cast(offset); - assert(intOffset && (intOffset->getBitWidth() == 32 || + Assert(intOffset && (intOffset->getBitWidth() == 32 || intOffset->getBitWidth() == 64)); return intOffset->getSExtValue(); } @@ -780,15 +780,15 @@ lFlattenInsertChain(llvm::InsertElementInst *ie, int vectorWidth, while (ie != NULL) { int64_t iOffset = lGetIntValue(ie->getOperand(2)); - assert(iOffset >= 0 && iOffset < vectorWidth); - assert(elements[iOffset] == NULL); + Assert(iOffset >= 0 && iOffset < vectorWidth); + Assert(elements[iOffset] == NULL); elements[iOffset] = ie->getOperand(1); llvm::Value *insertBase = ie->getOperand(0); ie = llvm::dyn_cast(insertBase); if (ie == NULL) - assert(llvm::isa(insertBase)); + Assert(llvm::isa(insertBase)); } } @@ -954,7 +954,7 @@ lGetBasePtrAndOffsets(llvm::Value *ptrs, llvm::Value **offsets) { if (elementBase == NULL) return NULL; - assert(delta[i] != NULL); + Assert(delta[i] != NULL); if (base == NULL) // The first time we've found a base pointer base = elementBase; @@ -964,7 +964,7 @@ lGetBasePtrAndOffsets(llvm::Value *ptrs, llvm::Value **offsets) { return NULL; } - assert(base != NULL); + Assert(base != NULL); #ifdef LLVM_2_9 *offsets = llvm::ConstantVector::get(delta); #else @@ -1031,7 +1031,7 @@ GatherScatterFlattenOpt::runOnBasicBlock(llvm::BasicBlock &bb) { }; int numGSFuncs = sizeof(gsFuncs) / sizeof(gsFuncs[0]); for (int i = 0; i < numGSFuncs; ++i) - assert(gsFuncs[i].func != NULL && gsFuncs[i].baseOffsetsFunc != NULL && + Assert(gsFuncs[i].func != NULL && gsFuncs[i].baseOffsetsFunc != NULL && gsFuncs[i].baseOffsets32Func != NULL); bool modifiedAny = false; @@ -1177,7 +1177,7 @@ struct MSInfo { MSInfo(const char *name, const int a) : align(a) { func = m->module->getFunction(name); - assert(func != NULL); + Assert(func != NULL); } llvm::Function *func; const int align; @@ -1321,7 +1321,7 @@ struct LMSInfo { pseudoFunc = m->module->getFunction(pname); blendFunc = m->module->getFunction(bname); maskedStoreFunc = 
m->module->getFunction(msname); - assert(pseudoFunc != NULL && blendFunc != NULL && + Assert(pseudoFunc != NULL && blendFunc != NULL && maskedStoreFunc != NULL); } llvm::Function *pseudoFunc; @@ -1455,7 +1455,7 @@ lValuesAreEqual(llvm::Value *v0, llvm::Value *v1, if (v0 == v1) return true; - assert(seenPhi0.size() == seenPhi1.size()); + Assert(seenPhi0.size() == seenPhi1.size()); for (unsigned int i = 0; i < seenPhi0.size(); ++i) if (v0 == seenPhi0[i] && v1 == seenPhi1[i]) return true; @@ -1485,7 +1485,7 @@ lValuesAreEqual(llvm::Value *v0, llvm::Value *v1, // then we're good. bool anyFailure = false; for (unsigned int i = 0; i < numIncoming; ++i) { - assert(phi0->getIncomingBlock(i) == phi1->getIncomingBlock(i)); + Assert(phi0->getIncomingBlock(i) == phi1->getIncomingBlock(i)); if (!lValuesAreEqual(phi0->getIncomingValue(i), phi1->getIncomingValue(i), seenPhi0, seenPhi1)) { anyFailure = true; @@ -1539,7 +1539,7 @@ lVectorValuesAllEqual(llvm::Value *v, int vectorLength, // probably to just ignore undef elements and return true if // all of the other ones are equal, but it'd be nice to have // some test cases to verify this. - assert(elements[i] != NULL && elements[i+1] != NULL); + Assert(elements[i] != NULL && elements[i+1] != NULL); std::vector seenPhi0; std::vector seenPhi1; @@ -1573,7 +1573,7 @@ lVectorValuesAllEqual(llvm::Value *v, int vectorLength, return true; } - assert(!llvm::isa(v)); + Assert(!llvm::isa(v)); if (llvm::isa(v) || llvm::isa(v) || !llvm::isa(v)) @@ -1618,7 +1618,7 @@ lVectorIsLinearConstantInts(llvm::ConstantVector *cv, int vectorLength, // Flatten the vector out into the elements array llvm::SmallVector elements; cv->getVectorElements(elements); - assert((int)elements.size() == vectorLength); + Assert((int)elements.size() == vectorLength); llvm::ConstantInt *ci = llvm::dyn_cast(elements[0]); if (ci == NULL) @@ -1792,7 +1792,7 @@ struct GatherImpInfo { loadBroadcastFunc = m->module->getFunction(lbName); loadMaskedFunc = m->module->getFunction(lmName); - assert(pseudoFunc != NULL && loadBroadcastFunc != NULL && + Assert(pseudoFunc != NULL && loadBroadcastFunc != NULL && loadMaskedFunc != NULL); } llvm::Function *pseudoFunc; @@ -1809,7 +1809,7 @@ struct ScatterImpInfo { pseudoFunc = m->module->getFunction(pName); maskedStoreFunc = m->module->getFunction(msName); vecPtrType = vpt; - assert(pseudoFunc != NULL && maskedStoreFunc != NULL); + Assert(pseudoFunc != NULL && maskedStoreFunc != NULL); } llvm::Function *pseudoFunc; llvm::Function *maskedStoreFunc; @@ -1888,7 +1888,7 @@ GSImprovementsPass::runOnBasicBlock(llvm::BasicBlock &bb) { SourcePos pos; bool ok = lGetSourcePosFromMetadata(callInst, &pos); - assert(ok); + Assert(ok); llvm::Value *base = callInst->getArgOperand(0); llvm::Value *offsets = callInst->getArgOperand(1); @@ -2066,7 +2066,7 @@ struct LowerGSInfo { : isGather(ig) { pseudoFunc = m->module->getFunction(pName); actualFunc = m->module->getFunction(aName); - assert(pseudoFunc != NULL && actualFunc != NULL); + Assert(pseudoFunc != NULL && actualFunc != NULL); } llvm::Function *pseudoFunc; llvm::Function *actualFunc; @@ -2143,7 +2143,7 @@ LowerGSPass::runOnBasicBlock(llvm::BasicBlock &bb) { // instruction so that we can issue PerformanceWarning()s below. 
SourcePos pos; bool ok = lGetSourcePosFromMetadata(callInst, &pos); - assert(ok); + Assert(ok); callInst->setCalledFunction(info->actualFunc); if (info->isGather) diff --git a/parse.yy b/parse.yy index 239f5239..3117365e 100644 --- a/parse.yy +++ b/parse.yy @@ -327,7 +327,7 @@ argument_expression_list | argument_expression_list ',' assignment_expression { ExprList *argList = dynamic_cast($1); - assert(argList != NULL); + Assert(argList != NULL); argList->exprs.push_back($3); argList->pos = Union(argList->pos, @3); $$ = argList; @@ -629,7 +629,7 @@ type_specifier : atomic_var_type_specifier { $$ = $1; } | TOKEN_TYPE_NAME { const Type *t = m->symbolTable->LookupType(yytext); - assert(t != NULL); + Assert(t != NULL); $$ = t; } | struct_or_union_specifier { $$ = $1; } @@ -1223,7 +1223,7 @@ initializer_list $$ = NULL; else { ExprList *exprList = dynamic_cast($1); - assert(exprList); + Assert(exprList); exprList->exprs.push_back($3); exprList->pos = Union(exprList->pos, @3); $$ = exprList; @@ -1554,7 +1554,7 @@ lAddDeclaration(DeclSpecs *ds, Declarator *decl) { const FunctionType *ft = dynamic_cast(t); if (ft != NULL) { Symbol *funSym = decl->GetSymbol(); - assert(funSym != NULL); + Assert(funSym != NULL); funSym->type = ft; funSym->storageClass = ds->storageClass; @@ -1578,7 +1578,7 @@ lAddFunctionParams(Declarator *decl) { // walk down to the declarator for the function itself while (decl->kind != DK_FUNCTION && decl->child != NULL) decl = decl->child; - assert(decl->kind == DK_FUNCTION); + Assert(decl->kind == DK_FUNCTION); // now loop over its parameters and add them to the symbol table for (unsigned int i = 0; i < decl->functionParams.size(); ++i) { @@ -1587,12 +1587,12 @@ lAddFunctionParams(Declarator *decl) { // zero size declarators array corresponds to an anonymous // parameter continue; - assert(pdecl->declarators.size() == 1); + Assert(pdecl->declarators.size() == 1); Symbol *sym = pdecl->declarators[0]->GetSymbol(); #ifndef NDEBUG bool ok = m->symbolTable->AddVariable(sym); if (ok == false) - assert(m->errorCount > 0); + Assert(m->errorCount > 0); #else m->symbolTable->AddVariable(sym); #endif @@ -1659,7 +1659,7 @@ lGetStorageClassString(StorageClass sc) { case SC_EXTERN_C: return "extern \"C\""; default: - assert(!"logic error in lGetStorageClassString()"); + Assert(!"logic error in lGetStorageClassString()"); return ""; } } @@ -1743,7 +1743,7 @@ lFinalizeEnumeratorSymbols(std::vector &enums, if (enums[i]->constValue != NULL) { /* Already has a value, so first update nextVal with it. 
*/ int count = enums[i]->constValue->AsUInt32(&nextVal); - assert(count == 1); + Assert(count == 1); ++nextVal; /* When the source file as being parsed, the ConstExpr for any @@ -1756,7 +1756,7 @@ lFinalizeEnumeratorSymbols(std::vector &enums, false, enums[i]->pos); castExpr = castExpr->Optimize(); enums[i]->constValue = dynamic_cast(castExpr); - assert(enums[i]->constValue != NULL); + Assert(enums[i]->constValue != NULL); } else { enums[i]->constValue = new ConstExpr(enumType, nextVal++, diff --git a/stmt.cpp b/stmt.cpp index 48b30c60..8a34ba1c 100644 --- a/stmt.cpp +++ b/stmt.cpp @@ -287,7 +287,7 @@ DeclStmt::EmitCode(FunctionEmitContext *ctx) const { for (unsigned int i = 0; i < vars.size(); ++i) { Symbol *sym = vars[i].sym; - assert(sym != NULL); + Assert(sym != NULL); if (sym->type == NULL) continue; Expr *initExpr = vars[i].init; @@ -324,7 +324,7 @@ DeclStmt::EmitCode(FunctionEmitContext *ctx) const { LLVM_TYPE_CONST llvm::Type *llvmType = sym->type->LLVMType(g->ctx); if (llvmType == NULL) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return; } @@ -645,12 +645,12 @@ IfStmt::emitMaskedTrueAndFalse(FunctionEmitContext *ctx, llvm::Value *oldMask, lEmitIfStatements(ctx, trueStmts, "if: expr mixed, true statements"); // under varying control flow,, returns can't stop instruction // emission, so this better be non-NULL... - assert(ctx->GetCurrentBasicBlock()); + Assert(ctx->GetCurrentBasicBlock()); } if (falseStmts) { ctx->SetInternalMaskAndNot(oldMask, test); lEmitIfStatements(ctx, falseStmts, "if: expr mixed, false statements"); - assert(ctx->GetCurrentBasicBlock()); + Assert(ctx->GetCurrentBasicBlock()); } } @@ -724,7 +724,7 @@ lSafeToRunWithAllLanesOff(Expr *expr) { const SequentialType *seqType = dynamic_cast(type); - assert(seqType != NULL); + Assert(seqType != NULL); int nElements = seqType->GetElementCount(); if (nElements == 0) // Unsized array, so we can't be sure @@ -930,7 +930,7 @@ IfStmt::emitVaryingIf(FunctionEmitContext *ctx, llvm::Value *ltest) const { (costIsAcceptable || g->opt.disableCoherentControlFlow)) { ctx->StartVaryingIf(oldMask); emitMaskedTrueAndFalse(ctx, oldMask, ltest); - assert(ctx->GetCurrentBasicBlock()); + Assert(ctx->GetCurrentBasicBlock()); ctx->EndIf(); } else { @@ -953,7 +953,7 @@ IfStmt::emitMaskAllOn(FunctionEmitContext *ctx, llvm::Value *ltest, // compiler see what's going on so that subsequent optimizations for // code emitted here can operate with the knowledge that the mask is // definitely all on (until it modifies the mask itself). - assert(!g->opt.disableCoherentControlFlow); + Assert(!g->opt.disableCoherentControlFlow); if (!g->opt.disableMaskAllOnOptimizations) ctx->SetInternalMask(LLVMMaskAllOn); llvm::Value *oldFunctionMask = ctx->GetFunctionMask(); @@ -1003,7 +1003,7 @@ IfStmt::emitMaskAllOn(FunctionEmitContext *ctx, llvm::Value *ltest, emitMaskedTrueAndFalse(ctx, LLVMMaskAllOn, ltest); // In this case, return/break/continue isn't allowed to jump and end // emission. 
- assert(ctx->GetCurrentBasicBlock()); + Assert(ctx->GetCurrentBasicBlock()); ctx->EndIf(); ctx->BranchInst(bDone); @@ -1032,7 +1032,7 @@ IfStmt::emitMaskMixed(FunctionEmitContext *ctx, llvm::Value *oldMask, // Emit statements for true ctx->SetCurrentBasicBlock(bRunTrue); lEmitIfStatements(ctx, trueStmts, "if: expr mixed, true statements"); - assert(ctx->GetCurrentBasicBlock()); + Assert(ctx->GetCurrentBasicBlock()); ctx->BranchInst(bNext); ctx->SetCurrentBasicBlock(bNext); } @@ -1049,7 +1049,7 @@ IfStmt::emitMaskMixed(FunctionEmitContext *ctx, llvm::Value *oldMask, // Emit code for false ctx->SetCurrentBasicBlock(bRunFalse); lEmitIfStatements(ctx, falseStmts, "if: expr mixed, false statements"); - assert(ctx->GetCurrentBasicBlock()); + Assert(ctx->GetCurrentBasicBlock()); ctx->BranchInst(bNext); ctx->SetCurrentBasicBlock(bNext); } @@ -1167,7 +1167,7 @@ void DoStmt::EmitCode(FunctionEmitContext *ctx) const { ctx->SetFunctionMask(LLVMMaskAllOn); if (bodyStmts) bodyStmts->EmitCode(ctx); - assert(ctx->GetCurrentBasicBlock()); + Assert(ctx->GetCurrentBasicBlock()); ctx->SetFunctionMask(oldFunctionMask); ctx->BranchInst(btest); @@ -1175,7 +1175,7 @@ void DoStmt::EmitCode(FunctionEmitContext *ctx) const { ctx->SetCurrentBasicBlock(bMixed); if (bodyStmts) bodyStmts->EmitCode(ctx); - assert(ctx->GetCurrentBasicBlock()); + Assert(ctx->GetCurrentBasicBlock()); ctx->BranchInst(btest); } else { @@ -1328,7 +1328,7 @@ ForStmt::EmitCode(FunctionEmitContext *ctx) const { // it and then jump into the loop test code. (Also start a new scope // since the initiailizer may be a declaration statement). if (init) { - assert(dynamic_cast(init) == NULL); + Assert(dynamic_cast(init) == NULL); ctx->StartScope(); init->EmitCode(ctx); } @@ -1356,7 +1356,7 @@ ForStmt::EmitCode(FunctionEmitContext *ctx) const { if (uniformTest) { if (doCoherentCheck) Warning(pos, "Uniform condition supplied to cfor/cwhile statement."); - assert(ltest->getType() == LLVMTypes::BoolType); + Assert(ltest->getType() == LLVMTypes::BoolType); ctx->BranchInst(bloop, bexit, ltest); } else { @@ -1392,7 +1392,7 @@ ForStmt::EmitCode(FunctionEmitContext *ctx) const { ctx->SetFunctionMask(LLVMMaskAllOn); if (stmts) stmts->EmitCode(ctx); - assert(ctx->GetCurrentBasicBlock()); + Assert(ctx->GetCurrentBasicBlock()); ctx->SetFunctionMask(oldFunctionMask); ctx->BranchInst(bstep); @@ -1741,7 +1741,7 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { ctx->StartScope(); // This should be caught during typechecking - assert(startExprs.size() == dimVariables.size() && + Assert(startExprs.size() == dimVariables.size() && endExprs.size() == dimVariables.size()); int nDims = (int)dimVariables.size(); @@ -1923,7 +1923,7 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { ctx->SetInternalMask(LLVMMaskAllOn); ctx->AddInstrumentationPoint("foreach loop body"); stmts->EmitCode(ctx); - assert(ctx->GetCurrentBasicBlock() != NULL); + Assert(ctx->GetCurrentBasicBlock() != NULL); ctx->BranchInst(bbStep[nDims-1]); /////////////////////////////////////////////////////////////////////////// @@ -2351,7 +2351,7 @@ PrintStmt::EmitCode(FunctionEmitContext *ctx) const { // Now we can emit code to call __do_print() llvm::Function *printFunc = m->module->getFunction("__do_print"); - assert(printFunc); + Assert(printFunc); llvm::Value *mask = ctx->GetFullMask(); // Set up the rest of the parameters to it @@ -2414,7 +2414,7 @@ AssertStmt::EmitCode(FunctionEmitContext *ctx) const { llvm::Function *assertFunc = isUniform ? 
m->module->getFunction("__do_assert_uniform") : m->module->getFunction("__do_assert_varying"); - assert(assertFunc != NULL); + Assert(assertFunc != NULL); #ifdef ISPC_IS_WINDOWS char errorString[2048]; diff --git a/stmt.h b/stmt.h index 928a5e7a..2ce97753 100644 --- a/stmt.h +++ b/stmt.h @@ -341,7 +341,7 @@ public: Like print() above, since we don't have strings as first-class types in the language, we need to do some gymnastics to support it. Like - assert() in C, assert checks the given condition and prints an error + assert() in C, assert() checks the given condition and prints an error and calls abort if the condition fails. For varying conditions, the assert triggers if it's true for any of the program instances. */ diff --git a/sym.cpp b/sym.cpp index 82416a88..b33e8cbe 100644 --- a/sym.cpp +++ b/sym.cpp @@ -72,7 +72,7 @@ SymbolTable::SymbolTable() { SymbolTable::~SymbolTable() { // Otherwise we have mismatched push/pop scopes - assert(variables.size() == 1 && functions.size() == 1 && + Assert(variables.size() == 1 && functions.size() == 1 && types.size() == 1); PopScope(); } @@ -88,15 +88,15 @@ SymbolTable::PushScope() { void SymbolTable::PopScope() { - assert(variables.size() > 1); + Assert(variables.size() > 1); delete variables.back(); variables.pop_back(); - assert(functions.size() > 1); + Assert(functions.size() > 1); delete functions.back(); functions.pop_back(); - assert(types.size() > 1); + Assert(types.size() > 1); delete types.back(); types.pop_back(); } @@ -104,7 +104,7 @@ SymbolTable::PopScope() { bool SymbolTable::AddVariable(Symbol *symbol) { - assert(symbol != NULL); + Assert(symbol != NULL); // Check to see if a symbol of the same name has already been declared. for (int i = (int)variables.size() - 1; i >= 0; --i) { @@ -154,7 +154,7 @@ SymbolTable::LookupVariable(const char *name) { bool SymbolTable::AddFunction(Symbol *symbol) { const FunctionType *ft = dynamic_cast(symbol->type); - assert(ft != NULL); + Assert(ft != NULL); if (LookupFunction(symbol->name.c_str(), ft) != NULL) // A function of the same name and type has already been added to // the symbol table diff --git a/test_static.cpp b/test_static.cpp index f56d0506..ad35ae0e 100644 --- a/test_static.cpp +++ b/test_static.cpp @@ -102,7 +102,7 @@ void *ISPCAlloc(void **handle, int64_t size, int32_t alignment) { int main(int argc, char *argv[]) { int w = width(); - assert(w <= 16); + Assert(w <= 16); float returned_result[16]; for (int i = 0; i < 16; ++i) diff --git a/type.cpp b/type.cpp index 84538d23..cbc011a1 100644 --- a/type.cpp +++ b/type.cpp @@ -293,7 +293,7 @@ AtomicType::GetAsUniformType() const { const Type * AtomicType::GetSOAType(int width) const { - assert(width > 0); + Assert(width > 0); return new ArrayType(this, width); } @@ -354,7 +354,7 @@ std::string AtomicType::GetCDeclaration(const std::string &name) const { std::string ret; if (isUniform == false) { - assert(m->errorCount > 0); + Assert(m->errorCount > 0); return ret; } if (isConst) ret += "const "; @@ -570,7 +570,7 @@ EnumType::GetAsUniformType() const { const Type * EnumType::GetSOAType(int width) const { - assert(width > 0); + Assert(width > 0); return new ArrayType(this, width); } @@ -644,9 +644,9 @@ EnumType::GetDIType(llvm::DIDescriptor scope) const { std::vector enumeratorDescriptors; for (unsigned int i = 0; i < enumerators.size(); ++i) { unsigned int enumeratorValue; - assert(enumerators[i]->constValue != NULL); + Assert(enumerators[i]->constValue != NULL); int count = enumerators[i]->constValue->AsUInt32(&enumeratorValue); - 
assert(count == 1); + Assert(count == 1); llvm::Value *descriptor = m->diBuilder->createEnumerator(enumerators[i]->name, enumeratorValue); @@ -938,7 +938,7 @@ const Type *SequentialType::GetElementType(int index) const { ArrayType::ArrayType(const Type *c, int a) : child(c), numElements(a) { // 0 -> unsized array. - assert(numElements >= 0); + Assert(numElements >= 0); } @@ -1137,7 +1137,7 @@ ArrayType::GetDIType(llvm::DIDescriptor scope) const { ArrayType * ArrayType::GetSizedArray(int sz) const { - assert(numElements == 0); + Assert(numElements == 0); return new ArrayType(child, sz); } @@ -1178,7 +1178,7 @@ ArrayType::SizeUnsizedArrays(const Type *type, Expr *initExpr) { for (unsigned int i = 1; i < exprList->exprs.size(); ++i) { if (exprList->exprs[i] == NULL) { // We should have seen an error earlier in this case. - assert(m->errorCount > 0); + Assert(m->errorCount > 0); continue; } @@ -1204,9 +1204,9 @@ ArrayType::SizeUnsizedArrays(const Type *type, Expr *initExpr) { SOAArrayType::SOAArrayType(const StructType *eltType, int nElem, int sw) : ArrayType(eltType, nElem), soaWidth(sw) { - assert(soaWidth > 0); + Assert(soaWidth > 0); if (numElements > 0) - assert((numElements % soaWidth) == 0); + Assert((numElements % soaWidth) == 0); } @@ -1337,8 +1337,8 @@ SOAArrayType::soaType() const { VectorType::VectorType(const AtomicType *b, int a) : base(b), numElements(a) { - assert(numElements > 0); - assert(base != NULL); + Assert(numElements > 0); + Assert(base != NULL); } @@ -1719,7 +1719,7 @@ StructType::GetDIType(llvm::DIDescriptor scope) const { // element starts at an offset that's the correct alignment. if (currentSize > 0 && (currentSize % eltAlign)) currentSize += eltAlign - (currentSize % eltAlign); - assert((currentSize == 0) || (currentSize % eltAlign) == 0); + Assert((currentSize == 0) || (currentSize % eltAlign) == 0); llvm::DIFile diFile = elementPositions[i].GetDIFile(); int line = elementPositions[i].first_line; @@ -1758,7 +1758,7 @@ StructType::GetDIType(llvm::DIDescriptor scope) const { const Type * StructType::GetElementType(int i) const { - assert(i < (int)elementTypes.size()); + Assert(i < (int)elementTypes.size()); // If the struct is uniform qualified, then each member comes out with // the same type as in the original source file. If it's varying, then // all members are promoted to varying. 
@@ -1958,7 +1958,7 @@ FunctionType::FunctionType(const Type *r, const std::vector &a, paramTypes(a), paramNames(std::vector(a.size(), "")), paramDefaults(std::vector(a.size(), NULL)), paramPositions(std::vector(a.size(), p)) { - assert(returnType != NULL); + Assert(returnType != NULL); } @@ -1969,10 +1969,10 @@ FunctionType::FunctionType(const Type *r, const std::vector &a, bool it, bool is, bool ec) : isTask(it), isExported(is), isExternC(ec), returnType(r), paramTypes(a), paramNames(an), paramDefaults(ad), paramPositions(ap) { - assert(paramTypes.size() == paramNames.size() && + Assert(paramTypes.size() == paramNames.size() && paramNames.size() == paramDefaults.size() && paramDefaults.size() == paramPositions.size()); - assert(returnType != NULL); + Assert(returnType != NULL); } @@ -2127,14 +2127,14 @@ FunctionType::GetDIType(llvm::DIDescriptor scope) const { LLVM_TYPE_CONST llvm::FunctionType * FunctionType::LLVMFunctionType(llvm::LLVMContext *ctx, bool includeMask) const { - if (isTask == true) assert(includeMask == true); + if (isTask == true) Assert(includeMask == true); // Get the LLVM Type *s for the function arguments std::vector llvmArgTypes; for (unsigned int i = 0; i < paramTypes.size(); ++i) { if (!paramTypes[i]) return NULL; - assert(paramTypes[i] != AtomicType::Void); + Assert(paramTypes[i] != AtomicType::Void); LLVM_TYPE_CONST llvm::Type *t = paramTypes[i]->LLVMType(ctx); if (t == NULL) @@ -2170,28 +2170,28 @@ FunctionType::LLVMFunctionType(llvm::LLVMContext *ctx, bool includeMask) const { const Type * FunctionType::GetParameterType(int i) const { - assert(i < (int)paramTypes.size()); + Assert(i < (int)paramTypes.size()); return paramTypes[i]; } ConstExpr * FunctionType::GetParameterDefault(int i) const { - assert(i < (int)paramDefaults.size()); + Assert(i < (int)paramDefaults.size()); return paramDefaults[i]; } const SourcePos & FunctionType::GetParameterSourcePos(int i) const { - assert(i < (int)paramPositions.size()); + Assert(i < (int)paramPositions.size()); return paramPositions[i]; } const std::string & FunctionType::GetParameterName(int i) const { - assert(i < (int)paramNames.size()); + Assert(i < (int)paramNames.size()); return paramNames[i]; } @@ -2244,7 +2244,7 @@ lVectorConvert(const Type *type, SourcePos pos, const char *reason, int vecSize) const Type * Type::MoreGeneralType(const Type *t0, const Type *t1, SourcePos pos, const char *reason, bool forceVarying, int vecSize) { - assert(reason != NULL); + Assert(reason != NULL); // First, if we need to go varying, promote both of the types to be // varying. @@ -2315,7 +2315,7 @@ Type::MoreGeneralType(const Type *t0, const Type *t1, SourcePos pos, const char // The 'more general' version of the two vector element types must // be an AtomicType (that's all that vectors can hold...) 
const AtomicType *at = dynamic_cast(t); - assert(at != NULL); + Assert(at != NULL); return new VectorType(at, vt0->GetElementCount()); } @@ -2330,7 +2330,7 @@ Type::MoreGeneralType(const Type *t0, const Type *t1, SourcePos pos, const char return NULL; const AtomicType *at = dynamic_cast(t); - assert(at != NULL); + Assert(at != NULL); return new VectorType(at, vt0->GetElementCount()); } else if (vt1) { @@ -2342,7 +2342,7 @@ Type::MoreGeneralType(const Type *t0, const Type *t1, SourcePos pos, const char return NULL; const AtomicType *at = dynamic_cast(t); - assert(at != NULL); + Assert(at != NULL); return new VectorType(at, vt1->GetElementCount()); } @@ -2355,7 +2355,7 @@ Type::MoreGeneralType(const Type *t0, const Type *t1, SourcePos pos, const char const EnumType *et1 = dynamic_cast(t1->GetReferenceTarget()); if (et0 != NULL && et1 != NULL) { // Two different enum types -> make them uint32s... - assert(et0->IsVaryingType() == et1->IsVaryingType()); + Assert(et0->IsVaryingType() == et1->IsVaryingType()); return et0->IsVaryingType() ? AtomicType::VaryingUInt32 : AtomicType::UniformUInt32; } @@ -2386,7 +2386,7 @@ Type::MoreGeneralType(const Type *t0, const Type *t1, SourcePos pos, const char // Now all we can do is promote atomic types... if (at0 == NULL || at1 == NULL) { - assert(reason != NULL); + Assert(reason != NULL); Error(pos, "Implicit conversion from type \"%s\" to \"%s\" for %s not possible.", t0->GetString().c_str(), t1->GetString().c_str(), reason); return NULL; diff --git a/util.cpp b/util.cpp index 97606a5b..008fc5a3 100644 --- a/util.cpp +++ b/util.cpp @@ -45,7 +45,6 @@ #include #include -#include #include #include #include @@ -147,7 +146,7 @@ lPrintWithWordBreaks(const char *buf, int columnWidth, FILE *out) { fputs(buf, out); #else int column = 0; - assert(strchr(buf, ':') != NULL); + Assert(strchr(buf, ':') != NULL); int indent = strchr(buf, ':') - buf + 2; int width = std::max(40, columnWidth - 2); @@ -313,6 +312,12 @@ PerformanceWarning(SourcePos p, const char *fmt, ...) { void FatalError(const char *file, int line, const char *message) { fprintf(stderr, "%s(%d): FATAL ERROR: %s\n", file, line, message); + fprintf(stderr, "***\n" + "*** Please file a bug report at https://github.com/ispc/ispc/issues\n" + "*** (Including as much information as you can about how to " + "reproduce this error).\n" + "*** You have apparently encountered a bug in the compiler that we'd " + "like to fix!\n***\n"); abort(); } @@ -392,7 +397,7 @@ GetDirectoryAndFileName(const std::string ¤tDirectory, char path[MAX_PATH]; const char *combPath = PathCombine(path, currentDirectory.c_str(), relativeName.c_str()); - assert(combPath != NULL); + Assert(combPath != NULL); const char *filenamePtr = PathFindFileName(combPath); *filename = filenamePtr; *directory = std::string(combPath, filenamePtr - combPath); @@ -414,9 +419,9 @@ GetDirectoryAndFileName(const std::string ¤tDirectory, // now, we need to separate it into the base name and the directory const char *fp = fullPath.c_str(); const char *basenameStart = strrchr(fp, '/'); - assert(basenameStart != NULL); + Assert(basenameStart != NULL); ++basenameStart; - assert(basenameStart != '\0'); + Assert(basenameStart != '\0'); *filename = basenameStart; *directory = std::string(fp, basenameStart - fp); #endif // ISPC_IS_WINDOWS
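With the fprintf() calls added to FatalError() above, an internal FATAL() failure now asks the user to report a compiler bug before aborting. Based on those format strings, the stderr output looks roughly like the following; the file name, line number, and message text here are hypothetical placeholders:

    somefile.cpp(123): FATAL ERROR: <error message>
    ***
    *** Please file a bug report at https://github.com/ispc/ispc/issues
    *** (Including as much information as you can about how to reproduce this error).
    *** You have apparently encountered a bug in the compiler that we'd like to fix!
    ***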