diff --git a/ast.cpp b/ast.cpp
index beb6004f..3bd77e9b 100644
--- a/ast.cpp
+++ b/ast.cpp
@@ -28,7 +28,7 @@
    PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

 /** @file ast.cpp
@@ -109,20 +109,20 @@ WalkAST(ASTNode *node, ASTPreCallBackFunc preFunc, ASTPostCallBackFunc postFunc,
         es->expr = (Expr *)WalkAST(es->expr, preFunc, postFunc, data);
     else if ((ds = dynamic_cast<DeclStmt *>(node)) != NULL) {
         for (unsigned int i = 0; i < ds->vars.size(); ++i)
-            ds->vars[i].init = (Expr *)WalkAST(ds->vars[i].init, preFunc, 
+            ds->vars[i].init = (Expr *)WalkAST(ds->vars[i].init, preFunc,
                                                postFunc, data);
     }
     else if ((is = dynamic_cast<IfStmt *>(node)) != NULL) {
         is->test = (Expr *)WalkAST(is->test, preFunc, postFunc, data);
-        is->trueStmts = (Stmt *)WalkAST(is->trueStmts, preFunc, 
+        is->trueStmts = (Stmt *)WalkAST(is->trueStmts, preFunc,
                                         postFunc, data);
-        is->falseStmts = (Stmt *)WalkAST(is->falseStmts, preFunc, 
+        is->falseStmts = (Stmt *)WalkAST(is->falseStmts, preFunc,
                                          postFunc, data);
     }
     else if ((dos = dynamic_cast<DoStmt *>(node)) != NULL) {
-        dos->testExpr = (Expr *)WalkAST(dos->testExpr, preFunc, 
+        dos->testExpr = (Expr *)WalkAST(dos->testExpr, preFunc,
                                         postFunc, data);
-        dos->bodyStmts = (Stmt *)WalkAST(dos->bodyStmts, preFunc, 
+        dos->bodyStmts = (Stmt *)WalkAST(dos->bodyStmts, preFunc,
                                          postFunc, data);
     }
     else if ((fs = dynamic_cast<ForStmt *>(node)) != NULL) {
@@ -133,10 +133,10 @@ WalkAST(ASTNode *node, ASTPreCallBackFunc preFunc, ASTPostCallBackFunc postFunc,
     }
     else if ((fes = dynamic_cast<ForeachStmt *>(node)) != NULL) {
         for (unsigned int i = 0; i < fes->startExprs.size(); ++i)
-            fes->startExprs[i] = (Expr *)WalkAST(fes->startExprs[i], preFunc, 
+            fes->startExprs[i] = (Expr *)WalkAST(fes->startExprs[i], preFunc,
                                                  postFunc, data);
         for (unsigned int i = 0; i < fes->endExprs.size(); ++i)
-            fes->endExprs[i] = (Expr *)WalkAST(fes->endExprs[i], preFunc, 
+            fes->endExprs[i] = (Expr *)WalkAST(fes->endExprs[i], preFunc,
                                                postFunc, data);
         fes->stmts = (Stmt *)WalkAST(fes->stmts, preFunc, postFunc, data);
     }
@@ -217,7 +217,7 @@ WalkAST(ASTNode *node, ASTPreCallBackFunc preFunc, ASTPostCallBackFunc postFunc,
     }
     else if ((el = dynamic_cast<ExprList *>(node)) != NULL) {
         for (unsigned int i = 0; i < el->exprs.size(); ++i)
-            el->exprs[i] = (Expr *)WalkAST(el->exprs[i], preFunc, 
+            el->exprs[i] = (Expr *)WalkAST(el->exprs[i], preFunc,
                                            postFunc, data);
     }
     else if ((fce = dynamic_cast<FunctionCallExpr *>(node)) != NULL) {
@@ -247,9 +247,9 @@ WalkAST(ASTNode *node, ASTPreCallBackFunc preFunc, ASTPostCallBackFunc postFunc,
     else if ((aoe = dynamic_cast<AddressOfExpr *>(node)) != NULL)
         aoe->expr = (Expr *)WalkAST(aoe->expr, preFunc, postFunc, data);
     else if ((newe = dynamic_cast<NewExpr *>(node)) != NULL) {
-        newe->countExpr = (Expr *)WalkAST(newe->countExpr, preFunc, 
+        newe->countExpr = (Expr *)WalkAST(newe->countExpr, preFunc,
                                           postFunc, data);
-        newe->initExpr = (Expr *)WalkAST(newe->initExpr, preFunc, 
+        newe->initExpr = (Expr *)WalkAST(newe->initExpr, preFunc,
                                          postFunc, data);
     }
     else if (dynamic_cast<SymbolExpr *>(node) != NULL ||
@@ -257,9 +257,9 @@ WalkAST(ASTNode *node, ASTPreCallBackFunc preFunc, ASTPostCallBackFunc postFunc,
              dynamic_cast<ConstExpr *>(node) != NULL ||
              dynamic_cast<FunctionSymbolExpr *>(node) != NULL ||
              dynamic_cast<SyncExpr *>(node) != NULL) {
-        // nothing to do 
+        // nothing to do
     }
-    else 
+    else
         FATAL("Unhandled expression type in WalkAST().");
 }

diff --git a/ast.h b/ast.h
index f03d7343..d98c1d37 100644
--- a/ast.h
+++ b/ast.h
@@ -28,11 +28,11 @@
    PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

 /** @file ast.h
-    @brief 
+    @brief
 */

 #ifndef ISPC_AST_H
@@ -121,12 +121,12 @@ extern ASTNode *Optimize(ASTNode *root);

 /** Convenience version of Optimize() for Expr *s that returns an Expr *
     (rather than an ASTNode *, which would require the caller to cast back
-    to an Expr *). */ 
+    to an Expr *). */
 extern Expr *Optimize(Expr *);

 /** Convenience version of Optimize() for Stmt *s that returns a Stmt *
     (rather than an ASTNode *, which would require the caller to cast back
-    to a Stmt *). */ 
+    to a Stmt *). */
 extern Stmt *Optimize(Stmt *);

 /** Perform type-checking on the given AST (or portion of one), returning a
@@ -144,7 +144,7 @@ extern Stmt *TypeCheck(Stmt *);
 extern int EstimateCost(ASTNode *root);

 /** Returns true if it would be safe to run the given code with an "all
-    off" mask. */ 
+    off" mask. */
 extern bool SafeToRunWithMaskAllOff(ASTNode *root);

 #endif // ISPC_AST_H
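The WalkAST() traversal patched above takes a pre-visit callback (which can prune the walk) and a post-visit callback (whose return value replaces the visited node), storing the possibly-new child pointers back into each parent. A minimal, self-contained sketch of that pre/post pattern, using hypothetical Node/Leaf/Pair classes rather than ispc's actual AST types:

// Sketch of a pre/post-callback AST walk in the style of WalkAST().
// Node, Leaf, Pair, and the callbacks are illustrative stand-ins.
#include <cstdio>

struct Node { virtual ~Node() {} };
struct Leaf : Node { int value; Leaf(int v) : value(v) {} };
struct Pair : Node {
    Node *left, *right;
    Pair(Node *l, Node *r) : left(l), right(r) {}
};

typedef bool (*PreFunc)(Node *, void *);    // return false to skip children
typedef Node *(*PostFunc)(Node *, void *);  // may return a replacement node

static Node *Walk(Node *node, PreFunc pre, PostFunc post, void *data) {
    if (node == NULL)
        return NULL;
    if (pre != NULL && !pre(node, data))
        return node;
    if (Pair *p = dynamic_cast<Pair *>(node)) {
        // Recurse into children, storing any replacements back in place,
        // just as WalkAST() reassigns es->expr, is->trueStmts, etc.
        p->left = Walk(p->left, pre, post, data);
        p->right = Walk(p->right, pre, post, data);
    }
    return post != NULL ? post(node, data) : node;
}

static bool countLeavesPre(Node *n, void *d) {
    if (dynamic_cast<Leaf *>(n))
        ++*(int *)d;
    return true;  // keep walking
}

int main() {
    Pair root(new Leaf(1), new Pair(new Leaf(2), new Leaf(3)));
    int leaves = 0;
    Walk(&root, countLeavesPre, NULL, &leaves);
    std::printf("%d leaf nodes\n", leaves);  // prints: 3 leaf nodes
}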
diff --git a/builtins.cpp b/builtins.cpp
index bd714547..716339a1 100644
--- a/builtins.cpp
+++ b/builtins.cpp
@@ -28,11 +28,11 @@
    PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

 /** @file builtins.cpp
-    @brief Definitions of functions related to setting up the standard library 
+    @brief Definitions of functions related to setting up the standard library
            and other builtins.
 */

@@ -169,9 +169,9 @@ lLLVMTypeToISPCType(const llvm::Type *t, bool intAsUnsigned) {

 static void
-lCreateSymbol(const std::string &name, const Type *returnType, 
-              llvm::SmallVector<const Type *, 8> &argTypes, 
-              const llvm::FunctionType *ftype, llvm::Function *func, 
+lCreateSymbol(const std::string &name, const Type *returnType,
+              llvm::SmallVector<const Type *, 8> &argTypes,
+              const llvm::FunctionType *ftype, llvm::Function *func,
               SymbolTable *symbolTable) {
     SourcePos noPos;
     noPos.name = "__stdlib";
@@ -251,7 +251,7 @@ lCreateISPCSymbol(llvm::Function *func, SymbolTable *symbolTable) {
                   "representable for builtin %s", j, name.c_str());
             return false;
         }
-        anyIntArgs |= 
+        anyIntArgs |=
             (Type::Equal(type, lLLVMTypeToISPCType(llvmArgType, !intAsUnsigned)) == false);
         argTypes.push_back(type);
     }
@@ -286,7 +286,7 @@ lAddModuleSymbols(llvm::Module *module, SymbolTable *symbolTable) {

 /** In many of the builtins-*.ll files, we have declarations of various LLVM
     intrinsics that are then used in the implementation of various target-
-    specific functions. This function loops over all of the intrinsic 
+    specific functions. This function loops over all of the intrinsic
     declarations and makes sure that the signature we have in our .ll file
     matches the signature of the actual intrinsic.
 */
@@ -304,7 +304,7 @@ lCheckModuleIntrinsics(llvm::Module *module) {
         if (!strncmp(funcName.c_str(), "llvm.x86.", 9)) {
             llvm::Intrinsic::ID id = (llvm::Intrinsic::ID)func->getIntrinsicID();
             Assert(id != 0);
-            llvm::Type *intrinsicType = 
+            llvm::Type *intrinsicType =
                 llvm::Intrinsic::getType(*g->ctx, id);
             intrinsicType = llvm::PointerType::get(intrinsicType, 0);
             Assert(func->getType() == intrinsicType);
@@ -410,7 +410,7 @@ lSetInternalFunctions(llvm::Module *module) {
         "__delete_varying",
         "__do_assert_uniform",
         "__do_assert_varying",
-        "__do_print", 
+        "__do_print",
         "__doublebits_uniform_int64",
         "__doublebits_varying_int64",
         "__exclusive_scan_add_double",
@@ -654,7 +654,7 @@ AddBitcodeToModule(const unsigned char *bitcode, int length,
         bcModule->setDataLayout(module->getDataLayout());

         std::string(linkError);
-        if (llvm::Linker::LinkModules(module, bcModule, 
+        if (llvm::Linker::LinkModules(module, bcModule,
                                       llvm::Linker::DestroySource, &linkError))
             Error(SourcePos(), "Error linking stdlib bitcode: %s",
                   linkError.c_str());
@@ -672,7 +672,7 @@ AddBitcodeToModule(const unsigned char *bitcode, int length,
 static void
 lDefineConstantInt(const char *name, int val, llvm::Module *module,
                    SymbolTable *symbolTable) {
-    Symbol *sym = 
+    Symbol *sym =
         new Symbol(name, SourcePos(), AtomicType::UniformInt32->GetAsConstType(),
                    SC_STATIC);
     sym->constValue = new ConstExpr(sym->type, val, SourcePos());
@@ -694,8 +694,8 @@ lDefineConstantInt(const char *name, int val, llvm::Module *module,
         // FIXME? DWARF says that this (and programIndex below) should
         // have the DW_AT_artificial attribute. It's not clear if this
         // matters for anything though.
-        llvm::DIGlobalVariable var = 
-            m->diBuilder->createGlobalVariable(name, 
+        llvm::DIGlobalVariable var =
+            m->diBuilder->createGlobalVariable(name,
                                                file,
                                                0 /* line */,
                                                diType,
@@ -732,8 +732,8 @@ lDefineConstantIntFunc(const char *name, int val, llvm::Module *module,

 static void
 lDefineProgramIndex(llvm::Module *module, SymbolTable *symbolTable) {
-    Symbol *sym = 
-        new Symbol("programIndex", SourcePos(), 
+    Symbol *sym =
+        new Symbol("programIndex", SourcePos(),
                    AtomicType::VaryingInt32->GetAsConstType(), SC_STATIC);

     int pi[ISPC_MAX_NVEC];
@@ -755,7 +755,7 @@ lDefineProgramIndex(llvm::Module *module, SymbolTable *symbolTable) {
         llvm::DIType diType = sym->type->GetDIType(file);
         Assert(diType.Verify());
         llvm::DIGlobalVariable var =
-            m->diBuilder->createGlobalVariable(sym->name.c_str(), 
+            m->diBuilder->createGlobalVariable(sym->name.c_str(),
                                                file,
                                                0 /* line */,
                                                diType,
@@ -773,13 +773,13 @@ DefineStdlib(SymbolTable *symbolTable, llvm::LLVMContext *ctx, llvm::Module *mod
     if (g->target.is32Bit) {
         extern unsigned char builtins_bitcode_c_32[];
         extern int builtins_bitcode_c_32_length;
-        AddBitcodeToModule(builtins_bitcode_c_32, builtins_bitcode_c_32_length, 
+        AddBitcodeToModule(builtins_bitcode_c_32, builtins_bitcode_c_32_length,
                            module, symbolTable);
     }
     else {
         extern unsigned char builtins_bitcode_c_64[];
         extern int builtins_bitcode_c_64_length;
-        AddBitcodeToModule(builtins_bitcode_c_64, builtins_bitcode_c_64_length, 
+        AddBitcodeToModule(builtins_bitcode_c_64, builtins_bitcode_c_64_length,
                            module, symbolTable);
     }

@@ -792,12 +792,12 @@ DefineStdlib(SymbolTable *symbolTable, llvm::LLVMContext *ctx, llvm::Module *mod
         extern unsigned char builtins_bitcode_sse2_x2[];
         extern int builtins_bitcode_sse2_x2_length;
         switch (g->target.vectorWidth) {
-        case 4: 
-            AddBitcodeToModule(builtins_bitcode_sse2, builtins_bitcode_sse2_length, 
+        case 4:
+            AddBitcodeToModule(builtins_bitcode_sse2, builtins_bitcode_sse2_length,
                                module, symbolTable);
             break;
         case 8:
-            AddBitcodeToModule(builtins_bitcode_sse2_x2, builtins_bitcode_sse2_x2_length, 
+            AddBitcodeToModule(builtins_bitcode_sse2_x2, builtins_bitcode_sse2_x2_length,
                                module, symbolTable);
             break;
         default:
@@ -810,14 +810,14 @@ DefineStdlib(SymbolTable *symbolTable, llvm::LLVMContext *ctx, llvm::Module *mod
         extern unsigned char builtins_bitcode_sse4_x2[];
         extern int builtins_bitcode_sse4_x2_length;
         switch (g->target.vectorWidth) {
-        case 4: 
+        case 4:
             AddBitcodeToModule(builtins_bitcode_sse4,
-                               builtins_bitcode_sse4_length, 
+                               builtins_bitcode_sse4_length,
                                module, symbolTable);
             break;
         case 8:
-            AddBitcodeToModule(builtins_bitcode_sse4_x2, 
-                               builtins_bitcode_sse4_x2_length, 
+            AddBitcodeToModule(builtins_bitcode_sse4_x2,
+                               builtins_bitcode_sse4_x2_length,
                                module, symbolTable);
             break;
         default:
@@ -829,14 +829,14 @@ DefineStdlib(SymbolTable *symbolTable, llvm::LLVMContext *ctx, llvm::Module *mod
         case 8:
             extern unsigned char builtins_bitcode_avx1[];
             extern int builtins_bitcode_avx1_length;
-            AddBitcodeToModule(builtins_bitcode_avx1, 
-                               builtins_bitcode_avx1_length, 
+            AddBitcodeToModule(builtins_bitcode_avx1,
+                               builtins_bitcode_avx1_length,
                                module, symbolTable);
             break;
         case 16:
             extern unsigned char builtins_bitcode_avx1_x2[];
             extern int builtins_bitcode_avx1_x2_length;
-            AddBitcodeToModule(builtins_bitcode_avx1_x2, 
+            AddBitcodeToModule(builtins_bitcode_avx1_x2,
                                builtins_bitcode_avx1_x2_length,
                                module, symbolTable);
             break;
@@ -849,14 +849,14 @@ DefineStdlib(SymbolTable *symbolTable, llvm::LLVMContext *ctx, llvm::Module *mod
         case 8:
             extern unsigned char builtins_bitcode_avx11[];
             extern int builtins_bitcode_avx11_length;
-            AddBitcodeToModule(builtins_bitcode_avx11, 
-                               builtins_bitcode_avx11_length, 
+            AddBitcodeToModule(builtins_bitcode_avx11,
+                               builtins_bitcode_avx11_length,
                                module, symbolTable);
             break;
         case 16:
             extern unsigned char builtins_bitcode_avx11_x2[];
             extern int builtins_bitcode_avx11_x2_length;
-            AddBitcodeToModule(builtins_bitcode_avx11_x2, 
+            AddBitcodeToModule(builtins_bitcode_avx11_x2,
                                builtins_bitcode_avx11_x2_length,
                                module, symbolTable);
             break;
@@ -869,14 +869,14 @@ DefineStdlib(SymbolTable *symbolTable, llvm::LLVMContext *ctx, llvm::Module *mod
         case 8:
             extern unsigned char builtins_bitcode_avx2[];
             extern int builtins_bitcode_avx2_length;
-            AddBitcodeToModule(builtins_bitcode_avx2, 
-                               builtins_bitcode_avx2_length, 
+            AddBitcodeToModule(builtins_bitcode_avx2,
+                               builtins_bitcode_avx2_length,
                                module, symbolTable);
             break;
         case 16:
             extern unsigned char builtins_bitcode_avx2_x2[];
             extern int builtins_bitcode_avx2_x2_length;
-            AddBitcodeToModule(builtins_bitcode_avx2_x2, 
+            AddBitcodeToModule(builtins_bitcode_avx2_x2,
                                builtins_bitcode_avx2_x2_length,
                                module, symbolTable);
             break;
@@ -889,43 +889,43 @@ DefineStdlib(SymbolTable *symbolTable, llvm::LLVMContext *ctx, llvm::Module *mod
         case 4:
             extern unsigned char builtins_bitcode_generic_4[];
             extern int builtins_bitcode_generic_4_length;
-            AddBitcodeToModule(builtins_bitcode_generic_4, 
-                               builtins_bitcode_generic_4_length, 
+            AddBitcodeToModule(builtins_bitcode_generic_4,
+                               builtins_bitcode_generic_4_length,
                                module, symbolTable);
             break;
         case 8:
             extern unsigned char builtins_bitcode_generic_8[];
             extern int builtins_bitcode_generic_8_length;
-            AddBitcodeToModule(builtins_bitcode_generic_8, 
-                               builtins_bitcode_generic_8_length, 
+            AddBitcodeToModule(builtins_bitcode_generic_8,
+                               builtins_bitcode_generic_8_length,
                                module, symbolTable);
             break;
         case 16:
             extern unsigned char builtins_bitcode_generic_16[];
             extern int builtins_bitcode_generic_16_length;
-            AddBitcodeToModule(builtins_bitcode_generic_16, 
-                               builtins_bitcode_generic_16_length, 
+            AddBitcodeToModule(builtins_bitcode_generic_16,
+                               builtins_bitcode_generic_16_length,
                                module, symbolTable);
             break;
         case 32:
             extern unsigned char builtins_bitcode_generic_32[];
             extern int builtins_bitcode_generic_32_length;
-            AddBitcodeToModule(builtins_bitcode_generic_32, 
-                               builtins_bitcode_generic_32_length, 
+            AddBitcodeToModule(builtins_bitcode_generic_32,
+                               builtins_bitcode_generic_32_length,
                                module, symbolTable);
             break;
         case 64:
             extern unsigned char builtins_bitcode_generic_64[];
             extern int builtins_bitcode_generic_64_length;
-            AddBitcodeToModule(builtins_bitcode_generic_64, 
-                               builtins_bitcode_generic_64_length, 
+            AddBitcodeToModule(builtins_bitcode_generic_64,
+                               builtins_bitcode_generic_64_length,
                                module, symbolTable);
             break;
-        case 1: 
+        case 1:
             extern unsigned char builtins_bitcode_generic_1[];
             extern int builtins_bitcode_generic_1_length;
-            AddBitcodeToModule(builtins_bitcode_generic_1, 
-                               builtins_bitcode_generic_1_length, 
+            AddBitcodeToModule(builtins_bitcode_generic_1,
+                               builtins_bitcode_generic_1_length,
                                module, symbolTable);
             break;
         default:
@@ -947,7 +947,7 @@ DefineStdlib(SymbolTable *symbolTable, llvm::LLVMContext *ctx, llvm::Module *mod
     lDefineConstantInt("__math_lib", (int)g->mathLib, module, symbolTable);
     lDefineConstantInt("__math_lib_ispc", (int)Globals::Math_ISPC, module,
                        symbolTable);
-    lDefineConstantInt("__math_lib_ispc_fast", (int)Globals::Math_ISPCFast, 
+    lDefineConstantInt("__math_lib_ispc_fast", (int)Globals::Math_ISPCFast,
                        module, symbolTable);
     lDefineConstantInt("__math_lib_svml", (int)Globals::Math_SVML, module,
                        symbolTable);
@@ -956,9 +956,9 @@ DefineStdlib(SymbolTable *symbolTable, llvm::LLVMContext *ctx, llvm::Module *mod
     lDefineConstantIntFunc("__fast_masked_vload", (int)g->opt.fastMaskedVload,
                            module, symbolTable);

-    lDefineConstantInt("__have_native_half", g->target.hasHalf, module, 
+    lDefineConstantInt("__have_native_half", g->target.hasHalf, module,
                        symbolTable);
-    lDefineConstantInt("__have_native_rand", g->target.hasRand, module, 
+    lDefineConstantInt("__have_native_rand", g->target.hasRand, module,
                        symbolTable);
     lDefineConstantInt("__have_native_transcendentals", g->target.hasTranscendentals,
                        module, symbolTable);

diff --git a/builtins.h b/builtins.h
index 876f2356..14f3896e 100644
--- a/builtins.h
+++ b/builtins.h
@@ -28,11 +28,11 @@
    PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

 /** @file builtins.h
-    @brief Declarations of functions related to builtins and the 
+    @brief Declarations of functions related to builtins and the
            standard library
 */

diff --git a/builtins/builtins.c b/builtins/builtins.c
index f985c465..8d687698 100644
--- a/builtins/builtins.c
+++ b/builtins/builtins.c
@@ -28,7 +28,7 @@
    PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

 /** @file builtins-c.c
@@ -101,12 +101,12 @@ typedef int Bool;
     @param format   Print format string
     @param types    Encoded types of the values being printed.
-                    (See lEncodeType()). 
+                    (See lEncodeType()).
     @param width    Vector width of the compilation target
     @param mask     Current lane mask when the print statement is called
     @param args     Array of pointers to the values to be printed
 */
-void __do_print(const char *format, const char *types, int width, uint64_t mask, 
+void __do_print(const char *format, const char *types, int width, uint64_t mask,
                 void **args) {
     char printString[PRINT_BUF_SIZE+1]; // +1 for trailing NUL
     char *bufp = &printString[0];
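__do_print() receives the target's vector width, the lane mask at the print statement, and one pointer per lane, and only lanes whose mask bit is set count as active. A standalone sketch of that lane-masking idea follows; the double-parenthesis convention for inactive lanes is used here for illustration and is not necessarily ispc's exact output format:

// Sketch: print a varying int value lane by lane, gated by a lane mask,
// in the spirit of __do_print(). Formatting details are assumptions.
#include <cstdint>
#include <cstdio>

static void printMaskedLanes(const int *values, int width, uint64_t mask) {
    std::printf("[");
    for (int lane = 0; lane < width; ++lane) {
        bool active = (mask >> lane) & 1;
        if (active)
            std::printf("%d", values[lane]);
        else
            std::printf("((%d))", values[lane]);  // inactive lane, set off
        if (lane + 1 < width)
            std::printf(",");
    }
    std::printf("]\n");
}

int main() {
    int v[4] = { 10, 20, 30, 40 };
    printMaskedLanes(v, 4, 0x5);  // lanes 0 and 2 active: [10,((20)),30,((40))]
}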
diff --git a/cbackend.cpp b/cbackend.cpp
index eb68bc15..df8b4f04 100644
--- a/cbackend.cpp
+++ b/cbackend.cpp
@@ -119,12 +119,12 @@ namespace {
     // objects, we keep several helper maps.
     llvm::DenseSet<const llvm::Value *> VisitedConstants;
     llvm::DenseSet<llvm::Type *> VisitedTypes;
-    
+
     std::vector<llvm::ArrayType *> &ArrayTypes;
   public:
     TypeFinder(std::vector<llvm::ArrayType *> &t)
       : ArrayTypes(t) {}
-    
+
     void run(const llvm::Module &M) {
       // Get types from global variables.
       for (llvm::Module::const_global_iterator I = M.global_begin(),
@@ -133,7 +133,7 @@ namespace {
         if (I->hasInitializer())
           incorporateValue(I->getInitializer());
       }
-      
+
       // Get types from aliases.
       for (llvm::Module::const_alias_iterator I = M.alias_begin(),
            E = M.alias_end(); I != E; ++I) {
@@ -141,13 +141,13 @@ namespace {
         if (const llvm::Value *Aliasee = I->getAliasee())
           incorporateValue(Aliasee);
       }
-      
+
       llvm::SmallVector<std::pair<unsigned, llvm::MDNode *>, 4> MDForInst;

       // Get types from functions.
       for (llvm::Module::const_iterator FI = M.begin(), E = M.end();
            FI != E; ++FI) {
         incorporateType(FI->getType());
-        
+
         for (llvm::Function::const_iterator BB = FI->begin(), E = FI->end();
              BB != E;++BB)
           for (llvm::BasicBlock::const_iterator II = BB->begin(),
@@ -158,13 +158,13 @@ namespace {
             // Seems like there ought to be better way to do what we
             // want here. For now, punt on SwitchInsts.
             if (llvm::isa<llvm::SwitchInst>(&I)) continue;
-            
+
             // Incorporate the type of the instruction and all its operands.
             incorporateType(I.getType());
             for (llvm::User::const_op_iterator OI = I.op_begin(),
                  OE = I.op_end(); OI != OE; ++OI)
               incorporateValue(*OI);
-            
+
             // Incorporate types hiding in metadata.
             I.getAllMetadataOtherThanDebugLoc(MDForInst);
             for (unsigned i = 0, e = MDForInst.size(); i != e; ++i)
@@ -172,7 +172,7 @@ namespace {
             MDForInst.clear();
           }
       }
-      
+
       for (llvm::Module::const_named_metadata_iterator I = M.named_metadata_begin(),
            E = M.named_metadata_end(); I != E; ++I) {
         const llvm::NamedMDNode *NMD = I;
@@ -180,7 +180,7 @@ namespace {
           incorporateMDNode(NMD->getOperand(i));
       }
     }
-    
+
   private:
     void incorporateType(llvm::Type *Ty) {
       // Check to see if we've already visited this type.
@@ -189,13 +189,13 @@ namespace {

       if (llvm::ArrayType *ATy = llvm::dyn_cast<llvm::ArrayType>(Ty))
         ArrayTypes.push_back(ATy);
-      
+
       // Recursively walk all contained types.
       for (llvm::Type::subtype_iterator I = Ty->subtype_begin(),
            E = Ty->subtype_end(); I != E; ++I)
         incorporateType(*I);
    }
-    
+
    /// incorporateValue - This method is used to walk operand lists finding
    /// types hiding in constant expressions and other operands that won't be
    /// walked in other ways. GlobalValues, basic blocks, instructions, and
@@ -204,27 +204,27 @@ namespace {
      if (const llvm::MDNode *M = llvm::dyn_cast<llvm::MDNode>(V))
        return incorporateMDNode(M);
      if (!llvm::isa<llvm::Constant>(V) || llvm::isa<llvm::GlobalValue>(V)) return;
-      
+
      // Already visited?
      if (!VisitedConstants.insert(V).second)
        return;
-      
+
      // Check this type.
      incorporateType(V->getType());
-      
+
      // Look in operands for types.
      const llvm::User *U = llvm::cast<llvm::User>(V);
      for (llvm::Constant::const_op_iterator I = U->op_begin(),
           E = U->op_end(); I != E;++I)
        incorporateValue(*I);
    }
-    
+
    void incorporateMDNode(const llvm::MDNode *V) {
-      
+
      // Already visited?
      if (!VisitedConstants.insert(V).second)
        return;
-      
+
      // Look in operands for types.
      for (unsigned i = 0, e = V->getNumOperands(); i != e; ++i)
        if (llvm::Value *Op = V->getOperand(i))
@@ -266,7 +266,7 @@ namespace {
     // TargetData have generally similar interfaces...
     const llvm::DataLayout* TD;
 #endif
-    
+
     std::map<const llvm::ConstantFP *, unsigned> FPConstantMap;
     std::map<const llvm::Constant *, unsigned> VectorConstantMap;
     unsigned VectorConstantIndex;
@@ -276,7 +276,7 @@ namespace {
     unsigned OpaqueCounter;
     llvm::DenseMap<const llvm::Value *, unsigned> AnonValueNumbers;
     unsigned NextAnonValueNumber;
-    
+
     std::string includeName;
     int vectorWidth;

@@ -291,7 +291,7 @@ namespace {
             int vecwidth)
       : FunctionPass(ID), Out(o), IL(0), /* Mang(0), */ LI(0),
         TheModule(0), TAsm(0), MRI(0), MOFI(0), TCtx(0), TD(0),
-        OpaqueCounter(0), NextAnonValueNumber(0), 
+        OpaqueCounter(0), NextAnonValueNumber(0),
         includeName(incname ? incname : "generic_defs.h"),
         vectorWidth(vecwidth) {
       initializeLoopInfoPass(*llvm::PassRegistry::getPassRegistry());
@@ -372,7 +372,7 @@ namespace {

     std::string getStructName(llvm::StructType *ST);
     std::string getArrayName(llvm::ArrayType *AT);
-    
+
     /// writeOperandDeref - Print the result of dereferencing the specified
     /// operand with '*'. This is equivalent to printing '*' then using
     /// writeOperand, but avoids excess syntax in some cases.
@@ -399,7 +399,7 @@ namespace {
   private :
     void lowerIntrinsics(llvm::Function &F);
-    /// Prints the definition of the intrinsic function F. Supports the 
+    /// Prints the definition of the intrinsic function F. Supports the
     /// intrinsics which need to be explicitly defined in the CBackend.
     void printIntrinsicDefinition(const llvm::Function &F,
                                   llvm::raw_ostream &Out);
@@ -573,7 +573,7 @@ static std::string CBEMangle(const std::string &S) {
 std::string CWriter::getStructName(llvm::StructType *ST) {
   if (!ST->isLiteral() && !ST->getName().empty())
     return CBEMangle("l_"+ST->getName().str());
-  
+
   return "l_unnamed_" + llvm::utostr(UnnamedStructIDs[ST]);
 }

@@ -694,19 +694,19 @@ CWriter::printSimpleType(llvm::raw_ostream &Out, llvm::Type *Ty, bool isSigned,
     assert(eltTy->isIntegerTy());
     switch (eltTy->getPrimitiveSizeInBits()) {
     case 1:
-      suffix = "i1"; 
+      suffix = "i1";
       break;
     case 8:
-      suffix = "i8"; 
+      suffix = "i8";
       break;
     case 16:
-      suffix = "i16"; 
+      suffix = "i16";
       break;
     case 32:
-      suffix = "i32"; 
+      suffix = "i32";
       break;
     case 64:
-      suffix = "i64"; 
+      suffix = "i64";
       break;
     default:
       llvm::report_fatal_error("Only integer types of size 8/16/32/64 are "
@@ -714,7 +714,7 @@ CWriter::printSimpleType(llvm::raw_ostream &Out, llvm::Type *Ty, bool isSigned,
     }
   }

-  return Out << "__vec" << VTy->getNumElements() << "_" << suffix << " " << 
+  return Out << "__vec" << VTy->getNumElements() << "_" << suffix << " " <<
     NameSoFar;
 #else
   return printSimpleType(Out, VTy->getElementType(), isSigned,
@@ -736,7 +736,7 @@ CWriter::printSimpleType(llvm::raw_ostream &Out, llvm::Type *Ty, bool isSigned,
 //
 llvm::raw_ostream &CWriter::printType(llvm::raw_ostream &Out, llvm::Type *Ty,
                                       bool isSigned, const std::string &NameSoFar,
-                                      bool IgnoreName, 
+                                      bool IgnoreName,
 #if defined(LLVM_3_1) || defined(LLVM_3_2)
                                       const llvm::AttrListPtr &PAL
 #else
@@ -803,11 +803,11 @@ llvm::raw_ostream &CWriter::printType(llvm::raw_ostream &Out, llvm::Type *Ty,
   }
   case llvm::Type::StructTyID: {
     llvm::StructType *STy = llvm::cast<llvm::StructType>(Ty);
-    
+
     // Check to see if the type is named.
     if (!IgnoreName)
       return Out << getStructName(STy) << ' ' << NameSoFar;
-    
+
     Out << "struct " << NameSoFar << " {\n";
     // print initialization func
@@ -930,13 +930,13 @@ void CWriter::printConstantDataSequential(llvm::ConstantDataSequential *CDS,
     Out << '\"';
     // Keep track of whether the last number was a hexadecimal escape.
     bool LastWasHex = false;
-    
+
     llvm::StringRef Bytes = CDS->getAsCString();
-    
+
     // Do not include the last character, which we know is null
     for (unsigned i = 0, e = Bytes.size(); i != e; ++i) {
       unsigned char C = Bytes[i];
-      
+
       // Print it out literally if it is a printable character. The only thing
       // to be careful about is when the last letter output was a hex escape
       // code, in which case we have to be careful not to print out hex digits
@@ -1143,8 +1143,8 @@ bool CWriter::printCast(unsigned opc, llvm::Type *SrcTy, llvm::Type *DstTy) {

 /** Construct the name of a function with the given base and returning a
-    vector of a given type, of the specified width. For example, if base 
-    is "foo" and matchType is i32 and width is 16, this will return the 
+    vector of a given type, of the specified width. For example, if base
+    is "foo" and matchType is i32 and width is 16, this will return the
     string "__foo_i32<__vec16_i32>".
 */
 static const char *
@@ -1492,7 +1492,7 @@ void CWriter::printConstant(llvm::Constant *CPV, bool Static) {
   }
   if (llvm::ConstantArray *CA = llvm::dyn_cast<llvm::ConstantArray>(CPV)) {
     printConstantArray(CA, Static);
-  } else if (llvm::ConstantDataSequential *CDS = 
+  } else if (llvm::ConstantDataSequential *CDS =
              llvm::dyn_cast<llvm::ConstantDataSequential>(CPV)) {
     printConstantDataSequential(CDS, Static);
   } else {
@@ -1584,7 +1584,7 @@ void CWriter::printConstant(llvm::Constant *CPV, bool Static) {
         llvm::report_fatal_error("Unexpected vector type");
       }
     }
-    
+
     break;
   }
   case llvm::Type::StructTyID:
@@ -2658,7 +2658,7 @@ void CWriter::printModuleTypes() {
   Out << "/* Structure and array forward declarations */\n";

   unsigned NextTypeID = 0;
-  
+
   // If any of them are missing names, add a unique ID to UnnamedStructIDs.
   // Print out forward declarations for structure types.
   for (unsigned i = 0, e = StructTypes.size(); i != e; ++i) {
@@ -2718,7 +2718,7 @@ void CWriter::printContainedStructs(llvm::Type *Ty,
   if (llvm::StructType *ST = llvm::dyn_cast<llvm::StructType>(Ty)) {
     // Check to see if we have already printed this struct.
     if (!Printed.insert(Ty)) return;
-    
+
     // Print structure type out.
     printType(Out, ST, false, getStructName(ST), true);
     Out << ";\n\n";
@@ -3623,8 +3623,8 @@ void CWriter::printIntrinsicDefinition(const llvm::Function &F, llvm::raw_ostrea
     Out << "  r.field1 = (r.field0 < a);\n";
     Out << "  return r;\n}\n";
     break;
-    
-  case llvm::Intrinsic::sadd_with_overflow: 
+
+  case llvm::Intrinsic::sadd_with_overflow:
     // static inline Rty sadd_ixx(ixx a, ixx b) {
     //   Rty r;
     //   r.field1 = (b > 0 && a > XX_MAX - b) ||
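The hunk above ends inside the comment documenting the helper the C backend emits for llvm.sadd.with.overflow. Written out as ordinary C++ for a 32-bit int, the overflow test that comment describes looks like this; it is a sketch of the test, not the code the backend actually generates:

// Signed addition with overflow detection, mirroring the sadd_ixx helper
// documented in printIntrinsicDefinition().
#include <climits>
#include <cstdio>

struct AddResult { int sum; bool overflow; };

static AddResult saddWithOverflow(int a, int b) {
    AddResult r;
    // Overflow iff both operands push past INT_MAX, or both past INT_MIN:
    //   r.field1 = (b > 0 && a > XX_MAX - b) || (b < 0 && a < XX_MIN - b)
    r.overflow = (b > 0 && a > INT_MAX - b) ||
                 (b < 0 && a < INT_MIN - b);
    // Perform the add in unsigned arithmetic, where wraparound is defined.
    r.sum = (int)((unsigned)a + (unsigned)b);
    return r;
}

int main() {
    AddResult r = saddWithOverflow(INT_MAX, 1);
    std::printf("sum=%d overflow=%d\n", r.sum, (int)r.overflow);  // overflow=1
}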
@@ -3851,10 +3851,10 @@ void CWriter::visitCallInst(llvm::CallInst &I) {
       Out << ')';
     }
     // Check if the argument is expected to be passed by value.
-    if (I.paramHasAttr(ArgNo+1, 
+    if (I.paramHasAttr(ArgNo+1,
 #if defined(LLVM_3_2)
                        llvm::Attributes::ByVal
-#else 
+#else
                        llvm::Attribute::ByVal
 #endif
                        ))
@@ -4419,7 +4419,7 @@ SmearCleanupPass::runOnBasicBlock(llvm::BasicBlock &bb) {
  restart:
     for (llvm::BasicBlock::iterator iter = bb.begin(), e = bb.end();
          iter != e; ++iter) {
-        llvm::InsertElementInst *insertInst = 
+        llvm::InsertElementInst *insertInst =
             llvm::dyn_cast<llvm::InsertElementInst>(&*iter);
         if (insertInst == NULL)
             continue;
@@ -4444,7 +4444,7 @@ SmearCleanupPass::runOnBasicBlock(llvm::BasicBlock &bb) {
             else if (toMatch != insertValue)
                 goto not_equal;

-            insertInst = 
+            insertInst =
                 llvm::dyn_cast<llvm::InsertElementInst>(insertInst->getOperand(0));
         }
         assert(toMatch != NULL);
@@ -4458,8 +4458,8 @@ SmearCleanupPass::runOnBasicBlock(llvm::BasicBlock &bb) {
             // Declare the smear function if needed; it takes a single
             // scalar parameter and returns a vector of the same
             // parameter type.
-            llvm::Constant *sf = 
-                module->getOrInsertFunction(smearFuncName, iter->getType(), 
+            llvm::Constant *sf =
+                module->getOrInsertFunction(smearFuncName, iter->getType(),
                                             matchType, NULL);
             smearFunc = llvm::dyn_cast<llvm::Function>(sf);
             assert(smearFunc != NULL);
@@ -4475,7 +4475,7 @@ SmearCleanupPass::runOnBasicBlock(llvm::BasicBlock &bb) {
         assert(smearFunc != NULL);
         llvm::Value *args[1] = { toMatch };
         llvm::ArrayRef<llvm::Value *> argArray(&args[0], &args[1]);
-        llvm::Instruction *smearCall = 
+        llvm::Instruction *smearCall =
             llvm::CallInst::Create(smearFunc, argArray,
                                    LLVMGetName(toMatch, "_smear"),
                                    (llvm::Instruction *)NULL);
@@ -4608,9 +4608,9 @@ AndCmpCleanupPass::runOnBasicBlock(llvm::BasicBlock &bb) {
             // are the same as the two arguments to the compare we're
             // replacing and the third argument is the mask type.
             llvm::Type *cmpOpType = opCmp->getOperand(0)->getType();
-            llvm::Constant *acf = 
+            llvm::Constant *acf =
                 m->module->getOrInsertFunction(funcName, LLVMTypes::MaskType,
-                                               cmpOpType, cmpOpType, 
+                                               cmpOpType, cmpOpType,
                                                LLVMTypes::MaskType, NULL);
             andCmpFunc = llvm::dyn_cast<llvm::Function>(acf);
             Assert(andCmpFunc != NULL);
@@ -4625,11 +4625,11 @@ AndCmpCleanupPass::runOnBasicBlock(llvm::BasicBlock &bb) {

         // Set up the function call to the *_and_mask function; the
         // mask value passed in is the other operand to the AND.
-        llvm::Value *args[3] = { opCmp->getOperand(0), opCmp->getOperand(1), 
+        llvm::Value *args[3] = { opCmp->getOperand(0), opCmp->getOperand(1),
                                  bop->getOperand(i ^ 1) };
         llvm::ArrayRef<llvm::Value *> argArray(&args[0], &args[3]);
-        llvm::Instruction *cmpCall = 
-            llvm::CallInst::Create(andCmpFunc, argArray, 
+        llvm::Instruction *cmpCall =
+            llvm::CallInst::Create(andCmpFunc, argArray,
                                    LLVMGetName(bop, "_and_mask"),
                                    (llvm::Instruction *)NULL);
@@ -4656,12 +4656,12 @@ AndCmpCleanupPass::runOnBasicBlock(llvm::BasicBlock &bb) {
 class MaskOpsCleanupPass : public llvm::BasicBlockPass {
 public:
     MaskOpsCleanupPass(llvm::Module *m)
-        : BasicBlockPass(ID) { 
+        : BasicBlockPass(ID) {
         llvm::Type *mt = LLVMTypes::MaskType;

         // Declare the __not, __and_not1, and __and_not2 functions that we
         // expect the target to end up providing.
-        notFunc = 
+        notFunc =
             llvm::dyn_cast<llvm::Function>(m->getOrInsertFunction("__not", mt, mt,
                                                                   NULL));
         assert(notFunc != NULL);
 #if defined(LLVM_3_2)
@@ -4672,7 +4672,7 @@ public:
         notFunc->addFnAttr(llvm::Attribute::ReadNone);
 #endif

-        andNotFuncs[0] = 
+        andNotFuncs[0] =
             llvm::dyn_cast<llvm::Function>(m->getOrInsertFunction("__and_not1", mt, mt,
                                                                   mt, NULL));
         assert(andNotFuncs[0] != NULL);
@@ -4683,7 +4683,7 @@ public:
         andNotFuncs[0]->addFnAttr(llvm::Attribute::NoUnwind);
         andNotFuncs[0]->addFnAttr(llvm::Attribute::ReadNone);
 #endif
-        andNotFuncs[1] = 
+        andNotFuncs[1] =
             llvm::dyn_cast<llvm::Function>(m->getOrInsertFunction("__and_not2", mt, mt,
                                                                   mt, NULL));
         assert(andNotFuncs[1] != NULL);
@@ -4711,7 +4711,7 @@ char MaskOpsCleanupPass::ID = 0;

 /** Returns true if the given value is a compile-time constant vector of
-    i1s with all elements 'true'. 
+    i1s with all elements 'true'.
 */
 static bool
 lIsAllTrue(llvm::Value *v) {
@@ -4721,7 +4721,7 @@ lIsAllTrue(llvm::Value *v) {
                 (ci = llvm::dyn_cast<llvm::ConstantInt>(cv->getSplatValue())) != NULL &&
                 ci->isOne());
     }
-    
+
     if (llvm::ConstantDataVector *cdv = llvm::dyn_cast<llvm::ConstantDataVector>(v)) {
         llvm::ConstantInt *ci;
         return (cdv->getSplatValue() != NULL &&
@@ -4770,7 +4770,7 @@ MaskOpsCleanupPass::runOnBasicBlock(llvm::BasicBlock &bb) {
             // Check for XOR with all-true values
             if (lIsAllTrue(bop->getOperand(1))) {
                 llvm::ArrayRef<llvm::Value *> arg(bop->getOperand(0));
-                llvm::CallInst *notCall = llvm::CallInst::Create(notFunc, arg, 
+                llvm::CallInst *notCall = llvm::CallInst::Create(notFunc, arg,
                                                                  bop->getName());
                 ReplaceInstWithInst(iter, notCall);
                 modifiedAny = true;
@@ -4791,7 +4791,7 @@ MaskOpsCleanupPass::runOnBasicBlock(llvm::BasicBlock &bb) {
                     llvm::ArrayRef<llvm::Value *> argsRef(&args[0], 2);

                     // Call the appropriate __and_not* function.
-                    llvm::CallInst *andNotCall = 
+                    llvm::CallInst *andNotCall =
                         llvm::CallInst::Create(andNotFuncs[i], argsRef, bop->getName());

                     ReplaceInstWithInst(iter, andNotCall);
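MaskOpsCleanupPass rewrites mask operations into calls to __not, __and_not1, and __and_not2 using simple boolean-vector algebra: XOR with an all-true mask is a NOT, and an AND with one inverted operand is an and-not. The pass itself rewrites LLVM IR; the sketch below demonstrates the same algebra on scalar 64-bit lane masks standing in for the IR mask vectors:

// The mask identities MaskOpsCleanupPass relies on, checked on uint64_t.
#include <cassert>
#include <cstdint>

static const uint64_t AllOn = ~UINT64_C(0);

static uint64_t maskNot(uint64_t m)                 { return m ^ AllOn; }  // __not
static uint64_t maskAndNot1(uint64_t a, uint64_t b) { return ~a & b; }     // __and_not1
static uint64_t maskAndNot2(uint64_t a, uint64_t b) { return a & ~b; }     // __and_not2

int main() {
    uint64_t a = 0xF0F0, b = 0xFF00;
    assert(maskNot(a) == ~a);               // x ^ all_on  ==  ~x
    assert(maskAndNot1(a, b) == (~a & b));  // NOT feeding an AND's first operand
    assert(maskAndNot2(a, b) == (a & ~b));  // ... or its second operand
    return 0;
}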
diff --git a/ctx.cpp b/ctx.cpp
index 29b4caaf..bfb45c6e 100644
--- a/ctx.cpp
+++ b/ctx.cpp
@@ -28,7 +28,7 @@
    PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

 /** @file ctx.cpp
@@ -60,7 +60,7 @@

 /** This is a small utility structure that records information related to one
     level of nested control flow. It's mostly used in correctly restoring
-    the mask and other state as we exit control flow nesting levels. 
+    the mask and other state as we exit control flow nesting levels.
 */
 struct CFInfo {
     /** Returns a new instance of the structure that represents entering an
@@ -70,20 +70,20 @@ struct CFInfo {
     /** Returns a new instance of the structure that represents entering a
         loop. */
     static CFInfo *GetLoop(bool isUniform, llvm::BasicBlock *breakTarget,
-                           llvm::BasicBlock *continueTarget, 
+                           llvm::BasicBlock *continueTarget,
                            llvm::Value *savedBreakLanesPtr,
                            llvm::Value *savedContinueLanesPtr,
                            llvm::Value *savedMask, llvm::Value *savedBlockEntryMask);

     static CFInfo *GetForeach(FunctionEmitContext::ForeachType ft,
                               llvm::BasicBlock *breakTarget,
-                              llvm::BasicBlock *continueTarget, 
+                              llvm::BasicBlock *continueTarget,
                               llvm::Value *savedBreakLanesPtr,
                               llvm::Value *savedContinueLanesPtr,
                               llvm::Value *savedMask, llvm::Value *savedBlockEntryMask);

     static CFInfo *GetSwitch(bool isUniform, llvm::BasicBlock *breakTarget,
-                             llvm::BasicBlock *continueTarget, 
+                             llvm::BasicBlock *continueTarget,
                              llvm::Value *savedBreakLanesPtr,
                              llvm::Value *savedContinueLanesPtr,
                              llvm::Value *savedMask, llvm::Value *savedBlockEntryMask,
@@ -92,7 +92,7 @@ struct CFInfo {
                              const std::vector<std::pair<int, llvm::BasicBlock *> > *bbCases,
                              const std::map<llvm::BasicBlock *, llvm::BasicBlock *> *bbNext,
                              bool scUniform);
-    
+
     bool IsIf() { return type == If; }
     bool IsLoop() { return type == Loop; }
     bool IsForeach() { return (type == ForeachRegular ||
@@ -102,7 +102,7 @@ struct CFInfo {
     bool IsVarying() { return !isUniform; }
     bool IsUniform() { return isUniform; }

-    enum CFType { If, Loop, ForeachRegular, ForeachActive, ForeachUnique, 
+    enum CFType { If, Loop, ForeachRegular, ForeachActive, ForeachUnique,
                   Switch };
     CFType type;
     bool isUniform;
@@ -130,7 +130,7 @@ private:
     }
     CFInfo(CFType t, bool iu, llvm::BasicBlock *bt, llvm::BasicBlock *ct,
            llvm::Value *sb, llvm::Value *sc, llvm::Value *sm,
-           llvm::Value *lm, llvm::Value *sse = NULL, llvm::BasicBlock *bbd = NULL, 
+           llvm::Value *lm, llvm::Value *sse = NULL, llvm::BasicBlock *bbd = NULL,
            const std::vector<std::pair<int, llvm::BasicBlock *> > *bbc = NULL,
            const std::map<llvm::BasicBlock *, llvm::BasicBlock *> *bbn = NULL,
            bool scu = false) {
@@ -177,7 +177,7 @@ CFInfo::GetIf(bool isUniform, llvm::Value *savedMask) {

 CFInfo *
 CFInfo::GetLoop(bool isUniform, llvm::BasicBlock *breakTarget,
-                llvm::BasicBlock *continueTarget, 
+                llvm::BasicBlock *continueTarget,
                 llvm::Value *savedBreakLanesPtr,
                 llvm::Value *savedContinueLanesPtr,
                 llvm::Value *savedMask, llvm::Value *savedBlockEntryMask) {
@@ -190,7 +190,7 @@ CFInfo::GetLoop(bool isUniform, llvm::BasicBlock *breakTarget,
 CFInfo *
 CFInfo::GetForeach(FunctionEmitContext::ForeachType ft,
                    llvm::BasicBlock *breakTarget,
-                   llvm::BasicBlock *continueTarget, 
+                   llvm::BasicBlock *continueTarget,
                    llvm::Value *savedBreakLanesPtr,
                    llvm::Value *savedContinueLanesPtr,
                    llvm::Value *savedMask, llvm::Value *savedForeachMask) {
@@ -218,7 +218,7 @@ CFInfo::GetForeach(FunctionEmitContext::ForeachType ft,

 CFInfo *
 CFInfo::GetSwitch(bool isUniform, llvm::BasicBlock *breakTarget,
-                  llvm::BasicBlock *continueTarget, 
+                  llvm::BasicBlock *continueTarget,
                   llvm::Value *savedBreakLanesPtr,
                   llvm::Value *savedContinueLanesPtr,
                   llvm::Value *savedMask, llvm::Value *savedBlockEntryMask,
                   llvm::Value *savedSwitchExpr,
@@ -226,9 +226,9 @@ CFInfo::GetSwitch(bool isUniform, llvm::BasicBlock *breakTarget,
                   const std::vector<std::pair<int, llvm::BasicBlock *> > *savedCases,
                   const std::map<llvm::BasicBlock *, llvm::BasicBlock *> *savedNext,
                   bool savedSwitchConditionUniform) {
-    return new CFInfo(Switch, isUniform, breakTarget, continueTarget, 
+    return new CFInfo(Switch, isUniform, breakTarget, continueTarget,
                       savedBreakLanesPtr, savedContinueLanesPtr,
-                      savedMask, savedBlockEntryMask, savedSwitchExpr, savedDefaultBlock, 
+                      savedMask, savedBlockEntryMask, savedSwitchExpr, savedDefaultBlock,
                       savedCases, savedNext, savedSwitchConditionUniform);
 }

@@ -270,7 +270,7 @@ FunctionEmitContext::FunctionEmitContext(Function *func, Symbol *funSym,
     launchedTasks = false;
     launchGroupHandlePtr = AllocaInst(LLVMTypes::VoidPointerType, "launch_group_handle");
-    StoreInst(llvm::Constant::getNullValue(LLVMTypes::VoidPointerType), 
+    StoreInst(llvm::Constant::getNullValue(LLVMTypes::VoidPointerType),
               launchGroupHandlePtr);

     disableGSWarningCount = 0;
@@ -299,22 +299,22 @@ FunctionEmitContext::FunctionEmitContext(Function *func, Symbol *funSym,
         // __all_on_mask can't be guaranteed to be "all on", we emit a
         // dummy function that sets __all_on_mask be "all off". (That
         // function is never actually called.)
-        llvm::Value *globalAllOnMaskPtr = 
+        llvm::Value *globalAllOnMaskPtr =
             m->module->getNamedGlobal("__all_on_mask");
         if (globalAllOnMaskPtr == NULL) {
-            globalAllOnMaskPtr = 
+            globalAllOnMaskPtr =
                 new llvm::GlobalVariable(*m->module, LLVMTypes::MaskType, false,
                                          llvm::GlobalValue::InternalLinkage,
                                          LLVMMaskAllOn, "__all_on_mask");

             char buf[256];
             sprintf(buf, "__off_all_on_mask_%s", g->target.GetISAString());
-            llvm::Constant *offFunc = 
+            llvm::Constant *offFunc =
                 m->module->getOrInsertFunction(buf, LLVMTypes::VoidType,
                                                NULL);
             AssertPos(currentPos, llvm::isa<llvm::Function>(offFunc));
-            llvm::BasicBlock *offBB = 
-                llvm::BasicBlock::Create(*g->ctx, "entry", 
+            llvm::BasicBlock *offBB =
+                llvm::BasicBlock::Create(*g->ctx, "entry",
                                          (llvm::Function *)offFunc, 0);
             new llvm::StoreInst(LLVMMaskAllOff, globalAllOnMaskPtr, offBB);
             llvm::ReturnInst::Create(*g->ctx, offBB);
@@ -353,7 +353,7 @@ FunctionEmitContext::FunctionEmitContext(Function *func, Symbol *funSym,
         int firstLine = funcStartPos.first_line;

         int flags = (llvm::DIDescriptor::FlagPrototyped);
-        diSubprogram = 
+        diSubprogram =
             m->diBuilder->createFunction(diFile /* scope */, funSym->name,
                                          mangledName, diFile, firstLine,
                                          diSubprogramType,
@@ -407,7 +407,7 @@ FunctionEmitContext::GetInternalMask() {

 llvm::Value *
 FunctionEmitContext::GetFullMask() {
-    return BinaryOperator(llvm::Instruction::And, GetInternalMask(), 
+    return BinaryOperator(llvm::Instruction::And, GetInternalMask(),
                           functionMaskValue, "internal_mask&function_mask");
 }

@@ -442,7 +442,7 @@ FunctionEmitContext::SetInternalMask(llvm::Value *value) {

 void
 FunctionEmitContext::SetInternalMaskAnd(llvm::Value *oldMask, llvm::Value *test) {
-    llvm::Value *mask = BinaryOperator(llvm::Instruction::And, oldMask, 
+    llvm::Value *mask = BinaryOperator(llvm::Instruction::And, oldMask,
                                        test, "oldMask&test");
     SetInternalMask(mask);
 }
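FunctionEmitContext keeps two masks per function: the mask the function was entered with (functionMaskValue) and an internal mask that control flow narrows; GetFullMask() above ANDs the two, and SetInternalMaskAnd() is how an 'if' refines the internal mask by its test. A sketch of that composition, with uint64_t lane masks standing in for the LLVM mask values:

// Two-level mask composition in the style of FunctionEmitContext.
#include <cstdint>
#include <cstdio>

struct MaskCtx {
    uint64_t functionMask;  // mask the function was called with
    uint64_t internalMask;  // mask maintained by control flow in the function

    uint64_t fullMask() const { return functionMask & internalMask; }
    void setInternalMaskAnd(uint64_t oldMask, uint64_t test) {
        internalMask = oldMask & test;  // entering an 'if': refine by the test
    }
};

int main() {
    MaskCtx ctx = { 0x0F, 0xFF };                    // lanes 0-3 active on entry
    ctx.setInternalMaskAnd(ctx.internalMask, 0x05);  // 'if' true in lanes 0 and 2
    std::printf("full mask = 0x%llx\n",
                (unsigned long long)ctx.fullMask()); // prints 0x5
}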
@@ -529,7 +529,7 @@ FunctionEmitContext::EndIf() {
         // or continue statements (and breakLanesPtr and continueLanesPtr
         // have their initial 'all off' values), so we don't need to check
         // for that here.
-        // 
+        //
         // There are three general cases to deal with here:
         // - Loops: both break and continue are allowed, and thus the corresponding
         //   lane mask pointers are non-NULL
@@ -549,15 +549,15 @@ FunctionEmitContext::EndIf() {

         if (breakLanesPtr != NULL) {
             llvm::Value *breakLanes = LoadInst(breakLanesPtr, "break_lanes");
-            bcLanes = BinaryOperator(llvm::Instruction::Or, bcLanes, 
+            bcLanes = BinaryOperator(llvm::Instruction::Or, bcLanes,
                                      breakLanes, "|break_lanes");
         }

-        llvm::Value *notBreakOrContinue = 
+        llvm::Value *notBreakOrContinue =
             NotOperator(bcLanes, "!(break|continue)_lanes");
         llvm::Value *oldMask = GetInternalMask();
-        llvm::Value *newMask = 
-            BinaryOperator(llvm::Instruction::And, oldMask, 
+        llvm::Value *newMask =
+            BinaryOperator(llvm::Instruction::And, oldMask,
                            notBreakOrContinue, "new_mask");
         SetInternalMask(newMask);
     }
@@ -565,12 +565,12 @@ FunctionEmitContext::EndIf() {


 void
-FunctionEmitContext::StartLoop(llvm::BasicBlock *bt, llvm::BasicBlock *ct, 
+FunctionEmitContext::StartLoop(llvm::BasicBlock *bt, llvm::BasicBlock *ct,
                                bool uniformCF) {
     // Store the current values of various loop-related state so that we
     // can restore it when we exit this loop.
     llvm::Value *oldMask = GetInternalMask();
-    controlFlowInfo.push_back(CFInfo::GetLoop(uniformCF, breakTarget, 
+    controlFlowInfo.push_back(CFInfo::GetLoop(uniformCF, breakTarget,
                                               continueTarget, breakLanesPtr,
                                               continueLanesPtr, oldMask, blockEntryMask));
     if (uniformCF)
@@ -628,7 +628,7 @@ FunctionEmitContext::StartForeach(ForeachType ft) {
     // Store the current values of various loop-related state so that we
     // can restore it when we exit this loop.
     llvm::Value *oldMask = GetInternalMask();
-    controlFlowInfo.push_back(CFInfo::GetForeach(ft, breakTarget, continueTarget, 
+    controlFlowInfo.push_back(CFInfo::GetForeach(ft, breakTarget, continueTarget,
                                                  breakLanesPtr, continueLanesPtr,
                                                  oldMask, blockEntryMask));
     breakLanesPtr = NULL;
@@ -696,7 +696,7 @@ FunctionEmitContext::Break(bool doCoherenceCheck) {
         return;

     if (inSwitchStatement() == true &&
-        switchConditionWasUniform == true && 
+        switchConditionWasUniform == true &&
         ifsInCFAllUniform(CFInfo::Switch)) {
         // We know that all program instances are executing the break, so
         // just jump to the block immediately after the switch.
@@ -718,7 +718,7 @@ FunctionEmitContext::Break(bool doCoherenceCheck) {
         // Varying switch, uniform switch where the 'break' is under
         // varying control flow, or a loop with varying 'if's above the
        // break. In these cases, we need to update the mask of the lanes
-        // that have executed a 'break' statement: 
+        // that have executed a 'break' statement:
         // breakLanes = breakLanes | mask
         AssertPos(currentPos, breakLanesPtr != NULL);

         llvm::Value *mask = GetInternalMask();
@@ -788,9 +788,9 @@ FunctionEmitContext::Continue(bool doCoherenceCheck) {
         // continueLanes = continueLanes | mask
         AssertPos(currentPos, continueLanesPtr);

         llvm::Value *mask = GetInternalMask();
-        llvm::Value *continueMask = 
+        llvm::Value *continueMask =
             LoadInst(continueLanesPtr, "continue_mask");
-        llvm::Value *newMask = 
+        llvm::Value *newMask =
             BinaryOperator(llvm::Instruction::Or, mask, continueMask,
                            "mask|continueMask");
         StoreInst(newMask, continueLanesPtr);
@@ -799,7 +799,7 @@ FunctionEmitContext::Continue(bool doCoherenceCheck) {
         // statements in the same scope after the 'continue'
         SetInternalMask(LLVMMaskAllOff);

-        if (doCoherenceCheck) 
+        if (doCoherenceCheck)
             // If this is a 'coherent continue' statement, then emit the
             // code to see if all of the lanes are now off due to
             // breaks/continues and jump to the continue target if so.
@@ -846,7 +846,7 @@ FunctionEmitContext::jumpIfAllLoopLanesAreDone(llvm::BasicBlock *target) {
         llvm::Value *returned = LoadInst(returnedLanesPtr, "returned_lanes");
         llvm::Value *breaked = LoadInst(breakLanesPtr, "break_lanes");
-        llvm::Value *finishedLanes = BinaryOperator(llvm::Instruction::Or, 
+        llvm::Value *finishedLanes = BinaryOperator(llvm::Instruction::Or,
                                                     returned, breaked,
                                                     "returned|breaked");
         if (continueLanesPtr != NULL) {
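For a varying break, the code above ORs the current mask into breakLanes and then turns the executing lanes off; jumpIfAllLoopLanesAreDone() later ORs together the returned and breaked (and, where applicable, continued) lanes to decide whether the loop can exit early. A scalar-mask sketch of that bookkeeping, with uint64_t masks standing in for the break/continue mask storage (the real check also folds in lanes that returned):

// Lane-mask bookkeeping for varying 'break'/'continue' in a loop.
#include <cstdint>
#include <cstdio>

struct LoopMasks {
    uint64_t mask;           // currently executing lanes
    uint64_t breakLanes;     // lanes that have executed 'break'
    uint64_t continueLanes;  // lanes that have executed 'continue'

    void varyingBreak() {
        breakLanes |= mask;  // breakLanes = breakLanes | mask
        mask = 0;            // code after the break runs in no lanes
    }
    bool allLanesDone(uint64_t functionMask) const {
        return (breakLanes | continueLanes) == functionMask;
    }
};

int main() {
    LoopMasks lm = { 0x3, 0x4, 0x8 };  // lanes 0-1 running; 2 broke; 3 continued
    lm.varyingBreak();
    std::printf("breakLanes=0x%llx done=%d\n",
                (unsigned long long)lm.breakLanes,
                (int)lm.allLanesDone(0xF));  // breakLanes=0x7, done=1
}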
@@ -899,10 +899,10 @@ FunctionEmitContext::RestoreContinuedLanes() {
 void
 FunctionEmitContext::StartSwitch(bool cfIsUniform, llvm::BasicBlock *bbBreak) {
     llvm::Value *oldMask = GetInternalMask();
-    controlFlowInfo.push_back(CFInfo::GetSwitch(cfIsUniform, breakTarget, 
+    controlFlowInfo.push_back(CFInfo::GetSwitch(cfIsUniform, breakTarget,
                                                 continueTarget, breakLanesPtr,
-                                                continueLanesPtr, oldMask, 
-                                                blockEntryMask, switchExpr, defaultBlock, 
+                                                continueLanesPtr, oldMask,
+                                                blockEntryMask, switchExpr, defaultBlock,
                                                 caseBlocks, nextBlocks,
                                                 switchConditionWasUniform));
@@ -932,7 +932,7 @@ FunctionEmitContext::EndSwitch() {
 }


-/** Emit code to check for an "all off" mask before the code for a 
+/** Emit code to check for an "all off" mask before the code for a
     case or default label in a "switch" statement.
 */
 void
@@ -1006,13 +1006,13 @@ FunctionEmitContext::EmitDefaultLabel(bool checkMask, SourcePos pos) {
         // TODO: for AVX2 at least, the following generates better code
         // than doing ICMP_NE and skipping the NotOperator() below; file a
         // LLVM bug?
-        llvm::Value *matchesCaseValue = 
+        llvm::Value *matchesCaseValue =
             CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_EQ,
                     switchExpr, valueVec, "cmp_case_value");
         matchesCaseValue = I1VecToBoolVec(matchesCaseValue);

         llvm::Value *notMatchesCaseValue = NotOperator(matchesCaseValue);
-        matchesDefault = BinaryOperator(llvm::Instruction::And, matchesDefault, 
+        matchesDefault = BinaryOperator(llvm::Instruction::And, matchesDefault,
                                         notMatchesCaseValue, "default&~case_match");
     }

@@ -1020,7 +1020,7 @@ FunctionEmitContext::EmitDefaultLabel(bool checkMask, SourcePos pos) {
     // label falling through; compute the updated mask by ANDing with the
     // current mask.
     llvm::Value *oldMask = GetInternalMask();
-    llvm::Value *newMask = BinaryOperator(llvm::Instruction::Or, oldMask, 
+    llvm::Value *newMask = BinaryOperator(llvm::Instruction::Or, oldMask,
                                           matchesDefault, "old_mask|matches_default");
     SetInternalMask(newMask);

@@ -1059,7 +1059,7 @@ FunctionEmitContext::EmitCaseLabel(int value, bool checkMask, SourcePos pos) {
     // case statement.
     llvm::Value *valueVec = (switchExpr->getType() == LLVMTypes::Int32VectorType) ?
         LLVMInt32Vector(value) : LLVMInt64Vector(value);
-    llvm::Value *matchesCaseValue = 
+    llvm::Value *matchesCaseValue =
         CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_EQ,
                 switchExpr, valueVec, "cmp_case_value");
     matchesCaseValue = I1VecToBoolVec(matchesCaseValue);
@@ -1072,7 +1072,7 @@ FunctionEmitContext::EmitCaseLabel(int value, bool checkMask, SourcePos pos) {

     // Take the surviving lanes and turn on the mask for them.
     llvm::Value *oldMask = GetInternalMask();
-    llvm::Value *newMask = BinaryOperator(llvm::Instruction::Or, oldMask, 
+    llvm::Value *newMask = BinaryOperator(llvm::Instruction::Or, oldMask,
                                           matchesCaseValue, "mask|case_match");
     SetInternalMask(newMask);

@@ -1094,13 +1094,13 @@ FunctionEmitContext::SwitchInst(llvm::Value *expr, llvm::BasicBlock *bbDefault,
     defaultBlock = bbDefault;
     caseBlocks = new std::vector<std::pair<int, llvm::BasicBlock *> >(bbCases);
     nextBlocks = new std::map<llvm::BasicBlock *, llvm::BasicBlock *>(bbNext);
-    switchConditionWasUniform = 
+    switchConditionWasUniform =
         (llvm::isa<llvm::VectorType>(expr->getType()) == false);

     if (switchConditionWasUniform == true) {
         // For a uniform switch condition, just wire things up to the LLVM
         // switch instruction.
-        llvm::SwitchInst *s = llvm::SwitchInst::Create(expr, bbDefault, 
+        llvm::SwitchInst *s = llvm::SwitchInst::Create(expr, bbDefault,
                                                        bbCases.size(), bblock);
         for (int i = 0; i < (int)bbCases.size(); ++i) {
             if (expr->getType() == LLVMTypes::Int32Type)
@@ -1135,7 +1135,7 @@ FunctionEmitContext::SwitchInst(llvm::Value *expr, llvm::BasicBlock *bbDefault,

 int
-FunctionEmitContext::VaryingCFDepth() const { 
+FunctionEmitContext::VaryingCFDepth() const {
     int sum = 0;
     for (unsigned int i = 0; i < controlFlowInfo.size(); ++i)
         if (controlFlowInfo[i]->IsVarying())
@@ -1228,7 +1228,7 @@ FunctionEmitContext::CurrentLanesReturned(Expr *expr, bool doCoherenceCheck) {
                   "statement for non-void function.");
             return;
         }
-        
+
         expr = TypeConvertExpr(expr, returnType, "return statement");
         if (expr != NULL) {
             llvm::Value *retVal = expr->GetValue(this);
@@ -1241,7 +1241,7 @@ FunctionEmitContext::CurrentLanesReturned(Expr *expr, bool doCoherenceCheck) {
                     // in the return value memory; this preserves the return
                     // values from other lanes that may have executed return
                     // statements previously.
-                    StoreInst(retVal, returnValuePtr, GetInternalMask(), 
+                    StoreInst(retVal, returnValuePtr, GetInternalMask(),
                               returnType, PointerType::GetUniform(returnType));
             }
         }
@@ -1258,17 +1258,17 @@ FunctionEmitContext::CurrentLanesReturned(Expr *expr, bool doCoherenceCheck) {
     else {
         // Otherwise we update the returnedLanes value by ANDing it with
         // the current lane mask.
-        llvm::Value *oldReturnedLanes = 
+        llvm::Value *oldReturnedLanes =
             LoadInst(returnedLanesPtr, "old_returned_lanes");
-        llvm::Value *newReturnedLanes = 
-            BinaryOperator(llvm::Instruction::Or, oldReturnedLanes, 
+        llvm::Value *newReturnedLanes =
+            BinaryOperator(llvm::Instruction::Or, oldReturnedLanes,
                            GetInternalMask(), "old_mask|returned_lanes");
-        
+
         // For 'coherent' return statements, emit code to check if all
         // lanes have returned
         if (doCoherenceCheck) {
             // if newReturnedLanes == functionMaskValue, get out of here!
-            llvm::Value *cmp = MasksAllEqual(functionMaskValue, 
+            llvm::Value *cmp = MasksAllEqual(functionMaskValue,
                                              newReturnedLanes);
             llvm::BasicBlock *bDoReturn = CreateBasicBlock("do_return");
             llvm::BasicBlock *bNoReturn = CreateBasicBlock("no_return");
@@ -1353,7 +1353,7 @@ FunctionEmitContext::LaneMask(llvm::Value *v) {
         AssertPos(currentPos, mm.size() == 1);
     else
         // There should be one with signed int signature, one unsigned int.
-        AssertPos(currentPos, mm.size() == 2); 
+        AssertPos(currentPos, mm.size() == 2);
     // We can actually call either one, since both are i32s as far as
     // LLVM's type system is concerned...
     llvm::Function *fmm = mm[0]->function;
@@ -1385,9 +1385,9 @@ FunctionEmitContext::GetStringPtr(const std::string &str) {
     llvm::Constant *lstr = llvm::ConstantDataArray::getString(*g->ctx, str);
     llvm::GlobalValue::LinkageTypes linkage = llvm::GlobalValue::InternalLinkage;
     llvm::Value *lstrPtr = new llvm::GlobalVariable(*m->module, lstr->getType(),
-                                                    true /*isConst*/, 
+                                                    true /*isConst*/,
                                                     linkage, lstr, "__str");
-    return new llvm::BitCastInst(lstrPtr, LLVMTypes::VoidPointerType, 
+    return new llvm::BitCastInst(lstrPtr, LLVMTypes::VoidPointerType,
                                  "str_void_ptr", bblock);
 }

@@ -1408,18 +1408,18 @@ FunctionEmitContext::I1VecToBoolVec(llvm::Value *b) {
     if (g->target.maskBitCount == 1)
         return b;

-    llvm::ArrayType *at = 
+    llvm::ArrayType *at =
         llvm::dyn_cast<llvm::ArrayType>(b->getType());
     if (at) {
         // If we're given an array of vectors of i1s, then do the
         // conversion for each of the elements
-        llvm::Type *boolArrayType = 
+        llvm::Type *boolArrayType =
             llvm::ArrayType::get(LLVMTypes::BoolVectorType, at->getNumElements());
         llvm::Value *ret = llvm::UndefValue::get(boolArrayType);

         for (unsigned int i = 0; i < at->getNumElements(); ++i) {
             llvm::Value *elt = ExtractInst(b, i);
-            llvm::Value *sext = SExtInst(elt, LLVMTypes::BoolVectorType, 
+            llvm::Value *sext = SExtInst(elt, LLVMTypes::BoolVectorType,
                                          LLVMGetName(elt, "_to_boolvec32"));
             ret = InsertInst(ret, sext, i);
         }
@@ -1433,7 +1433,7 @@ FunctionEmitContext::I1VecToBoolVec(llvm::Value *b) {
 static llvm::Value *
 lGetStringAsValue(llvm::BasicBlock *bblock, const char *s) {
     llvm::Constant *sConstant = llvm::ConstantDataArray::getString(*g->ctx, s);
-    llvm::Value *sPtr = new llvm::GlobalVariable(*m->module, sConstant->getType(), 
+    llvm::Value *sPtr = new llvm::GlobalVariable(*m->module, sConstant->getType(),
                                                  true /* const */,
                                                  llvm::GlobalValue::InternalLinkage,
                                                  sConstant, s);
@@ -1465,8 +1465,8 @@ FunctionEmitContext::AddInstrumentationPoint(const char *note) {

 void
-FunctionEmitContext::SetDebugPos(SourcePos pos) { 
-    currentPos = pos; 
+FunctionEmitContext::SetDebugPos(SourcePos pos) {
+    currentPos = pos;
 }

@@ -1477,7 +1477,7 @@ FunctionEmitContext::GetDebugPos() const {

 void
-FunctionEmitContext::AddDebugPos(llvm::Value *value, const SourcePos *pos, 
+FunctionEmitContext::AddDebugPos(llvm::Value *value, const SourcePos *pos,
                                  llvm::DIScope *scope) {
     llvm::Instruction *inst = llvm::dyn_cast<llvm::Instruction>(value);
     if (inst != NULL && m->diBuilder) {
@@ -1486,7 +1486,7 @@ FunctionEmitContext::AddDebugPos(llvm::Value *value, const SourcePos *pos,
             // If first_line == 0, then we're in the middle of setting up
             // the standard library or the like; don't add debug positions
             // for those functions
-            inst->setDebugLoc(llvm::DebugLoc::get(p.first_line, p.first_column, 
+            inst->setDebugLoc(llvm::DebugLoc::get(p.first_line, p.first_column,
                                                   scope ? *scope : GetDIScope()));
     }
 }
@@ -1501,7 +1501,7 @@ FunctionEmitContext::StartScope() {
         else
             parentScope = diSubprogram;

-        llvm::DILexicalBlock lexicalBlock = 
+        llvm::DILexicalBlock lexicalBlock =
             m->diBuilder->createLexicalBlock(parentScope, diFile,
                                              currentPos.first_line,
                                              currentPos.first_column);
@@ -1520,7 +1520,7 @@ FunctionEmitContext::EndScope() {
 }


-llvm::DIScope 
+llvm::DIScope
 FunctionEmitContext::GetDIScope() const {
     AssertPos(currentPos, debugScopes.size() > 0);
     return debugScopes.back();
@@ -1535,7 +1535,7 @@ FunctionEmitContext::EmitVariableDebugInfo(Symbol *sym) {
     llvm::DIScope scope = GetDIScope();
     llvm::DIType diType = sym->type->GetDIType(scope);
     AssertPos(currentPos, diType.Verify());
-    llvm::DIVariable var = 
+    llvm::DIVariable var =
         m->diBuilder->createLocalVariable(llvm::dwarf::DW_TAG_auto_variable,
                                           scope,
                                           sym->name,
@@ -1544,7 +1544,7 @@ FunctionEmitContext::EmitVariableDebugInfo(Symbol *sym) {
                                           diType,
                                           true /* preserve through opts */);
     AssertPos(currentPos, var.Verify());
-    llvm::Instruction *declareInst = 
+    llvm::Instruction *declareInst =
         m->diBuilder->insertDeclare(sym->storagePtr, var, bblock);
     AddDebugPos(declareInst, &sym->pos, &scope);
 }
@@ -1560,7 +1560,7 @@ FunctionEmitContext::EmitFunctionParameterDebugInfo(Symbol *sym, int argNum) {
     AssertPos(currentPos, diType.Verify());

     int flags = 0;
-    llvm::DIVariable var = 
+    llvm::DIVariable var =
         m->diBuilder->createLocalVariable(llvm::dwarf::DW_TAG_arg_variable,
                                           scope,
                                           sym->name,
@@ -1571,7 +1571,7 @@ FunctionEmitContext::EmitFunctionParameterDebugInfo(Symbol *sym, int argNum) {
                                           flags,
                                           argNum+1);
     AssertPos(currentPos, var.Verify());
-    llvm::Instruction *declareInst = 
+    llvm::Instruction *declareInst =
         m->diBuilder->insertDeclare(sym->storagePtr, var, bblock);
     AddDebugPos(declareInst, &sym->pos, &scope);
 }
@@ -1584,25 +1584,25 @@ FunctionEmitContext::EmitFunctionParameterDebugInfo(Symbol *sym, int argNum) {
 */
 static int
 lArrayVectorWidth(llvm::Type *t) {
-    llvm::ArrayType *arrayType = 
+    llvm::ArrayType *arrayType =
         llvm::dyn_cast<llvm::ArrayType>(t);
     if (arrayType == NULL)
         return 0;

     // We shouldn't be seeing arrays of anything but vectors being passed
     // to things like FunctionEmitContext::BinaryOperator() as operands.
-    llvm::VectorType *vectorElementType = 
+    llvm::VectorType *vectorElementType =
         llvm::dyn_cast<llvm::VectorType>(arrayType->getElementType());
     Assert((vectorElementType != NULL &&
             (int)vectorElementType->getNumElements() == g->target.vectorWidth));
-    
+
     return (int)arrayType->getNumElements();
 }


 llvm::Value *
-FunctionEmitContext::BinaryOperator(llvm::Instruction::BinaryOps inst, 
-                                    llvm::Value *v0, llvm::Value *v1, 
+FunctionEmitContext::BinaryOperator(llvm::Instruction::BinaryOps inst,
+                                    llvm::Value *v0, llvm::Value *v1,
                                     const char *name) {
     if (v0 == NULL || v1 == NULL) {
         AssertPos(currentPos, m->errorCount > 0);
@@ -1613,7 +1613,7 @@ FunctionEmitContext::BinaryOperator(llvm::Instruction::BinaryOps inst,
     llvm::Type *type = v0->getType();
     int arraySize = lArrayVectorWidth(type);
     if (arraySize == 0) {
-        llvm::Instruction *bop = 
+        llvm::Instruction *bop =
             llvm::BinaryOperator::Create(inst, v0, v1, name ? name : "", bblock);
         AddDebugPos(bop);
         return bop;
@@ -1647,7 +1647,7 @@ FunctionEmitContext::NotOperator(llvm::Value *v, const char *name) {
     llvm::Type *type = v->getType();
     int arraySize = lArrayVectorWidth(type);
     if (arraySize == 0) {
-        llvm::Instruction *binst = 
+        llvm::Instruction *binst =
             llvm::BinaryOperator::CreateNot(v, name ? name : "not", bblock);
         AddDebugPos(binst);
         return binst;
@@ -1656,7 +1656,7 @@ FunctionEmitContext::NotOperator(llvm::Value *v, const char *name) {
     llvm::Value *ret = llvm::UndefValue::get(type);
     for (int i = 0; i < arraySize; ++i) {
         llvm::Value *a = ExtractInst(v, i);
-        llvm::Value *op = 
+        llvm::Value *op =
             llvm::BinaryOperator::CreateNot(a, name ? name : "not", bblock);
         AddDebugPos(op);
         ret = InsertInst(ret, op, i);
@@ -1671,25 +1671,25 @@ FunctionEmitContext::NotOperator(llvm::Value *v, const char *name) {
 // be returned from CmpInst with ispc VectorTypes).
 static llvm::Type *
 lGetMatchingBoolVectorType(llvm::Type *type) {
-    llvm::ArrayType *arrayType = 
+    llvm::ArrayType *arrayType =
         llvm::dyn_cast<llvm::ArrayType>(type);
     Assert(arrayType != NULL);

-    llvm::VectorType *vectorElementType = 
+    llvm::VectorType *vectorElementType =
         llvm::dyn_cast<llvm::VectorType>(arrayType->getElementType());
     Assert(vectorElementType != NULL);
     Assert((int)vectorElementType->getNumElements() == g->target.vectorWidth);

-    llvm::Type *base = 
+    llvm::Type *base =
         llvm::VectorType::get(LLVMTypes::BoolType, g->target.vectorWidth);
     return llvm::ArrayType::get(base, arrayType->getNumElements());
 }


 llvm::Value *
-FunctionEmitContext::CmpInst(llvm::Instruction::OtherOps inst, 
+FunctionEmitContext::CmpInst(llvm::Instruction::OtherOps inst,
                              llvm::CmpInst::Predicate pred,
-                             llvm::Value *v0, llvm::Value *v1, 
+                             llvm::Value *v0, llvm::Value *v1,
                              const char *name) {
     if (v0 == NULL || v1 == NULL) {
         AssertPos(currentPos, m->errorCount > 0);
@@ -1700,8 +1700,8 @@ FunctionEmitContext::CmpInst(llvm::Instruction::OtherOps inst,
     llvm::Type *type = v0->getType();
     int arraySize = lArrayVectorWidth(type);
     if (arraySize == 0) {
-        llvm::Instruction *ci = 
-            llvm::CmpInst::Create(inst, pred, v0, v1, name ? name : "cmp", 
+        llvm::Instruction *ci =
+            llvm::CmpInst::Create(inst, pred, v0, v1, name ? name : "cmp",
                                   bblock);
         AddDebugPos(ci);
         return ci;
@@ -1730,7 +1730,7 @@ FunctionEmitContext::SmearUniform(llvm::Value *value, const char *name) {
     llvm::Value *ret = NULL;
     llvm::Type *eltType = value->getType();

-    llvm::PointerType *pt = 
+    llvm::PointerType *pt =
         llvm::dyn_cast<llvm::PointerType>(eltType);
     if (pt != NULL) {
         // Varying pointers are represented as vectors of i32/i64s
@@ -1744,17 +1744,17 @@ FunctionEmitContext::SmearUniform(llvm::Value *value, const char *name) {
                                    g->target.vectorWidth));

     for (int i = 0; i < g->target.vectorWidth; ++i) {
-        llvm::Twine n = llvm::Twine("smear.") + llvm::Twine(name ? name : "") + 
+        llvm::Twine n = llvm::Twine("smear.") + llvm::Twine(name ? name : "") +
             llvm::Twine(i);
         ret = InsertInst(ret, value, i, n.str().c_str());
     }

     return ret;
 }
-    
+

 llvm::Value *
-FunctionEmitContext::BitCastInst(llvm::Value *value, llvm::Type *type, 
+FunctionEmitContext::BitCastInst(llvm::Value *value, llvm::Type *type,
                                  const char *name) {
     if (value == NULL) {
         AssertPos(currentPos, m->errorCount > 0);
@@ -1848,7 +1848,7 @@ FunctionEmitContext::IntToPtrInst(llvm::Value *value, llvm::Type *toType,
         }
     }

-    llvm::Instruction *inst = new llvm::IntToPtrInst(value, toType, name, 
+    llvm::Instruction *inst = new llvm::IntToPtrInst(value, toType, name,
                                                      bblock);
     AddDebugPos(inst);
     return inst;
@@ -1895,7 +1895,7 @@ FunctionEmitContext::CastInst(llvm::Instruction::CastOps op, llvm::Value *value,

 llvm::Instruction *
-FunctionEmitContext::FPCastInst(llvm::Value *value, llvm::Type *type, 
+FunctionEmitContext::FPCastInst(llvm::Value *value, llvm::Type *type,
                                 const char *name) {
     if (value == NULL) {
         AssertPos(currentPos, m->errorCount > 0);
@@ -1914,7 +1914,7 @@ FunctionEmitContext::FPCastInst(llvm::Value *value, llvm::Type *type,

 llvm::Instruction *
-FunctionEmitContext::SExtInst(llvm::Value *value, llvm::Type *type, 
+FunctionEmitContext::SExtInst(llvm::Value *value, llvm::Type *type,
                               const char *name) {
     if (value == NULL) {
         AssertPos(currentPos, m->errorCount > 0);
@@ -1933,7 +1933,7 @@ FunctionEmitContext::SExtInst(llvm::Value *value, llvm::Type *type,

 llvm::Instruction *
-FunctionEmitContext::ZExtInst(llvm::Value *value, llvm::Type *type, 
+FunctionEmitContext::ZExtInst(llvm::Value *value, llvm::Type *type,
                               const char *name) {
     if (value == NULL) {
         AssertPos(currentPos, m->errorCount > 0);
@@ -1958,20 +1958,20 @@ FunctionEmitContext::ZExtInst(llvm::Value *value, llvm::Type *type,
     the size of the object that the pointer points to.
 */
 llvm::Value *
-FunctionEmitContext::applyVaryingGEP(llvm::Value *basePtr, llvm::Value *index, 
+FunctionEmitContext::applyVaryingGEP(llvm::Value *basePtr, llvm::Value *index,
                                      const Type *ptrType) {
     // Find the scale factor for the index (i.e. the size of the object
     // that the pointer(s) point(s) to.
     const Type *scaleType = ptrType->GetBaseType();
     llvm::Value *scale = g->target.SizeOf(scaleType->LLVMType(g->ctx), bblock);

-    bool indexIsVarying = 
+    bool indexIsVarying =
         llvm::isa<llvm::VectorType>(index->getType());
     llvm::Value *offset = NULL;
     if (indexIsVarying == false) {
         // Truncate or sign extend the index as appropriate to a 32 or
         // 64-bit type.
- if ((g->target.is32Bit || g->opt.force32BitAddressing) && + if ((g->target.is32Bit || g->opt.force32BitAddressing) && index->getType() == LLVMTypes::Int64Type) index = TruncInst(index, LLVMTypes::Int32Type); else if ((!g->target.is32Bit && !g->opt.force32BitAddressing) && @@ -1988,9 +1988,9 @@ FunctionEmitContext::applyVaryingGEP(llvm::Value *basePtr, llvm::Value *index, else { // Similarly, truncate or sign extend the index to be a 32 or 64 // bit vector type - if ((g->target.is32Bit || g->opt.force32BitAddressing) && + if ((g->target.is32Bit || g->opt.force32BitAddressing) && index->getType() == LLVMTypes::Int64VectorType) - index = TruncInst(index, LLVMTypes::Int32VectorType); + index = TruncInst(index, LLVMTypes::Int32VectorType); else if ((!g->target.is32Bit && !g->opt.force32BitAddressing) && index->getType() == LLVMTypes::Int32VectorType) index = SExtInst(index, LLVMTypes::Int64VectorType); @@ -1998,7 +1998,7 @@ FunctionEmitContext::applyVaryingGEP(llvm::Value *basePtr, llvm::Value *index, scale = SmearUniform(scale); // offset = index * scale - offset = BinaryOperator(llvm::Instruction::Mul, scale, index, + offset = BinaryOperator(llvm::Instruction::Mul, scale, index, LLVMGetName("mul", scale, index)); } @@ -2006,18 +2006,18 @@ FunctionEmitContext::applyVaryingGEP(llvm::Value *basePtr, llvm::Value *index, // 32 bits, we still have to convert to a 64-bit value before we // actually add the offset to the pointer. if (g->target.is32Bit == false && g->opt.force32BitAddressing == true) - offset = SExtInst(offset, LLVMTypes::Int64VectorType, + offset = SExtInst(offset, LLVMTypes::Int64VectorType, LLVMGetName(offset, "_to_64")); // Smear out the pointer to be varying; either the base pointer or the // index must be varying for this method to be called. - bool baseIsUniform = + bool baseIsUniform = (llvm::isa(basePtr->getType())); AssertPos(currentPos, baseIsUniform == false || indexIsVarying == true); llvm::Value *varyingPtr = baseIsUniform ? SmearUniform(basePtr) : basePtr; // newPtr = ptr + offset - return BinaryOperator(llvm::Instruction::Add, varyingPtr, offset, + return BinaryOperator(llvm::Instruction::Add, varyingPtr, offset, LLVMGetName(basePtr, "_offset")); } @@ -2065,7 +2065,7 @@ FunctionEmitContext::MatchIntegerTypes(llvm::Value **v0, llvm::Value **v1) { */ static llvm::Value * lComputeSliceIndex(FunctionEmitContext *ctx, int soaWidth, - llvm::Value *indexValue, llvm::Value *ptrSliceOffset, + llvm::Value *indexValue, llvm::Value *ptrSliceOffset, llvm::Value **newSliceOffset) { // Compute the log2 of the soaWidth. 
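// ------------------------------------------------------------------
// [Sketch] The slice-index arithmetic that lComputeSliceIndex emits,
// written as plain scalar code; it assumes, as the emitted code does,
// that soaWidth is a positive power of two. Names are illustrative.
#include <cassert>
#include <cstdint>

inline int64_t computeSliceIndexModel(int soaWidth, int64_t index,
                                      int64_t sliceOffset,
                                      int64_t *newSliceOffset) {
    assert(soaWidth > 0 && (soaWidth & (soaWidth - 1)) == 0);
    int logWidth = 0;                      // log2(soaWidth)
    while ((1 << logWidth) < soaWidth)
        ++logWidth;
    // indexSum = index + sliceOffset
    int64_t indexSum = index + sliceOffset;
    // low bits select the element within one soa<> struct...
    *newSliceOffset = indexSum & (soaWidth - 1);
    // ...and the high bits select which soa<> struct (the major index).
    return indexSum >> logWidth;
}
// ------------------------------------------------------------------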
Assert(soaWidth > 0); @@ -2082,7 +2082,7 @@ lComputeSliceIndex(FunctionEmitContext *ctx, int soaWidth, llvm::Value *shift = LLVMIntAsType(logWidth, indexType); llvm::Value *mask = LLVMIntAsType(soaWidth-1, indexType); - llvm::Value *indexSum = + llvm::Value *indexSum = ctx->BinaryOperator(llvm::Instruction::Add, indexValue, ptrSliceOffset, "index_sum"); @@ -2109,7 +2109,7 @@ FunctionEmitContext::MakeSlicePointer(llvm::Value *ptr, llvm::Value *offset) { std::vector eltTypes; eltTypes.push_back(ptr->getType()); eltTypes.push_back(offset->getType()); - llvm::StructType *st = + llvm::StructType *st = llvm::StructType::get(*g->ctx, eltTypes); llvm::Value *ret = llvm::UndefValue::get(st); @@ -2120,7 +2120,7 @@ FunctionEmitContext::MakeSlicePointer(llvm::Value *ptr, llvm::Value *offset) { llvm::Value * -FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index, +FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index, const Type *ptrRefType, const char *name) { if (basePtr == NULL || index == NULL) { AssertPos(currentPos, m->errorCount > 0); @@ -2148,7 +2148,7 @@ FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index, // the soa<> structs with. llvm::Value *newSliceOffset; int soaWidth = ptrType->GetBaseType()->GetSOAWidth(); - index = lComputeSliceIndex(this, soaWidth, index, + index = lComputeSliceIndex(this, soaWidth, index, ptrSliceOffset, &newSliceOffset); ptrSliceOffset = newSliceOffset; } @@ -2169,7 +2169,7 @@ FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index, else if (ptrType->IsVaryingType()) AssertPos(currentPos, llvm::isa(basePtr->getType())); - bool indexIsVaryingType = + bool indexIsVaryingType = llvm::isa(index->getType()); if (indexIsVaryingType == false && ptrType->IsUniformType() == true) { @@ -2177,7 +2177,7 @@ FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index, // uniform, so just emit the regular LLVM GEP instruction llvm::Value *ind[1] = { index }; llvm::ArrayRef arrayRef(&ind[0], &ind[1]); - llvm::Instruction *inst = + llvm::Instruction *inst = llvm::GetElementPtrInst::Create(basePtr, arrayRef, name ? 
name : "gep", bblock); AddDebugPos(inst); @@ -2189,7 +2189,7 @@ FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index, llvm::Value * -FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index0, +FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index0, llvm::Value *index1, const Type *ptrRefType, const char *name) { if (basePtr == NULL || index0 == NULL || index1 == NULL) { @@ -2221,23 +2221,23 @@ FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index0 } llvm::Value *p = GetElementPtrInst(ExtractInst(basePtr, 0), index0, - index1, ptrType->GetAsNonSlice(), + index1, ptrType->GetAsNonSlice(), name); return MakeSlicePointer(p, ptrSliceOffset); } - bool index0IsVaryingType = + bool index0IsVaryingType = llvm::isa(index0->getType()); - bool index1IsVaryingType = + bool index1IsVaryingType = llvm::isa(index1->getType()); - if (index0IsVaryingType == false && index1IsVaryingType == false && + if (index0IsVaryingType == false && index1IsVaryingType == false && ptrType->IsUniformType() == true) { // The easy case: both the base pointer and the indices are // uniform, so just emit the regular LLVM GEP instruction llvm::Value *indices[2] = { index0, index1 }; llvm::ArrayRef arrayRef(&indices[0], &indices[2]); - llvm::Instruction *inst = + llvm::Instruction *inst = llvm::GetElementPtrInst::Create(basePtr, arrayRef, name ? name : "gep", bblock); AddDebugPos(inst); @@ -2253,11 +2253,11 @@ FunctionEmitContext::GetElementPtrInst(llvm::Value *basePtr, llvm::Value *index0 const SequentialType *st = CastType(baseType); AssertPos(currentPos, st != NULL); - bool ptr0IsUniform = + bool ptr0IsUniform = llvm::isa(ptr0->getType()); const Type *ptr0BaseType = st->GetElementType(); const Type *ptr0Type = ptr0IsUniform ? - PointerType::GetUniform(ptr0BaseType) : + PointerType::GetUniform(ptr0BaseType) : PointerType::GetVarying(ptr0BaseType); return applyVaryingGEP(ptr0, index1, ptr0Type); @@ -2272,10 +2272,10 @@ FunctionEmitContext::AddElementOffset(llvm::Value *fullBasePtr, int elementNum, if (resultPtrType != NULL) AssertPos(currentPos, ptrRefType != NULL); - llvm::PointerType *llvmPtrType = + llvm::PointerType *llvmPtrType = llvm::dyn_cast(fullBasePtr->getType()); if (llvmPtrType != NULL) { - llvm::StructType *llvmStructType = + llvm::StructType *llvmStructType = llvm::dyn_cast(llvmPtrType->getElementType()); if (llvmStructType != NULL && llvmStructType->isSized() == false) { AssertPos(currentPos, m->errorCount > 0); @@ -2299,7 +2299,7 @@ FunctionEmitContext::AddElementOffset(llvm::Value *fullBasePtr, int elementNum, // we have a slice pointer instead of looking at ptrType; this is also // unfortunate... llvm::Value *basePtr = fullBasePtr; - bool baseIsSlicePtr = + bool baseIsSlicePtr = llvm::isa(fullBasePtr->getType()); const PointerType *rpt; if (baseIsSlicePtr) { @@ -2331,7 +2331,7 @@ FunctionEmitContext::AddElementOffset(llvm::Value *fullBasePtr, int elementNum, // If the pointer is uniform, we can use the regular LLVM GEP. llvm::Value *offsets[2] = { LLVMInt32(0), LLVMInt32(elementNum) }; llvm::ArrayRef arrayRef(&offsets[0], &offsets[2]); - resultPtr = + resultPtr = llvm::GetElementPtrInst::Create(basePtr, arrayRef, name ? 
name : "struct_offset", bblock); } @@ -2352,7 +2352,7 @@ FunctionEmitContext::AddElementOffset(llvm::Value *fullBasePtr, int elementNum, const SequentialType *st = CastType(ptrType->GetBaseType()); AssertPos(currentPos, st != NULL); - llvm::Value *size = + llvm::Value *size = g->target.SizeOf(st->GetElementType()->LLVMType(g->ctx), bblock); llvm::Value *scale = (g->target.is32Bit || g->opt.force32BitAddressing) ? LLVMInt32(elementNum) : LLVMInt64(elementNum); @@ -2367,7 +2367,7 @@ FunctionEmitContext::AddElementOffset(llvm::Value *fullBasePtr, int elementNum, // we add the offset to the varying pointers. offset = SExtInst(offset, LLVMTypes::Int64VectorType, "offset_to_64"); - resultPtr = BinaryOperator(llvm::Instruction::Add, basePtr, offset, + resultPtr = BinaryOperator(llvm::Instruction::Add, basePtr, offset, "struct_ptr_offset"); } @@ -2378,7 +2378,7 @@ FunctionEmitContext::AddElementOffset(llvm::Value *fullBasePtr, int elementNum, else return resultPtr; } - + llvm::Value * FunctionEmitContext::LoadInst(llvm::Value *ptr, const char *name) { @@ -2387,7 +2387,7 @@ FunctionEmitContext::LoadInst(llvm::Value *ptr, const char *name) { return NULL; } - llvm::PointerType *pt = + llvm::PointerType *pt = llvm::dyn_cast(ptr->getType()); AssertPos(currentPos, pt != NULL); @@ -2420,8 +2420,8 @@ lFinalSliceOffset(FunctionEmitContext *ctx, llvm::Value *ptr, // The final pointer type is a uniform or varying pointer to the // underlying uniform type, depending on whether the given pointer is // uniform or varying. - *ptrType = (*ptrType)->IsUniformType() ? - PointerType::GetUniform(unifBaseType) : + *ptrType = (*ptrType)->IsUniformType() ? + PointerType::GetUniform(unifBaseType) : PointerType::GetVarying(unifBaseType); // For uniform pointers, bitcast to a pointer to the uniform element @@ -2454,7 +2454,7 @@ FunctionEmitContext::loadUniformFromSOA(llvm::Value *ptr, llvm::Value *mask, for (int i = 0; i < ct->GetElementCount(); ++i) { const PointerType *eltPtrType; - llvm::Value *eltPtr = AddElementOffset(ptr, i, ptrType, + llvm::Value *eltPtr = AddElementOffset(ptr, i, ptrType, "elt_offset", &eltPtrType); llvm::Value *eltValue = LoadInst(eltPtr, mask, eltPtrType, name); retValue = InsertInst(retValue, eltValue, i, "set_value"); @@ -2536,7 +2536,7 @@ FunctionEmitContext::LoadInst(llvm::Value *ptr, llvm::Value *mask, llvm::Value * -FunctionEmitContext::gather(llvm::Value *ptr, const PointerType *ptrType, +FunctionEmitContext::gather(llvm::Value *ptr, const PointerType *ptrType, llvm::Value *mask, const char *name) { // We should have a varying pointer if we get here... 
AssertPos(currentPos, ptrType->IsVaryingType()); @@ -2544,7 +2544,7 @@ FunctionEmitContext::gather(llvm::Value *ptr, const PointerType *ptrType, const Type *returnType = ptrType->GetBaseType()->GetAsVaryingType(); llvm::Type *llvmReturnType = returnType->LLVMType(g->ctx); - const CollectionType *collectionType = + const CollectionType *collectionType = CastType(ptrType->GetBaseType()); if (collectionType != NULL) { // For collections, recursively gather element wise to find the @@ -2553,7 +2553,7 @@ FunctionEmitContext::gather(llvm::Value *ptr, const PointerType *ptrType, for (int i = 0; i < collectionType->GetElementCount(); ++i) { const PointerType *eltPtrType; - llvm::Value *eltPtr = + llvm::Value *eltPtr = AddElementOffset(ptr, i, ptrType, "gather_elt_ptr", &eltPtrType); eltPtr = addVaryingOffsetsIfNeeded(eltPtr, eltPtrType); @@ -2583,26 +2583,26 @@ FunctionEmitContext::gather(llvm::Value *ptr, const PointerType *ptrType, const PointerType *pt = CastType(returnType); const char *funcName = NULL; if (pt != NULL) - funcName = g->target.is32Bit ? "__pseudo_gather32_i32" : + funcName = g->target.is32Bit ? "__pseudo_gather32_i32" : "__pseudo_gather64_i64"; else if (llvmReturnType == LLVMTypes::DoubleVectorType) funcName = g->target.is32Bit ? "__pseudo_gather32_double" : "__pseudo_gather64_double"; else if (llvmReturnType == LLVMTypes::Int64VectorType) - funcName = g->target.is32Bit ? "__pseudo_gather32_i64" : + funcName = g->target.is32Bit ? "__pseudo_gather32_i64" : "__pseudo_gather64_i64"; else if (llvmReturnType == LLVMTypes::FloatVectorType) - funcName = g->target.is32Bit ? "__pseudo_gather32_float" : + funcName = g->target.is32Bit ? "__pseudo_gather32_float" : "__pseudo_gather64_float"; else if (llvmReturnType == LLVMTypes::Int32VectorType) - funcName = g->target.is32Bit ? "__pseudo_gather32_i32" : + funcName = g->target.is32Bit ? "__pseudo_gather32_i32" : "__pseudo_gather64_i32"; else if (llvmReturnType == LLVMTypes::Int16VectorType) - funcName = g->target.is32Bit ? "__pseudo_gather32_i16" : + funcName = g->target.is32Bit ? "__pseudo_gather32_i16" : "__pseudo_gather64_i16"; else { AssertPos(currentPos, llvmReturnType == LLVMTypes::Int8VectorType); - funcName = g->target.is32Bit ? "__pseudo_gather32_i8" : + funcName = g->target.is32Bit ? "__pseudo_gather32_i8" : "__pseudo_gather64_i8"; } @@ -2623,7 +2623,7 @@ FunctionEmitContext::gather(llvm::Value *ptr, const PointerType *ptrType, /** Add metadata to the given instruction to encode the current source file position. This data is used in the lGetSourcePosFromMetadata() - function in opt.cpp. + function in opt.cpp. */ void FunctionEmitContext::addGSMetadata(llvm::Value *v, SourcePos pos) { @@ -2654,8 +2654,8 @@ FunctionEmitContext::addGSMetadata(llvm::Value *v, SourcePos pos) { llvm::Value * -FunctionEmitContext::AllocaInst(llvm::Type *llvmType, - const char *name, int align, +FunctionEmitContext::AllocaInst(llvm::Type *llvmType, + const char *name, int align, bool atEntryBlock) { if (llvmType == NULL) { AssertPos(currentPos, m->errorCount > 0); @@ -2680,9 +2680,9 @@ FunctionEmitContext::AllocaInst(llvm::Type *llvmType, // unlikely that this array will be loaded into varying variables with // what will be aligned accesses if the uniform -> varying load is done // in regular chunks. 
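// ------------------------------------------------------------------
// [Worked example of the alignment rule below -- the 8-wide figure is
// illustrative, not tied to a specific target] When align == 0 and the
// type is an array of non-vector elements, the code picks
//     align = 4 * nativeVectorWidth
// so e.g. an 8-wide target pads such arrays to 4 * 8 = 32 bytes, the
// size of a full gang of 32-bit values, which keeps chunked
// uniform -> varying loads aligned.
// ------------------------------------------------------------------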
- llvm::ArrayType *arrayType = + llvm::ArrayType *arrayType = llvm::dyn_cast(llvmType); - if (align == 0 && arrayType != NULL && + if (align == 0 && arrayType != NULL && !llvm::isa(arrayType->getElementType())) align = 4 * g->target.nativeVectorWidth; @@ -2723,7 +2723,7 @@ FunctionEmitContext::maskedStore(llvm::Value *value, llvm::Value *ptr, continue; } llvm::Value *eltValue = ExtractInst(value, i, "value_member"); - llvm::Value *eltPtr = + llvm::Value *eltPtr = AddElementOffset(ptr, i, ptrType, "struct_ptr_ptr"); const Type *eltPtrType = PointerType::GetUniform(eltType); StoreInst(eltValue, eltPtr, mask, eltType, eltPtrType); @@ -2744,7 +2744,7 @@ FunctionEmitContext::maskedStore(llvm::Value *value, llvm::Value *ptr, if (pt->IsSlice()) { // Masked store of (varying) slice pointer. AssertPos(currentPos, pt->IsVaryingType()); - + // First, extract the pointer from the slice struct and masked // store that. llvm::Value *v0 = ExtractInst(value, 0); @@ -2819,17 +2819,17 @@ FunctionEmitContext::maskedStore(llvm::Value *value, llvm::Value *ptr, lvalue (which should be an array of pointers with size equal to the target's vector width). We want to store each rvalue element at the corresponding pointer's location, *if* the mask for the corresponding - program instance are on. If they're off, don't do anything. + program instance is on. If it's off, don't do anything. */ void -FunctionEmitContext::scatter(llvm::Value *value, llvm::Value *ptr, +FunctionEmitContext::scatter(llvm::Value *value, llvm::Value *ptr, const Type *valueType, const Type *origPt, llvm::Value *mask) { const PointerType *ptrType = CastType(origPt); AssertPos(currentPos, ptrType != NULL); AssertPos(currentPos, ptrType->IsVaryingType()); - const CollectionType *srcCollectionType = + const CollectionType *srcCollectionType = CastType(valueType); if (srcCollectionType != NULL) { // We're scattering a collection type--we need to keep track of the @@ -2843,7 +2843,7 @@ FunctionEmitContext::scatter(llvm::Value *value, llvm::Value *ptr, const CollectionType *dstCollectionType = CastType(ptrType->GetBaseType()); AssertPos(currentPos, dstCollectionType != NULL); - + // Scatter the collection elements individually for (int i = 0; i < srcCollectionType->GetElementCount(); ++i) { // First, get the values for the current element out of the @@ -2924,7 +2924,7 @@ FunctionEmitContext::scatter(llvm::Value *value, llvm::Value *ptr, llvm::Function *scatterFunc = m->module->getFunction(funcName); AssertPos(currentPos, scatterFunc != NULL); - + AddInstrumentationPoint("scatter"); std::vector args; @@ -3005,7 +3005,7 @@ void FunctionEmitContext::storeUniformToSOA(llvm::Value *value, llvm::Value *ptr, llvm::Value *mask, const Type *valueType, const PointerType *ptrType) { - AssertPos(currentPos, Type::EqualIgnoringConst(ptrType->GetBaseType()->GetAsUniformType(), + AssertPos(currentPos, Type::EqualIgnoringConst(ptrType->GetBaseType()->GetAsUniformType(), valueType)); const CollectionType *ct = CastType(valueType); @@ -3015,7 +3015,7 @@ FunctionEmitContext::storeUniformToSOA(llvm::Value *value, llvm::Value *ptr, llvm::Value *eltValue = ExtractInst(value, i); const Type *eltType = ct->GetElementType(i); const PointerType *dstEltPtrType; - llvm::Value *dstEltPtr = + llvm::Value *dstEltPtr = AddElementOffset(ptr, i, ptrType, "slice_offset", &dstEltPtrType); StoreInst(eltValue, dstEltPtr, mask, eltType, dstEltPtrType); @@ -3032,7 +3032,7 @@ FunctionEmitContext::storeUniformToSOA(llvm::Value *value, llvm::Value *ptr, void
-FunctionEmitContext::MemcpyInst(llvm::Value *dest, llvm::Value *src, +FunctionEmitContext::MemcpyInst(llvm::Value *dest, llvm::Value *src, llvm::Value *count, llvm::Value *align) { dest = BitCastInst(dest, LLVMTypes::VoidPointerType); src = BitCastInst(src, LLVMTypes::VoidPointerType); @@ -3043,8 +3043,8 @@ FunctionEmitContext::MemcpyInst(llvm::Value *dest, llvm::Value *src, if (align == NULL) align = LLVMInt32(1); - llvm::Constant *mcFunc = - m->module->getOrInsertFunction("llvm.memcpy.p0i8.p0i8.i64", + llvm::Constant *mcFunc = + m->module->getOrInsertFunction("llvm.memcpy.p0i8.p0i8.i64", LLVMTypes::VoidType, LLVMTypes::VoidPointerType, LLVMTypes::VoidPointerType, LLVMTypes::Int64Type, LLVMTypes::Int32Type, LLVMTypes::BoolType, NULL); @@ -3069,7 +3069,7 @@ FunctionEmitContext::BranchInst(llvm::BasicBlock *dest) { void -FunctionEmitContext::BranchInst(llvm::BasicBlock *trueBlock, +FunctionEmitContext::BranchInst(llvm::BasicBlock *trueBlock, llvm::BasicBlock *falseBlock, llvm::Value *test) { if (test == NULL) { @@ -3077,7 +3077,7 @@ FunctionEmitContext::BranchInst(llvm::BasicBlock *trueBlock, return; } - llvm::Instruction *b = + llvm::Instruction *b = llvm::BranchInst::Create(trueBlock, falseBlock, test, bblock); AddDebugPos(b); } @@ -3107,7 +3107,7 @@ FunctionEmitContext::ExtractInst(llvm::Value *v, int elt, const char *name) { llvm::Value * -FunctionEmitContext::InsertInst(llvm::Value *v, llvm::Value *eltVal, int elt, +FunctionEmitContext::InsertInst(llvm::Value *v, llvm::Value *eltVal, int elt, const char *name) { if (v == NULL || eltVal == NULL) { AssertPos(currentPos, m->errorCount > 0); @@ -3122,7 +3122,7 @@ FunctionEmitContext::InsertInst(llvm::Value *v, llvm::Value *eltVal, int elt, llvm::Instruction *ii = NULL; if (llvm::isa(v->getType())) - ii = llvm::InsertElementInst::Create(v, eltVal, LLVMInt32(elt), + ii = llvm::InsertElementInst::Create(v, eltVal, LLVMInt32(elt), name, bblock); else ii = llvm::InsertValueInst::Create(v, eltVal, elt, name, bblock); @@ -3132,7 +3132,7 @@ FunctionEmitContext::InsertInst(llvm::Value *v, llvm::Value *eltVal, int elt, llvm::PHINode * -FunctionEmitContext::PhiNode(llvm::Type *type, int count, +FunctionEmitContext::PhiNode(llvm::Type *type, int count, const char *name) { llvm::PHINode *pn = llvm::PHINode::Create(type, count, name ? name : "phi", bblock); @@ -3164,7 +3164,7 @@ FunctionEmitContext::SelectInst(llvm::Value *test, llvm::Value *val0, function has. */ static unsigned int lCalleeArgCount(llvm::Value *callee, const FunctionType *funcType) { - llvm::FunctionType *ft = + llvm::FunctionType *ft = llvm::dyn_cast(callee->getType()); if (ft == NULL) { @@ -3205,7 +3205,7 @@ FunctionEmitContext::CallInst(llvm::Value *func, const FunctionType *funcType, if (llvm::isa(func->getType()) == false) { // Regular 'uniform' function call--just one function or function // pointer, so just emit the IR directly. - llvm::Instruction *ci = + llvm::Instruction *ci = llvm::CallInst::Create(func, argVals, name ? name : "", bblock); AddDebugPos(ci); return ci; @@ -3260,18 +3260,18 @@ FunctionEmitContext::CallInst(llvm::Value *func, const FunctionType *funcType, // Figure out the first lane that still needs its function // pointer to be called. 
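// ------------------------------------------------------------------
// [Sketch] Scalar model of the uniformization loop emitted below; the
// signature is an illustrative assumption. Each iteration peels off
// the group of still-active lanes that share one function-pointer
// value, makes a single call covering that group, and masks those
// lanes off until none remain.
template <int W>
void varyingCallModel(void (*const func[W])(const bool activeLanes[W]),
                      bool mask[W]) {
    for (;;) {
        int firstLane = -1;              // first lane still pending
        for (int i = 0; i < W; ++i)
            if (mask[i]) { firstLane = i; break; }
        if (firstLane == -1)
            break;                       // every lane has been handled
        auto fptr = func[firstLane];     // fptr = func[firstLane]
        bool callMask[W];
        for (int i = 0; i < W; ++i) {
            // callMask = currentMask & (func[i] == fptr)
            callMask[i] = mask[i] && (func[i] == fptr);
            if (callMask[i])
                mask[i] = false;         // currentMask &= ~callMask
        }
        fptr(callMask);                  // one call covers the group
    }
}
// ------------------------------------------------------------------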
llvm::Value *currentMask = LoadInst(maskPtr); - llvm::Function *cttz = + llvm::Function *cttz = m->module->getFunction("__count_trailing_zeros_i64"); AssertPos(currentPos, cttz != NULL); llvm::Value *firstLane64 = CallInst(cttz, NULL, LaneMask(currentMask), "first_lane64"); - llvm::Value *firstLane = + llvm::Value *firstLane = TruncInst(firstLane64, LLVMTypes::Int32Type, "first_lane32"); // Get the pointer to the function we're going to call this // time through: fptr = func[firstLane] - llvm::Value *fptr = - llvm::ExtractElementInst::Create(func, firstLane, + llvm::Value *fptr = + llvm::ExtractElementInst::Create(func, firstLane, "extract_fptr", bblock); // Smear it out into an array of function pointers @@ -3280,7 +3280,7 @@ FunctionEmitContext::CallInst(llvm::Value *func, const FunctionType *funcType, // fpOverlap = (fpSmearAsVec == fpOrigAsVec). This gives us a // mask for the set of program instances that have the same // value for their function pointer. - llvm::Value *fpOverlap = + llvm::Value *fpOverlap = CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_EQ, fptrSmear, func); fpOverlap = I1VecToBoolVec(fpOverlap); @@ -3290,7 +3290,7 @@ FunctionEmitContext::CallInst(llvm::Value *func, const FunctionType *funcType, // the case of any non-running program instances that happen to // have this function pointer value. // callMask = (currentMask & fpOverlap) - llvm::Value *callMask = + llvm::Value *callMask = BinaryOperator(llvm::Instruction::And, currentMask, fpOverlap, "call_mask"); @@ -3308,7 +3308,7 @@ FunctionEmitContext::CallInst(llvm::Value *func, const FunctionType *funcType, // Now, do a masked store into the memory allocated to // accumulate the result using the call mask. - if (callResult != NULL && + if (callResult != NULL && callResult->getType() != LLVMTypes::VoidType) { AssertPos(currentPos, resultPtr != NULL); StoreInst(callResult, resultPtr, callMask, returnType, @@ -3320,10 +3320,10 @@ FunctionEmitContext::CallInst(llvm::Value *func, const FunctionType *funcType, // Update the mask to turn off the program instances for which // we just called the function.
// currentMask = currentMask & ~callmask - llvm::Value *notCallMask = + llvm::Value *notCallMask = BinaryOperator(llvm::Instruction::Xor, callMask, LLVMMaskAllOn, "~callMask"); - currentMask = BinaryOperator(llvm::Instruction::And, currentMask, + currentMask = BinaryOperator(llvm::Instruction::And, currentMask, notCallMask, "currentMask&~callMask"); StoreInst(currentMask, maskPtr); @@ -3352,7 +3352,7 @@ FunctionEmitContext::CallInst(llvm::Value *func, const FunctionType *funcType, llvm::Value * FunctionEmitContext::CallInst(llvm::Value *func, const FunctionType *funcType, - llvm::Value *arg0, llvm::Value *arg1, + llvm::Value *arg0, llvm::Value *arg1, const char *name) { std::vector args; args.push_back(arg0); @@ -3386,7 +3386,7 @@ FunctionEmitContext::ReturnInst() { llvm::Value * -FunctionEmitContext::LaunchInst(llvm::Value *callee, +FunctionEmitContext::LaunchInst(llvm::Value *callee, std::vector &argVals, llvm::Value *launchCount) { if (callee == NULL) { @@ -3397,13 +3397,13 @@ FunctionEmitContext::LaunchInst(llvm::Value *callee, launchedTasks = true; AssertPos(currentPos, llvm::isa(callee)); - llvm::Type *argType = + llvm::Type *argType = (llvm::dyn_cast(callee))->arg_begin()->getType(); AssertPos(currentPos, llvm::PointerType::classof(argType)); - llvm::PointerType *pt = + llvm::PointerType *pt = llvm::dyn_cast(argType); AssertPos(currentPos, llvm::StructType::classof(pt->getElementType())); - llvm::StructType *argStructType = + llvm::StructType *argStructType = static_cast(pt->getElementType()); llvm::Function *falloc = m->module->getFunction("ISPCAlloc"); @@ -3457,7 +3457,7 @@ FunctionEmitContext::LaunchInst(llvm::Value *callee, void FunctionEmitContext::SyncInst() { llvm::Value *launchGroupHandle = LoadInst(launchGroupHandlePtr); - llvm::Value *nullPtrValue = + llvm::Value *nullPtrValue = llvm::Constant::getNullValue(LLVMTypes::VoidPointerType); llvm::Value *nonNull = CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_NE, @@ -3489,7 +3489,7 @@ FunctionEmitContext::SyncInst() { returns an updated pointer that incorporates these offsets if needed. 
*/ llvm::Value * -FunctionEmitContext::addVaryingOffsetsIfNeeded(llvm::Value *ptr, +FunctionEmitContext::addVaryingOffsetsIfNeeded(llvm::Value *ptr, const Type *ptrType) { // This should only be called for varying pointers const PointerType *pt = CastType(ptrType); @@ -3501,9 +3501,9 @@ FunctionEmitContext::addVaryingOffsetsIfNeeded(llvm::Value *ptr, if (baseType->IsVaryingType() == false) return ptr; - + // Find the size of a uniform element of the varying type - llvm::Type *llvmBaseUniformType = + llvm::Type *llvmBaseUniformType = baseType->GetAsUniformType()->LLVMType(g->ctx); llvm::Value *unifSize = g->target.SizeOf(llvmBaseUniformType, bblock); unifSize = SmearUniform(unifSize); @@ -3515,9 +3515,9 @@ FunctionEmitContext::addVaryingOffsetsIfNeeded(llvm::Value *ptr, LLVMInt32(i) : LLVMInt64(i); varyingOffsets = InsertInst(varyingOffsets, iValue, i, "varying_delta"); } - llvm::Value *offset = BinaryOperator(llvm::Instruction::Mul, unifSize, + llvm::Value *offset = BinaryOperator(llvm::Instruction::Mul, unifSize, varyingOffsets); - + if (g->opt.force32BitAddressing == true && g->target.is32Bit == false) // On 64-bit targets where we're doing 32-bit addressing // calculations, we need to convert to an i64 vector before adding diff --git a/ctx.h b/ctx.h index 14ceed7c..7e262310 100644 --- a/ctx.h +++ b/ctx.h @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file ctx.h @@ -75,7 +75,7 @@ public: @param firstStmtPos Source file position of the first statement in the function */ - FunctionEmitContext(Function *function, Symbol *funSym, + FunctionEmitContext(Function *function, Symbol *funSym, llvm::Function *llvmFunction, SourcePos firstStmtPos); ~FunctionEmitContext(); @@ -87,9 +87,9 @@ public: /** @name Current basic block management @{ */ - /** Returns the current basic block pointer */ + /** Returns the current basic block pointer */ llvm::BasicBlock *GetCurrentBasicBlock(); - + /** Set the given llvm::BasicBlock to be the basic block to emit forthcoming instructions into. */ void SetCurrentBasicBlock(llvm::BasicBlock *bblock); @@ -97,7 +97,7 @@ public: /** @name Mask management @{ */ - /** Returns the mask value at entry to the current function. */ + /** Returns the mask value at entry to the current function. */ llvm::Value *GetFunctionMask(); /** Returns the mask value corresponding to "varying" control flow @@ -106,7 +106,7 @@ public: llvm::Value *GetInternalMask(); /** Returns the complete current mask value--i.e. the logical AND of - the function entry mask and the internal mask. */ + the function entry mask and the internal mask. */ llvm::Value *GetFullMask(); /** Returns a pointer to storage in memory that stores the current full @@ -159,7 +159,7 @@ public: 'continue' statements should jump to (if all running lanes want to break or continue), uniformControlFlow indicates whether the loop condition is 'uniform'. 
*/ - void StartLoop(llvm::BasicBlock *breakTarget, llvm::BasicBlock *continueTarget, + void StartLoop(llvm::BasicBlock *breakTarget, llvm::BasicBlock *continueTarget, bool uniformControlFlow); /** Informs FunctionEmitContext of the value of the mask at the start @@ -213,8 +213,8 @@ public: @param caseBlocks vector that stores the mapping from label values after "case" statements to basic blocks corresponding to the "case" labels. - @param nextBlocks For each basic block for a "case" or "default" - label, this gives the basic block for the + @param nextBlocks For each basic block for a "case" or "default" + label, this gives the basic block for the immediately-following "case" or "default" label (or the basic block after the "switch" statement for the last label.) @@ -272,7 +272,7 @@ public: /** @} */ /** @name Small helper/utility routines - @{ + @{ */ /** Given a boolean mask value of type LLVMTypes::MaskType, return an i1 value that indicates if any of the mask lanes are on. */ @@ -332,7 +332,7 @@ public: llvm::Instruction for convenience; in calling code we often have Instructions stored using Value pointers; the code here returns silently if it's not actually given an instruction. */ - void AddDebugPos(llvm::Value *instruction, const SourcePos *pos = NULL, + void AddDebugPos(llvm::Value *instruction, const SourcePos *pos = NULL, llvm::DIScope *scope = NULL); /** Inform the debugging information generation code that a new scope @@ -361,7 +361,7 @@ public: instructions. See the LLVM assembly language reference manual (http://llvm.org/docs/LangRef.html) and the LLVM doxygen documentation (http://llvm.org/doxygen) for more information. Here we will only - document significant generalizations to the functionality of the + document significant generalizations to the functionality of the corresponding basic LLVM instructions. Beyond actually emitting the instruction, the implementations of @@ -377,7 +377,7 @@ public: this also handles applying the given operation to the vector elements. */ llvm::Value *BinaryOperator(llvm::Instruction::BinaryOps inst, - llvm::Value *v0, llvm::Value *v1, + llvm::Value *v0, llvm::Value *v1, const char *name = NULL); /** Emit the "not" operator. Like BinaryOperator(), this also handles applying the operation to the vector elements, if needed. */ llvm::Value *NotOperator(llvm::Value *v, const char *name = NULL); /** Emit a comparison instruction. If the operands are VectorTypes, then a value for the corresponding boolean VectorType is returned. */ - llvm::Value *CmpInst(llvm::Instruction::OtherOps inst, + llvm::Value *CmpInst(llvm::Instruction::OtherOps inst, llvm::CmpInst::Predicate pred, llvm::Value *v0, llvm::Value *v1, const char *name = NULL); @@ -407,17 +407,17 @@ public: const char *name = NULL); llvm::Instruction *CastInst(llvm::Instruction::CastOps op, llvm::Value *value, llvm::Type *type, const char *name = NULL); - llvm::Instruction *FPCastInst(llvm::Value *value, llvm::Type *type, + llvm::Instruction *FPCastInst(llvm::Value *value, llvm::Type *type, const char *name = NULL); - llvm::Instruction *SExtInst(llvm::Value *value, llvm::Type *type, + llvm::Instruction *SExtInst(llvm::Value *value, llvm::Type *type, const char *name = NULL); - llvm::Instruction *ZExtInst(llvm::Value *value, llvm::Type *type, + llvm::Instruction *ZExtInst(llvm::Value *value, llvm::Type *type, const char *name = NULL); /** Given two integer-typed values (but possibly one vector and the other not, and/or of possibly-different bit-widths), update their values as needed so that the two have the same (more general) - type.
*/ void MatchIntegerTypes(llvm::Value **v0, llvm::Value **v1); /** Create a new slice pointer out of the given pointer to an soa type @@ -462,9 +462,9 @@ public: allocated at the given alignment. By default, the alloca instruction is added at the start of the function in the entry basic block; if it should be added to the current basic block, then - the atEntryBlock parameter should be false. */ - llvm::Value *AllocaInst(llvm::Type *llvmType, - const char *name = NULL, int align = 0, + the atEntryBlock parameter should be false. */ + llvm::Value *AllocaInst(llvm::Type *llvmType, + const char *name = NULL, int align = 0, bool atEntryBlock = true); /** Standard store instruction; for this variant, the lvalue must be a @@ -481,7 +481,7 @@ public: /** Copy count bytes of memory from the location pointed to by src to the location pointed to by dest. (src and dest must not be - overlapping.) */ + overlapping.) */ void MemcpyInst(llvm::Value *dest, llvm::Value *src, llvm::Value *count, llvm::Value *align = NULL); @@ -497,10 +497,10 @@ public: /** This convenience method maps to an llvm::InsertElementInst if the given value is a llvm::VectorType, and to an llvm::InsertValueInst otherwise. */ - llvm::Value *InsertInst(llvm::Value *v, llvm::Value *eltVal, int elt, + llvm::Value *InsertInst(llvm::Value *v, llvm::Value *eltVal, int elt, const char *name = NULL); - llvm::PHINode *PhiNode(llvm::Type *type, int count, + llvm::PHINode *PhiNode(llvm::Type *type, int count, const char *name = NULL); llvm::Instruction *SelectInst(llvm::Value *test, llvm::Value *val0, llvm::Value *val1, const char *name = NULL); @@ -526,7 +526,7 @@ public: /** Launch an asynchronous task to run the given function, passing it the given argument values. */ - llvm::Value *LaunchInst(llvm::Value *callee, + llvm::Value *LaunchInst(llvm::Value *callee, std::vector &argVals, llvm::Value *launchCount); @@ -680,7 +680,7 @@ private: void jumpIfAllLoopLanesAreDone(llvm::BasicBlock *target); llvm::Value *emitGatherCallback(llvm::Value *lvalue, llvm::Value *retPtr); - llvm::Value *applyVaryingGEP(llvm::Value *basePtr, llvm::Value *index, + llvm::Value *applyVaryingGEP(llvm::Value *basePtr, llvm::Value *index, const Type *ptrType); void restoreMaskGivenReturns(llvm::Value *oldMask); @@ -694,7 +694,7 @@ private: const Type *ptrType, llvm::Value *mask); void maskedStore(llvm::Value *value, llvm::Value *ptr, const Type *ptrType, llvm::Value *mask); - void storeUniformToSOA(llvm::Value *value, llvm::Value *ptr, + void storeUniformToSOA(llvm::Value *value, llvm::Value *ptr, llvm::Value *mask, const Type *valueType, const PointerType *ptrType); llvm::Value *loadUniformFromSOA(llvm::Value *ptr, llvm::Value *mask, diff --git a/decl.cpp b/decl.cpp index c91e60a7..61b21322 100644 --- a/decl.cpp +++ b/decl.cpp @@ -28,11 +28,11 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file decl.cpp - @brief Implementations of classes related to turning declarations into + @brief Implementations of classes related to turning declarations into symbol names and types. */ @@ -62,7 +62,7 @@ lPrintTypeQualifiers(int typeQualifiers) { /** Given a Type and a set of type qualifiers, apply the type qualifiers to - the type, returning the type that is the result.
+ the type, returning the type that is the result. */ static const Type * lApplyTypeQualifiers(int typeQualifiers, const Type *type, SourcePos pos) { @@ -97,7 +97,7 @@ lApplyTypeQualifiers(int typeQualifiers, const Type *type, SourcePos pos) { if (unsignedType != NULL) type = unsignedType; else { - const Type *resolvedType = + const Type *resolvedType = type->ResolveUnboundVariability(Variability::Varying); Error(pos, "\"unsigned\" qualifier is illegal with \"%s\" type.", resolvedType->GetString().c_str()); @@ -105,7 +105,7 @@ lApplyTypeQualifiers(int typeQualifiers, const Type *type, SourcePos pos) { } if ((typeQualifiers & TYPEQUAL_SIGNED) != 0 && type->IsIntType() == false) { - const Type *resolvedType = + const Type *resolvedType = type->ResolveUnboundVariability(Variability::Varying); Error(pos, "\"signed\" qualifier is illegal with non-integer type " "\"%s\".", resolvedType->GetString().c_str()); @@ -147,7 +147,7 @@ DeclSpecs::GetBaseType(SourcePos pos) const { } retType = lApplyTypeQualifiers(typeQualifiers, retType, pos); - + if (soaWidth > 0) { const StructType *st = CastType(retType); @@ -180,7 +180,7 @@ DeclSpecs::GetBaseType(SourcePos pos) const { "currently leads to inefficient code to access " "soa types.", soaWidth, g->target.vectorWidth); } - + return retType; } @@ -215,8 +215,8 @@ DeclSpecs::Print() const { /////////////////////////////////////////////////////////////////////////// // Declarator -Declarator::Declarator(DeclaratorKind dk, SourcePos p) - : pos(p), kind(dk) { +Declarator::Declarator(DeclaratorKind dk, SourcePos p) + : pos(p), kind(dk) { child = NULL; typeQualifiers = 0; storageClass = SC_NONE; @@ -238,7 +238,7 @@ Declarator::InitFromDeclSpecs(DeclSpecs *ds) { storageClass = ds->storageClass; - if (ds->declSpecList.size() > 0 && + if (ds->declSpecList.size() > 0 && CastType(type) == NULL) { Error(pos, "__declspec specifiers for non-function type \"%s\" are " "not used.", type->GetString().c_str()); @@ -315,7 +315,7 @@ Declarator::InitFromType(const Type *baseType, DeclSpecs *ds) { Error(pos, "\"export\" qualifier illegal in variable declaration."); return; } - + Variability variability(Variability::Unbound); if (hasUniformQual) variability = Variability::Uniform; @@ -396,7 +396,7 @@ Declarator::InitFromType(const Type *baseType, DeclSpecs *ds) { llvm::SmallVector argNames; llvm::SmallVector argDefaults; llvm::SmallVector argPos; - + // Loop over the function arguments and store the names, types, // default values (if any), and source file positions each one in // the corresponding vector. 
@@ -431,7 +431,7 @@ Declarator::InitFromType(const Type *baseType, DeclSpecs *ds) { if (d->declSpecs->storageClass != SC_NONE) Error(decl->pos, "Storage class \"%s\" is illegal in " - "function parameter declaration for parameter \"%s\".", + "function parameter declaration for parameter \"%s\".", lGetStorageClassName(d->declSpecs->storageClass), decl->name.c_str()); if (Type::Equal(decl->type, AtomicType::Void)) { @@ -486,7 +486,7 @@ Declarator::InitFromType(const Type *baseType, DeclSpecs *ds) { init = dynamic_cast(decl->initExpr); if (init == NULL) Error(decl->initExpr->pos, "Default value for parameter " - "\"%s\" must be a compile-time constant.", + "\"%s\" must be a compile-time constant.", decl->name.c_str()); } break; @@ -507,14 +507,14 @@ Declarator::InitFromType(const Type *baseType, DeclSpecs *ds) { Error(pos, "Illegal to return function type from function."); return; } - + returnType = returnType->ResolveUnboundVariability(Variability::Varying); bool isExternC = ds && (ds->storageClass == SC_EXTERN_C); bool isExported = ds && ((ds->typeQualifiers & TYPEQUAL_EXPORT) != 0); bool isTask = ds && ((ds->typeQualifiers & TYPEQUAL_TASK) != 0); bool isUnmasked = ds && ((ds->typeQualifiers & TYPEQUAL_UNMASKED) != 0); - + if (isExported && isTask) { Error(pos, "Function can't have both \"task\" and \"export\" " "qualifiers"); @@ -539,7 +539,7 @@ Declarator::InitFromType(const Type *baseType, DeclSpecs *ds) { return; } - const FunctionType *functionType = + const FunctionType *functionType = new FunctionType(returnType, args, argNames, argDefaults, argPos, isTask, isExported, isExternC, isUnmasked); @@ -669,7 +669,7 @@ GetStructTypesNamesPositions(const std::vector &sd, // disgusting DeclSpecs ds(type); if (Type::Equal(type, AtomicType::Void) == false) { - if (type->IsUniformType()) + if (type->IsUniformType()) ds.typeQualifiers |= TYPEQUAL_UNIFORM; else if (type->IsVaryingType()) ds.typeQualifiers |= TYPEQUAL_VARYING; diff --git a/decl.h b/decl.h index fd473b6c..1a240fd7 100644 --- a/decl.h +++ b/decl.h @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file decl.h @@ -48,7 +48,7 @@ qualifiers, and that its basic type is 'int'. Then for each variable declaration, the Declaration class holds an instance of a Declarator, which in turn records the per-variable information like the name, array - size (if any), initializer expression, etc. + size (if any), initializer expression, etc. */ #ifndef ISPC_DECL_H @@ -124,7 +124,7 @@ enum DeclaratorKind { DK_FUNCTION }; -/** @brief Representation of the declaration of a single variable. +/** @brief Representation of the declaration of a single variable. In conjunction with an instance of the DeclSpecs, this gives us everything we need for a full variable declaration. @@ -162,7 +162,7 @@ public: StorageClass storageClass; /** For array declarators, this gives the declared size of the array. - Unsized arrays have arraySize == 0. */ + Unsized arrays have arraySize == 0. */ int arraySize; /** Name associated with the declarator.
*/ diff --git a/expr.cpp b/expr.cpp index 73697605..d518e787 100644 --- a/expr.cpp +++ b/expr.cpp @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file expr.cpp @@ -116,8 +116,8 @@ Expr::GetBaseSymbol() const { conversions between signed and unsigned integers of the same size. */ static void -lMaybeIssuePrecisionWarning(const AtomicType *toAtomicType, - const AtomicType *fromAtomicType, +lMaybeIssuePrecisionWarning(const AtomicType *toAtomicType, + const AtomicType *fromAtomicType, SourcePos pos, const char *errorMsgBase) { switch (toAtomicType->basicType) { case AtomicType::TYPE_BOOL: @@ -133,13 +133,13 @@ lMaybeIssuePrecisionWarning(const AtomicType *toAtomicType, case AtomicType::TYPE_DOUBLE: if ((int)toAtomicType->basicType < (int)fromAtomicType->basicType && toAtomicType->basicType != AtomicType::TYPE_BOOL && - !(toAtomicType->basicType == AtomicType::TYPE_INT8 && + !(toAtomicType->basicType == AtomicType::TYPE_INT8 && fromAtomicType->basicType == AtomicType::TYPE_UINT8) && - !(toAtomicType->basicType == AtomicType::TYPE_INT16 && + !(toAtomicType->basicType == AtomicType::TYPE_INT16 && fromAtomicType->basicType == AtomicType::TYPE_UINT16) && - !(toAtomicType->basicType == AtomicType::TYPE_INT32 && + !(toAtomicType->basicType == AtomicType::TYPE_INT32 && fromAtomicType->basicType == AtomicType::TYPE_UINT32) && - !(toAtomicType->basicType == AtomicType::TYPE_INT64 && + !(toAtomicType->basicType == AtomicType::TYPE_INT64 && fromAtomicType->basicType == AtomicType::TYPE_UINT64)) Warning(pos, "Conversion from type \"%s\" to type \"%s\" for %s" " may lose information.", @@ -181,7 +181,7 @@ lIsAllIntZeros(Expr *expr) { uint64_t vals[ISPC_MAX_NVEC]; int count = ce->AsUInt64(vals); - if (count == 1) + if (count == 1) return (vals[0] == 0); else { for (int i = 0; i < count; ++i) @@ -295,7 +295,7 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr, else { if (!failureOk) Error(pos, "Can't convert from incompatible array type \"%s\" " - "to pointer type \"%s\" for %s.", + "to pointer type \"%s\" for %s.", fromType->GetString().c_str(), toType->GetString().c_str(), errorMsgBase); return false; @@ -305,7 +305,7 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr, if (toType->IsUniformType() && fromType->IsVaryingType()) { if (!failureOk) Error(pos, "Can't convert from type \"%s\" to type \"%s\" for %s.", - fromType->GetString().c_str(), toType->GetString().c_str(), + fromType->GetString().c_str(), toType->GetString().c_str(), errorMsgBase); return false; } @@ -324,7 +324,7 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr, if (toPointerType == NULL) { if (!failureOk) Error(pos, "Can't convert from pointer type " - "\"%s\" to non-pointer type \"%s\" for %s.", + "\"%s\" to non-pointer type \"%s\" for %s.", fromType->GetString().c_str(), toType->GetString().c_str(), errorMsgBase); return false; @@ -348,9 +348,9 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr, // and a NULL convert to any other pointer type goto typecast_ok; } - else if (!Type::Equal(fromPointerType->GetBaseType(), + else if (!Type::Equal(fromPointerType->GetBaseType(), toPointerType->GetBaseType()) && -
!Type::Equal(fromPointerType->GetBaseType()->GetAsConstType(), + !Type::Equal(fromPointerType->GetBaseType()->GetAsConstType(), toPointerType->GetBaseType())) { if (!failureOk) Error(pos, "Can't convert from pointer type \"%s\" to " @@ -371,7 +371,7 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr, return true; } - if (toPointerType != NULL && fromAtomicType != NULL && + if (toPointerType != NULL && fromAtomicType != NULL && fromAtomicType->IsIntType() && expr != NULL && lIsAllIntZeros(*expr)) { // We have a zero-valued integer expression, which can also be @@ -404,7 +404,7 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr, toType->IsConstType() == true && fromType->IsConstType() == false) goto typecast_ok; - + if (CastType(fromType)) { if (CastType(toType)) { // Convert from a reference to a type to a const reference to a type; @@ -413,12 +413,12 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr, fromType->GetReferenceTarget()->GetAsConstType())) goto typecast_ok; - const ArrayType *atFrom = + const ArrayType *atFrom = CastType(fromType->GetReferenceTarget()); - const ArrayType *atTo = + const ArrayType *atTo = CastType(toType->GetReferenceTarget()); - if (atFrom != NULL && atTo != NULL && + if (atFrom != NULL && atTo != NULL && Type::Equal(atFrom->GetElementType(), atTo->GetElementType())) { goto typecast_ok; } @@ -434,7 +434,7 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr, // convert from a reference T -> T if (expr != NULL) { Expr *drExpr = new RefDerefExpr(*expr, pos); - if (lDoTypeConv(drExpr->GetType(), toType, &drExpr, failureOk, + if (lDoTypeConv(drExpr->GetType(), toType, &drExpr, failureOk, errorMsgBase, pos) == true) { *expr = drExpr; return true; @@ -469,14 +469,14 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr, fromType = fromType->GetReferenceTarget(); toType = toType->GetReferenceTarget(); if (toArrayType && fromArrayType) { - if (Type::Equal(toArrayType->GetElementType(), + if (Type::Equal(toArrayType->GetElementType(), fromArrayType->GetElementType())) { // the case of different element counts should have returned // successfully earlier, yes?? 
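// ------------------------------------------------------------------
// [Summary sketch of the reference-to-array cases accepted around this
// point, as read from the checks above and below]
//     reference T[N]  ->  reference T[M]        (same element type;
//                                                equal counts matched
//                                                earlier)
//     reference T[N]  ->  reference const T[N]  (element gains const)
// ------------------------------------------------------------------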
AssertPos(pos, toArrayType->GetElementCount() != fromArrayType->GetElementCount()); goto typecast_ok; } - else if (Type::Equal(toArrayType->GetElementType(), + else if (Type::Equal(toArrayType->GetElementType(), fromArrayType->GetElementType()->GetAsConstType())) { // T[x] -> const T[x] goto typecast_ok; @@ -538,7 +538,7 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr, if (fromAtomicType == NULL) { if (!failureOk) Error(pos, "Type conversion from \"%s\" to \"%s\" for %s is not " - "possible.", fromType->GetString().c_str(), + "possible.", fromType->GetString().c_str(), toType->GetString().c_str(), errorMsgBase); return false; } @@ -552,7 +552,7 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr, if (toAtomicType == NULL) { if (!failureOk) Error(pos, "Type conversion from \"%s\" to \"%s\" for %s is " - "not possible", fromType->GetString().c_str(), + "not possible", fromType->GetString().c_str(), toType->GetString().c_str(), errorMsgBase); return false; } @@ -573,7 +573,7 @@ lDoTypeConv(const Type *fromType, const Type *toType, Expr **expr, bool -CanConvertTypes(const Type *fromType, const Type *toType, +CanConvertTypes(const Type *fromType, const Type *toType, const char *errorMsgBase, SourcePos pos) { return lDoTypeConv(fromType, toType, NULL, errorMsgBase == NULL, errorMsgBase, pos); @@ -592,7 +592,7 @@ TypeConvertExpr(Expr *expr, const Type *toType, const char *errorMsgBase) { const Type *fromType = expr->GetType(); Expr *e = expr; - if (lDoTypeConv(fromType, toType, &e, false, errorMsgBase, + if (lDoTypeConv(fromType, toType, &e, false, errorMsgBase, expr->pos)) return e; else @@ -634,7 +634,7 @@ PossiblyResolveFunctionOverloads(Expr *expr, const Type *type) { @param pos Source file position of the variable being initialized */ void -InitSymbol(llvm::Value *ptr, const Type *symType, Expr *initExpr, +InitSymbol(llvm::Value *ptr, const Type *symType, Expr *initExpr, FunctionEmitContext *ctx, SourcePos pos) { if (initExpr == NULL) // leave it uninitialized @@ -658,11 +658,11 @@ InitSymbol(llvm::Value *ptr, const Type *symType, Expr *initExpr, if (Type::IsBasicType(symType)) ctx->StoreInst(constValue, ptr); else { - llvm::Value *constPtr = - new llvm::GlobalVariable(*m->module, llvmType, true /* const */, + llvm::Value *constPtr = + new llvm::GlobalVariable(*m->module, llvmType, true /* const */, llvm::GlobalValue::InternalLinkage, constValue, "const_initializer"); - llvm::Value *size = g->target.SizeOf(llvmType, + llvm::Value *size = g->target.SizeOf(llvmType, ctx->GetCurrentBasicBlock()); ctx->MemcpyInst(ptr, constPtr, size); } @@ -698,7 +698,7 @@ InitSymbol(llvm::Value *ptr, const Type *symType, Expr *initExpr, InitSymbol(ptr, symType, elist->exprs[0], ctx, pos); else Error(initExpr->pos, "Expression list initializers with " - "multiple values can't be used with type \"%s\".", + "multiple values can't be used with type \"%s\".", symType->GetString().c_str()); } return; @@ -708,7 +708,7 @@ InitSymbol(llvm::Value *ptr, const Type *symType, Expr *initExpr, if (rt) { if (!Type::Equal(initExpr->GetType(), rt)) { Error(initExpr->pos, "Initializer for reference type \"%s\" must have same " - "reference type itself. \"%s\" is incompatible.", + "reference type itself. 
\"%s\" is incompatible.", rt->GetString().c_str(), initExpr->GetType()->GetString().c_str()); return; } @@ -729,9 +729,9 @@ InitSymbol(llvm::Value *ptr, const Type *symType, Expr *initExpr, std::string name; if (CastType(symType) != NULL) name = "struct"; - else if (CastType(symType) != NULL) + else if (CastType(symType) != NULL) name = "array"; - else if (CastType(symType) != NULL) + else if (CastType(symType) != NULL) name = "vector"; else if (symType->IsSOAType()) name = symType->GetVariability().GetString(); @@ -751,7 +751,7 @@ InitSymbol(llvm::Value *ptr, const Type *symType, Expr *initExpr, int nInits = exprList->exprs.size(); if (nInits > nElements) { Error(initExpr->pos, "Initializer for %s type \"%s\" requires " - "no more than %d values; %d provided.", name.c_str(), + "no more than %d values; %d provided.", name.c_str(), symType->GetString().c_str(), nElements, nInits); return; } @@ -761,8 +761,8 @@ InitSymbol(llvm::Value *ptr, const Type *symType, Expr *initExpr, for (int i = 0; i < nElements; ++i) { // For SOA types, the element type is the uniform variant // of the underlying type - const Type *elementType = - collectionType ? collectionType->GetElementType(i) : + const Type *elementType = + collectionType ? collectionType->GetElementType(i) : symType->GetAsUniformType(); if (elementType == NULL) { AssertPos(pos, m->errorCount > 0); @@ -773,7 +773,7 @@ InitSymbol(llvm::Value *ptr, const Type *symType, Expr *initExpr, if (CastType(symType) != NULL) ep = ctx->AddElementOffset(ptr, i, NULL, "element"); else - ep = ctx->GetElementPtrInst(ptr, LLVMInt32(0), LLVMInt32(i), + ep = ctx->GetElementPtrInst(ptr, LLVMInt32(0), LLVMInt32(i), PointerType::GetUniform(elementType), "gep"); @@ -813,7 +813,7 @@ InitSymbol(llvm::Value *ptr, const Type *symType, Expr *initExpr, static const Type * lMatchingBoolType(const Type *type) { bool uniformTest = type->IsUniformType(); - const AtomicType *boolBase = uniformTest ? AtomicType::UniformBool : + const AtomicType *boolBase = uniformTest ? AtomicType::UniformBool : AtomicType::VaryingBool; const VectorType *vt = CastType(type); if (vt != NULL) @@ -844,7 +844,7 @@ lLLVMConstantValue(const Type *type, llvm::LLVMContext *ctx, double value) { // the llvmutil.h functions to call to get the corresponding // constant and then call it... bool isUniform = type->IsUniformType(); - AtomicType::BasicType basicType = (enumType != NULL) ? + AtomicType::BasicType basicType = (enumType != NULL) ? AtomicType::TYPE_UINT32 : atomicType->basicType; switch (basicType) { @@ -884,7 +884,7 @@ lLLVMConstantValue(const Type *type, llvm::LLVMContext *ctx, double value) { return isUniform ? LLVMUInt32(i) : LLVMUInt32Vector(i); } case AtomicType::TYPE_FLOAT: - return isUniform ? LLVMFloat((float)value) : + return isUniform ? LLVMFloat((float)value) : LLVMFloatVector((float)value); case AtomicType::TYPE_UINT64: { uint64_t i = (uint64_t)value; @@ -924,7 +924,7 @@ lLLVMConstantValue(const Type *type, llvm::LLVMContext *ctx, double value) { // LLVM ArrayTypes leaks into the code here; it feels like this detail // should be better encapsulated? 
if (baseType->IsUniformType()) { - llvm::VectorType *lvt = + llvm::VectorType *lvt = llvm::dyn_cast(llvmVectorType); Assert(lvt != NULL); std::vector vals; @@ -933,7 +933,7 @@ lLLVMConstantValue(const Type *type, llvm::LLVMContext *ctx, double value) { return llvm::ConstantVector::get(vals); } else { - llvm::ArrayType *lat = + llvm::ArrayType *lat = llvm::dyn_cast(llvmVectorType); Assert(lat != NULL); std::vector vals; @@ -958,14 +958,14 @@ lMaskForSymbol(Symbol *baseSym, FunctionEmitContext *ctx) { // depending on context... return ctx->GetFullMask(); - llvm::Value *mask = (baseSym->parentFunction == ctx->GetFunction() && - baseSym->storageClass != SC_STATIC) ? + llvm::Value *mask = (baseSym->parentFunction == ctx->GetFunction() && + baseSym->storageClass != SC_STATIC) ? ctx->GetInternalMask() : ctx->GetFullMask(); return mask; } -/** Store the result of an assignment to the given location. +/** Store the result of an assignment to the given location. */ static void lStoreAssignResult(llvm::Value *value, llvm::Value *ptr, const Type *valueType, @@ -1024,10 +1024,10 @@ lEmitPrePostIncDec(UnaryExpr::Op op, Expr *expr, SourcePos pos, } if (lvalue == NULL) { - // If we can't get a lvalue, then we have an error here - const char *prepost = (op == UnaryExpr::PreInc || + // If we can't get a lvalue, then we have an error here + const char *prepost = (op == UnaryExpr::PreInc || op == UnaryExpr::PreDec) ? "pre" : "post"; - const char *incdec = (op == UnaryExpr::PreInc || + const char *incdec = (op == UnaryExpr::PreInc || op == UnaryExpr::PostInc) ? "increment" : "decrement"; Error(pos, "Can't %s-%s non-lvalues.", prepost, incdec); return NULL; @@ -1054,10 +1054,10 @@ lEmitPrePostIncDec(UnaryExpr::Op op, Expr *expr, SourcePos pos, else { llvm::Constant *dval = lLLVMConstantValue(type, g->ctx, delta); if (type->IsFloatType()) - binop = ctx->BinaryOperator(llvm::Instruction::FAdd, rvalue, + binop = ctx->BinaryOperator(llvm::Instruction::FAdd, rvalue, dval, opName.c_str()); else - binop = ctx->BinaryOperator(llvm::Instruction::Add, rvalue, + binop = ctx->BinaryOperator(llvm::Instruction::Add, rvalue, dval, opName.c_str()); } @@ -1096,8 +1096,8 @@ lEmitNegate(Expr *arg, SourcePos pos, FunctionEmitContext *ctx) { } -UnaryExpr::UnaryExpr(Op o, Expr *e, SourcePos p) - : Expr(p), op(o) { +UnaryExpr::UnaryExpr(Op o, Expr *e, SourcePos p) + : Expr(p), op(o) { expr = e; } @@ -1150,7 +1150,7 @@ UnaryExpr::GetType() const { case PostInc: case PostDec: case Negate: - case BitNot: + case BitNot: return type; case LogicalNot: return lMatchingBoolType(type); @@ -1203,7 +1203,7 @@ UnaryExpr::Optimize() { return new ConstExpr(constExpr, v); } case BitNot: { - if (Type::EqualIgnoringConst(type, AtomicType::UniformInt32) || + if (Type::EqualIgnoringConst(type, AtomicType::UniformInt32) || Type::EqualIgnoringConst(type, AtomicType::VaryingInt32)) { int32_t v[ISPC_MAX_NVEC]; int count = constExpr->AsInt32(v); @@ -1211,7 +1211,7 @@ UnaryExpr::Optimize() { v[i] = ~v[i]; return new ConstExpr(type, v, pos); } - else if (Type::EqualIgnoringConst(type, AtomicType::UniformUInt32) || + else if (Type::EqualIgnoringConst(type, AtomicType::UniformUInt32) || Type::EqualIgnoringConst(type, AtomicType::VaryingUInt32) || isEnumType == true) { uint32_t v[ISPC_MAX_NVEC]; @@ -1224,7 +1224,7 @@ UnaryExpr::Optimize() { FATAL("unexpected type in UnaryExpr::Optimize() / BitNot case"); } case LogicalNot: { - AssertPos(pos, Type::EqualIgnoringConst(type, AtomicType::UniformBool) || + AssertPos(pos, Type::EqualIgnoringConst(type, 
AtomicType::UniformBool) || Type::EqualIgnoringConst(type, AtomicType::VaryingBool)); bool v[ISPC_MAX_NVEC]; int count = constExpr->AsBool(v); @@ -1291,7 +1291,7 @@ UnaryExpr::TypeCheck() { if (op == Negate) { if (!type->IsNumericType()) { - Error(expr->pos, "Negate not allowed for non-numeric type \"%s\".", + Error(expr->pos, "Negate not allowed for non-numeric type \"%s\".", type->GetString().c_str()); return NULL; } @@ -1376,7 +1376,7 @@ lOpString(BinaryExpr::Op op) { /** Utility routine to emit the binary bitwise operator corresponding to - the given BinaryExpr::Op. + the given BinaryExpr::Op. */ static llvm::Value * lEmitBinaryBitOp(BinaryExpr::Op op, llvm::Value *arg0Val, @@ -1387,10 +1387,10 @@ lEmitBinaryBitOp(BinaryExpr::Op op, llvm::Value *arg0Val, case BinaryExpr::Shl: inst = llvm::Instruction::Shl; break; case BinaryExpr::Shr: if (isUnsigned) - inst = llvm::Instruction::LShr; + inst = llvm::Instruction::LShr; else - inst = llvm::Instruction::AShr; - break; + inst = llvm::Instruction::AShr; + break; case BinaryExpr::BitAnd: inst = llvm::Instruction::And; break; case BinaryExpr::BitXor: inst = llvm::Instruction::Xor; break; case BinaryExpr::BitOr: inst = llvm::Instruction::Or; break; @@ -1423,13 +1423,13 @@ lEmitBinaryPointerArith(BinaryExpr::Op op, llvm::Value *value0, llvm::Value *p0 = ctx->ExtractInst(value0, 0); llvm::Value *p1 = ctx->ExtractInst(value1, 0); const Type *majorType = ptrType->GetAsNonSlice(); - llvm::Value *majorDelta = + llvm::Value *majorDelta = lEmitBinaryPointerArith(op, p0, p1, majorType, majorType, ctx, pos); - + int soaWidth = ptrType->GetBaseType()->GetSOAWidth(); AssertPos(pos, soaWidth > 0); - llvm::Value *soaScale = LLVMIntAsType(soaWidth, + llvm::Value *soaScale = LLVMIntAsType(soaWidth, majorDelta->getType()); llvm::Value *majorScale = @@ -1454,20 +1454,20 @@ lEmitBinaryPointerArith(BinaryExpr::Op op, llvm::Value *value0, } // Compute the difference in bytes - llvm::Value *delta = + llvm::Value *delta = ctx->BinaryOperator(llvm::Instruction::Sub, value0, value1, "ptr_diff"); // Now divide by the size of the type that the pointer // points to in order to return the difference in elements. - llvm::Type *llvmElementType = + llvm::Type *llvmElementType = ptrType->GetBaseType()->LLVMType(g->ctx); - llvm::Value *size = g->target.SizeOf(llvmElementType, + llvm::Value *size = g->target.SizeOf(llvmElementType, ctx->GetCurrentBasicBlock()); if (ptrType->IsVaryingType()) size = ctx->SmearUniform(size); - if (g->target.is32Bit == false && + if (g->target.is32Bit == false && g->opt.force32BitAddressing == true) { // If we're doing 32-bit addressing math on a 64-bit // target, then trunc the delta down to a 32-bit value. @@ -1488,11 +1488,11 @@ lEmitBinaryPointerArith(BinaryExpr::Op op, llvm::Value *value0, else { // ptr - integer llvm::Value *zero = lLLVMConstantValue(type1, g->ctx, 0.); - llvm::Value *negOffset = - ctx->BinaryOperator(llvm::Instruction::Sub, zero, value1, + llvm::Value *negOffset = + ctx->BinaryOperator(llvm::Instruction::Sub, zero, value1, "negate"); // Do a GEP as ptr + -integer - return ctx->GetElementPtrInst(value0, negOffset, ptrType, + return ctx->GetElementPtrInst(value0, negOffset, ptrType, "ptrmath"); } } @@ -1540,16 +1540,16 @@ lEmitBinaryArith(BinaryExpr::Op op, llvm::Value *value0, llvm::Value *value1, opName = "div"; if (type0->IsVaryingType() && !isFloatOp) PerformanceWarning(pos, "Division with varying integer types is " - "very inefficient."); - inst = isFloatOp ? 
llvm::Instruction::FDiv : + "very inefficient."); + inst = isFloatOp ? llvm::Instruction::FDiv : (isUnsignedOp ? llvm::Instruction::UDiv : llvm::Instruction::SDiv); break; case BinaryExpr::Mod: opName = "mod"; if (type0->IsVaryingType() && !isFloatOp) PerformanceWarning(pos, "Modulus operator with varying types is " - "very inefficient."); - inst = isFloatOp ? llvm::Instruction::FRem : + "very inefficient."); + inst = isFloatOp ? llvm::Instruction::FRem : (isUnsignedOp ? llvm::Instruction::URem : llvm::Instruction::SRem); break; default: @@ -1576,22 +1576,22 @@ lEmitBinaryCmp(BinaryExpr::Op op, llvm::Value *e0Val, llvm::Value *e1Val, switch (op) { case BinaryExpr::Lt: opName = "less"; - pred = isFloatOp ? llvm::CmpInst::FCMP_ULT : + pred = isFloatOp ? llvm::CmpInst::FCMP_ULT : (isUnsignedOp ? llvm::CmpInst::ICMP_ULT : llvm::CmpInst::ICMP_SLT); break; case BinaryExpr::Gt: opName = "greater"; - pred = isFloatOp ? llvm::CmpInst::FCMP_UGT : + pred = isFloatOp ? llvm::CmpInst::FCMP_UGT : (isUnsignedOp ? llvm::CmpInst::ICMP_UGT : llvm::CmpInst::ICMP_SGT); break; case BinaryExpr::Le: opName = "lessequal"; - pred = isFloatOp ? llvm::CmpInst::FCMP_ULE : + pred = isFloatOp ? llvm::CmpInst::FCMP_ULE : (isUnsignedOp ? llvm::CmpInst::ICMP_ULE : llvm::CmpInst::ICMP_SLE); break; case BinaryExpr::Ge: opName = "greaterequal"; - pred = isFloatOp ? llvm::CmpInst::FCMP_UGE : + pred = isFloatOp ? llvm::CmpInst::FCMP_UGE : (isUnsignedOp ? llvm::CmpInst::ICMP_UGE : llvm::CmpInst::ICMP_SGE); break; case BinaryExpr::Equal: @@ -1607,9 +1607,9 @@ lEmitBinaryCmp(BinaryExpr::Op op, llvm::Value *e0Val, llvm::Value *e1Val, return NULL; } - llvm::Value *cmp = ctx->CmpInst(isFloatOp ? llvm::Instruction::FCmp : + llvm::Value *cmp = ctx->CmpInst(isFloatOp ? llvm::Instruction::FCmp : llvm::Instruction::ICmp, - pred, e0Val, e1Val, + pred, e0Val, e1Val, LLVMGetName(opName, e0Val, e1Val)); // This is a little ugly: CmpInst returns i1 values, but we use vectors // of i32s for varying bool values; type convert the result here if @@ -1621,7 +1621,7 @@ lEmitBinaryCmp(BinaryExpr::Op op, llvm::Value *e0Val, llvm::Value *e1Val, } -BinaryExpr::BinaryExpr(Op o, Expr *a, Expr *b, SourcePos p) +BinaryExpr::BinaryExpr(Op o, Expr *a, Expr *b, SourcePos p) : Expr(p), op(o) { arg0 = a; arg1 = b; @@ -1631,8 +1631,8 @@ BinaryExpr::BinaryExpr(Op o, Expr *a, Expr *b, SourcePos p) /** Emit code for a && or || logical operator. In particular, the code here handles "short-circuit" evaluation, where the second expression isn't evaluated if the value of the first one determines the value of - the result. -*/ + the result. +*/ llvm::Value * lEmitLogicalOp(BinaryExpr::Op op, Expr *arg0, Expr *arg1, FunctionEmitContext *ctx, SourcePos pos) { @@ -1676,7 +1676,7 @@ lEmitLogicalOp(BinaryExpr::Op op, Expr *arg0, Expr *arg1, "logical_and"); else { AssertPos(pos, op == BinaryExpr::LogicalOr); - return ctx->BinaryOperator(llvm::Instruction::Or, value0, value1, + return ctx->BinaryOperator(llvm::Instruction::Or, value0, value1, "logical_or"); } } @@ -1699,7 +1699,7 @@ lEmitLogicalOp(BinaryExpr::Op op, Expr *arg0, Expr *arg1, if (type0->IsUniformType()) { // Check to see if the value of the first operand is true or false - llvm::Value *value0True = + llvm::Value *value0True = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_EQ, value0, LLVMTrue); @@ -1774,8 +1774,8 @@ lEmitLogicalOp(BinaryExpr::Op op, Expr *arg0, Expr *arg1, // See if value0 is true for all currently executing // lanes--i.e. if (value0 & mask) == mask. 
If so, we don't // need to evaluate the second operand of the expression. - llvm::Value *value0AndMask = - ctx->BinaryOperator(llvm::Instruction::And, value0, + llvm::Value *value0AndMask = + ctx->BinaryOperator(llvm::Instruction::And, value0, oldFullMask, "op&mask"); llvm::Value *equalsMask = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_EQ, @@ -1810,10 +1810,10 @@ lEmitLogicalOp(BinaryExpr::Op op, Expr *arg0, Expr *arg1, // disabled have undefined values: // result = (value0 & old_mask) | (value1 & current_mask) llvm::Value *value1AndMask = - ctx->BinaryOperator(llvm::Instruction::And, value1, + ctx->BinaryOperator(llvm::Instruction::And, value1, ctx->GetInternalMask(), "op&mask"); llvm::Value *result = - ctx->BinaryOperator(llvm::Instruction::Or, value0AndMask, + ctx->BinaryOperator(llvm::Instruction::Or, value0AndMask, value1AndMask, "or_result"); ctx->StoreInst(result, retPtr); ctx->BranchInst(bbLogicalDone); @@ -1825,8 +1825,8 @@ lEmitLogicalOp(BinaryExpr::Op op, Expr *arg0, Expr *arg1, // overall result must be false: this corresponds to checking // if (mask & ~value0) == mask. llvm::Value *notValue0 = ctx->NotOperator(value0, "not_value0"); - llvm::Value *notValue0AndMask = - ctx->BinaryOperator(llvm::Instruction::And, notValue0, + llvm::Value *notValue0AndMask = + ctx->BinaryOperator(llvm::Instruction::And, notValue0, oldFullMask, "not_value0&mask"); llvm::Value *equalsMask = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_EQ, @@ -1857,14 +1857,14 @@ lEmitLogicalOp(BinaryExpr::Op op, Expr *arg0, Expr *arg1, // And as in the || case, we compute the overall result by // masking off the valid lanes before we AND them together: // result = (value0 & old_mask) & (value1 & current_mask) - llvm::Value *value0AndMask = - ctx->BinaryOperator(llvm::Instruction::And, value0, + llvm::Value *value0AndMask = + ctx->BinaryOperator(llvm::Instruction::And, value0, oldFullMask, "op&mask"); llvm::Value *value1AndMask = ctx->BinaryOperator(llvm::Instruction::And, value1, ctx->GetInternalMask(), "value1&mask"); llvm::Value *result = - ctx->BinaryOperator(llvm::Instruction::And, value0AndMask, + ctx->BinaryOperator(llvm::Instruction::And, value0AndMask, value1AndMask, "or_result"); ctx->StoreInst(result, retPtr); ctx->BranchInst(bbLogicalDone); @@ -1919,11 +1919,11 @@ BinaryExpr::GetValue(FunctionEmitContext *ctx) const { case BitAnd: case BitXor: case BitOr: { - if (op == Shr && arg1->GetType()->IsVaryingType() && + if (op == Shr && arg1->GetType()->IsVaryingType() && dynamic_cast(arg1) == NULL) PerformanceWarning(pos, "Shift right is extremely inefficient for " "varying shift amounts."); - return lEmitBinaryBitOp(op, value0, value1, + return lEmitBinaryBitOp(op, value0, value1, arg0->GetType()->IsUnsignedType(), ctx); } case Comma: @@ -1961,8 +1961,8 @@ BinaryExpr::GetType() const { else if (op == Sub) { if (CastType(type1) != NULL) { // ptr - ptr -> ~ptrdiff_t - const Type *diffType = (g->target.is32Bit || - g->opt.force32BitAddressing) ? + const Type *diffType = (g->target.is32Bit || + g->opt.force32BitAddressing) ? AtomicType::UniformInt32 : AtomicType::UniformInt64; if (type0->IsVaryingType() || type1->IsVaryingType()) diffType = diffType->GetAsVaryingType(); @@ -2022,13 +2022,13 @@ BinaryExpr::GetType() const { break /** Constant fold the binary integer operations that aren't also applicable - to floating-point types. + to floating-point types. 
*/ template static ConstExpr * lConstFoldBinIntOp(BinaryExpr::Op op, const T *v0, const T *v1, ConstExpr *carg0) { T result[ISPC_MAX_NVEC]; int count = carg0->Count(); - + switch (op) { FOLD_OP(BinaryExpr::Mod, %); FOLD_OP(BinaryExpr::Shl, <<); @@ -2045,7 +2045,7 @@ lConstFoldBinIntOp(BinaryExpr::Op op, const T *v0, const T *v1, ConstExpr *carg0 /** Constant fold the binary logical ops. - */ + */ template static ConstExpr * lConstFoldBinLogicalOp(BinaryExpr::Op op, const T *v0, const T *v1, ConstExpr *carg0) { bool result[ISPC_MAX_NVEC]; @@ -2064,7 +2064,7 @@ lConstFoldBinLogicalOp(BinaryExpr::Op op, const T *v0, const T *v1, ConstExpr *c return NULL; } - const Type *rType = carg0->GetType()->IsUniformType() ? + const Type *rType = carg0->GetType()->IsUniformType() ? AtomicType::UniformBool : AtomicType::VaryingBool; return new ConstExpr(rType, result, carg0->pos); } @@ -2095,7 +2095,7 @@ lConstFoldBinArithOp(BinaryExpr::Op op, const T *v0, const T *v1, ConstExpr *car default: return NULL; } - + return new ConstExpr(carg0->GetType(), result, carg0->pos); } @@ -2103,7 +2103,7 @@ lConstFoldBinArithOp(BinaryExpr::Op op, const T *v0, const T *v1, ConstExpr *car /** Constant fold the various boolean binary ops. */ static ConstExpr * -lConstFoldBoolBinOp(BinaryExpr::Op op, const bool *v0, const bool *v1, +lConstFoldBoolBinOp(BinaryExpr::Op op, const bool *v0, const bool *v1, ConstExpr *carg0) { bool result[ISPC_MAX_NVEC]; int count = carg0->Count(); @@ -2168,7 +2168,7 @@ BinaryExpr::Optimize() { if (rcpFuns.size() > 0) { Expr *rcpSymExpr = new FunctionSymbolExpr("rcp", rcpFuns, pos); ExprList *args = new ExprList(arg1, arg1->pos); - Expr *rcpCall = new FunctionCallExpr(rcpSymExpr, args, + Expr *rcpCall = new FunctionCallExpr(rcpSymExpr, args, arg1->pos); rcpCall = ::TypeCheck(rcpCall); if (rcpCall == NULL) @@ -2197,7 +2197,7 @@ BinaryExpr::Optimize() { AssertPos(pos, Type::EqualIgnoringConst(arg0->GetType(), arg1->GetType())); const Type *type = arg0->GetType()->GetAsNonConstType(); - if (Type::Equal(type, AtomicType::UniformFloat) || + if (Type::Equal(type, AtomicType::UniformFloat) || Type::Equal(type, AtomicType::VaryingFloat)) { float v0[ISPC_MAX_NVEC], v1[ISPC_MAX_NVEC]; constArg0->AsFloat(v0); @@ -2207,10 +2207,10 @@ BinaryExpr::Optimize() { return ret; else if ((ret = lConstFoldBinLogicalOp(op, v0, v1, constArg0)) != NULL) return ret; - else + else return this; } - if (Type::Equal(type, AtomicType::UniformDouble) || + if (Type::Equal(type, AtomicType::UniformDouble) || Type::Equal(type, AtomicType::VaryingDouble)) { double v0[ISPC_MAX_NVEC], v1[ISPC_MAX_NVEC]; constArg0->AsDouble(v0); @@ -2220,10 +2220,10 @@ BinaryExpr::Optimize() { return ret; else if ((ret = lConstFoldBinLogicalOp(op, v0, v1, constArg0)) != NULL) return ret; - else + else return this; } - if (Type::Equal(type, AtomicType::UniformInt32) || + if (Type::Equal(type, AtomicType::UniformInt32) || Type::Equal(type, AtomicType::VaryingInt32)) { int32_t v0[ISPC_MAX_NVEC], v1[ISPC_MAX_NVEC]; constArg0->AsInt32(v0); @@ -2238,7 +2238,7 @@ BinaryExpr::Optimize() { else return this; } - else if (Type::Equal(type, AtomicType::UniformUInt32) || + else if (Type::Equal(type, AtomicType::UniformUInt32) || Type::Equal(type, AtomicType::VaryingUInt32) || CastType(type) != NULL) { uint32_t v0[ISPC_MAX_NVEC], v1[ISPC_MAX_NVEC]; @@ -2254,7 +2254,7 @@ BinaryExpr::Optimize() { else return this; } - else if (Type::Equal(type, AtomicType::UniformBool) || + else if (Type::Equal(type, AtomicType::UniformBool) || Type::Equal(type, 
AtomicType::VaryingBool)) { bool v0[ISPC_MAX_NVEC], v1[ISPC_MAX_NVEC]; constArg0->AsBool(v0); @@ -2264,7 +2264,7 @@ BinaryExpr::Optimize() { return ret; else if ((ret = lConstFoldBinLogicalOp(op, v0, v1, constArg0)) != NULL) return ret; - else + else return this; } else @@ -2381,12 +2381,12 @@ BinaryExpr::TypeCheck() { return NULL; } - const Type *offsetType = g->target.is32Bit ? + const Type *offsetType = g->target.is32Bit ? AtomicType::UniformInt32 : AtomicType::UniformInt64; if (pt0->IsVaryingType()) offsetType = offsetType->GetAsVaryingType(); if (type1->IsVaryingType()) { - arg0 = TypeConvertExpr(arg0, type0->GetAsVaryingType(), + arg0 = TypeConvertExpr(arg0, type0->GetAsVaryingType(), "pointer addition"); offsetType = offsetType->GetAsVaryingType(); AssertPos(pos, arg0 != NULL); @@ -2422,7 +2422,7 @@ BinaryExpr::TypeCheck() { bool isVarying = (type0->IsVaryingType() || type1->IsVaryingType()); if (isVarying) { - arg0 = TypeConvertExpr(arg0, type0->GetAsVaryingType(), + arg0 = TypeConvertExpr(arg0, type0->GetAsVaryingType(), "shift operator"); if (arg0 == NULL) return NULL; @@ -2453,18 +2453,18 @@ BinaryExpr::TypeCheck() { // Must be numeric type for these. (And mod is special--can't be float) if (!type0->IsNumericType() || (op == Mod && type0->IsFloatType())) { Error(arg0->pos, "First operand to binary operator \"%s\" is of " - "invalid type \"%s\".", lOpString(op), + "invalid type \"%s\".", lOpString(op), type0->GetString().c_str()); return NULL; } if (!type1->IsNumericType() || (op == Mod && type1->IsFloatType())) { Error(arg1->pos, "First operand to binary operator \"%s\" is of " - "invalid type \"%s\".", lOpString(op), + "invalid type \"%s\".", lOpString(op), type1->GetString().c_str()); return NULL; } - const Type *promotedType = Type::MoreGeneralType(type0, type1, + const Type *promotedType = Type::MoreGeneralType(type0, type1, Union(arg0->pos, arg1->pos), lOpString(op)); if (promotedType == NULL) @@ -2502,20 +2502,20 @@ BinaryExpr::TypeCheck() { if (!type0->IsBoolType() && !type0->IsNumericType()) { Error(arg0->pos, "First operand to operator \"%s\" is of " - "non-comparable type \"%s\".", lOpString(op), + "non-comparable type \"%s\".", lOpString(op), type0->GetString().c_str()); return NULL; } if (!type1->IsBoolType() && !type1->IsNumericType()) { Error(arg1->pos, "Second operand to operator \"%s\" is of " - "non-comparable type \"%s\".", lOpString(op), + "non-comparable type \"%s\".", lOpString(op), type1->GetString().c_str()); return NULL; } } - const Type *promotedType = + const Type *promotedType = Type::MoreGeneralType(type0, type1, arg0->pos, lOpString(op)); if (promotedType == NULL) return NULL; @@ -2531,9 +2531,9 @@ BinaryExpr::TypeCheck() { // For now, we just type convert to boolean types, of the same // variability as the original types. (When generating code, it's // useful to have preserved the uniform/varying distinction.) - const AtomicType *boolType0 = type0->IsUniformType() ? + const AtomicType *boolType0 = type0->IsUniformType() ? AtomicType::UniformBool : AtomicType::VaryingBool; - const AtomicType *boolType1 = type1->IsUniformType() ? + const AtomicType *boolType1 = type1->IsUniformType() ? AtomicType::UniformBool : AtomicType::VaryingBool; const Type *destType0 = NULL, *destType1 = NULL; @@ -2583,7 +2583,7 @@ BinaryExpr::EstimateCost() const { dynamic_cast(arg1) != NULL) return 0; - return (op == Div || op == Mod) ? COST_COMPLEX_ARITH_OP : + return (op == Div || op == Mod) ? 
COST_COMPLEX_ARITH_OP : COST_SIMPLE_ARITH_LOGIC_OP; } @@ -2628,7 +2628,7 @@ lOpString(AssignExpr::Op op) { /** Emit code to do an "assignment + operation" operator, e.g. "+=". */ static llvm::Value * -lEmitOpAssign(AssignExpr::Op op, Expr *arg0, Expr *arg1, const Type *type, +lEmitOpAssign(AssignExpr::Op op, Expr *arg0, Expr *arg1, const Type *type, Symbol *baseSym, SourcePos pos, FunctionEmitContext *ctx) { llvm::Value *lv = arg0->GetLValue(ctx); if (!lv) { @@ -2684,7 +2684,7 @@ lEmitOpAssign(AssignExpr::Op op, Expr *arg0, Expr *arg1, const Type *type, case AssignExpr::AndAssign: case AssignExpr::XorAssign: case AssignExpr::OrAssign: - newValue = lEmitBinaryBitOp(basicop, oldLHS, rvalue, + newValue = lEmitBinaryBitOp(basicop, oldLHS, rvalue, arg0->GetType()->IsUnsignedType(), ctx); break; default: @@ -2700,7 +2700,7 @@ lEmitOpAssign(AssignExpr::Op op, Expr *arg0, Expr *arg1, const Type *type, } -AssignExpr::AssignExpr(AssignExpr::Op o, Expr *a, Expr *b, SourcePos p) +AssignExpr::AssignExpr(AssignExpr::Op o, Expr *a, Expr *b, SourcePos p) : Expr(p), op(o) { lvalue = a; rvalue = b; @@ -2795,9 +2795,9 @@ lCheckForConstStructMember(SourcePos pos, const StructType *structType, t->GetString().c_str()); else Error(pos, "Illegal to assign to type \"%s\" in type \"%s\" " - "due to element \"%s\" with type \"%s\".", + "due to element \"%s\" with type \"%s\".", structType->GetString().c_str(), - initialType->GetString().c_str(), + initialType->GetString().c_str(), structType->GetElementName(i).c_str(), t->GetString().c_str()); return true; @@ -2813,10 +2813,10 @@ lCheckForConstStructMember(SourcePos pos, const StructType *structType, Expr * AssignExpr::TypeCheck() { - if (lvalue == NULL || rvalue == NULL) + if (lvalue == NULL || rvalue == NULL) return NULL; - bool lvalueIsReference = + bool lvalueIsReference = CastType(lvalue->GetType()) != NULL; if (lvalueIsReference) lvalue = new RefDerefExpr(lvalue, lvalue->pos); @@ -2892,7 +2892,7 @@ AssignExpr::TypeCheck() { return NULL; if (lhsType->IsFloatType() == true && - (op == ShlAssign || op == ShrAssign || op == AndAssign || + (op == ShlAssign || op == ShrAssign || op == AndAssign || op == XorAssign || op == OrAssign)) { Error(pos, "Illegal to use %s operator with floating-point " "operands.", lOpString(op)); @@ -2904,7 +2904,7 @@ AssignExpr::TypeCheck() { // Make sure we're not assigning to a struct that has a constant member if (lCheckForConstStructMember(pos, st, st)) return NULL; - + if (op != Assign) { Error(lvalue->pos, "Assignment operator \"%s\" is illegal with struct " "type \"%s\".", lOpString(op), st->GetString().c_str()); @@ -2944,7 +2944,7 @@ AssignExpr::Print() const { /////////////////////////////////////////////////////////////////////////// // SelectExpr -SelectExpr::SelectExpr(Expr *t, Expr *e1, Expr *e2, SourcePos p) +SelectExpr::SelectExpr(Expr *t, Expr *e1, Expr *e2, SourcePos p) : Expr(p) { test = t; expr1 = e1; @@ -2956,8 +2956,8 @@ SelectExpr::SelectExpr(Expr *t, Expr *e1, Expr *e2, SourcePos p) value. 
*/ static llvm::Value * -lEmitVaryingSelect(FunctionEmitContext *ctx, llvm::Value *test, - llvm::Value *expr1, llvm::Value *expr2, +lEmitVaryingSelect(FunctionEmitContext *ctx, llvm::Value *test, + llvm::Value *expr1, llvm::Value *expr2, const Type *type) { llvm::Value *resultPtr = ctx->AllocaInst(expr1->getType(), "selectexpr_tmp"); // Don't need to worry about masking here @@ -2971,7 +2971,7 @@ lEmitVaryingSelect(FunctionEmitContext *ctx, llvm::Value *test, static void -lEmitSelectExprCode(FunctionEmitContext *ctx, llvm::Value *testVal, +lEmitSelectExprCode(FunctionEmitContext *ctx, llvm::Value *testVal, llvm::Value *oldMask, llvm::Value *fullMask, Expr *expr, llvm::Value *exprPtr) { llvm::BasicBlock *bbEval = ctx->CreateBasicBlock("select_eval_expr"); @@ -2979,15 +2979,15 @@ lEmitSelectExprCode(FunctionEmitContext *ctx, llvm::Value *testVal, // Check to see if the test was true for any of the currently executing // program instances. - llvm::Value *testAndFullMask = - ctx->BinaryOperator(llvm::Instruction::And, testVal, fullMask, + llvm::Value *testAndFullMask = + ctx->BinaryOperator(llvm::Instruction::And, testVal, fullMask, "test&mask"); llvm::Value *anyOn = ctx->Any(testAndFullMask); ctx->BranchInst(bbEval, bbDone, anyOn); ctx->SetCurrentBasicBlock(bbEval); - llvm::Value *testAndMask = - ctx->BinaryOperator(llvm::Instruction::And, testVal, oldMask, + llvm::Value *testAndMask = + ctx->BinaryOperator(llvm::Instruction::And, testVal, oldMask, "test&mask"); ctx->SetInternalMask(testAndMask); llvm::Value *exprVal = expr->GetValue(ctx); @@ -3069,13 +3069,13 @@ SelectExpr::GetValue(FunctionEmitContext *ctx) const { // Temporary storage to store the values computed for each // expression, if any. (These stay as uninitialized memory if we // short circuit around the corresponding expression.) - llvm::Type *exprType = + llvm::Type *exprType = expr1->GetType()->LLVMType(g->ctx); llvm::Value *expr1Ptr = ctx->AllocaInst(exprType); llvm::Value *expr2Ptr = ctx->AllocaInst(exprType); if (shortCircuit1) - lEmitSelectExprCode(ctx, testVal, oldMask, fullMask, expr1, + lEmitSelectExprCode(ctx, testVal, oldMask, fullMask, expr1, expr1Ptr); else { ctx->SetInternalMaskAnd(oldMask, testVal); @@ -3085,7 +3085,7 @@ SelectExpr::GetValue(FunctionEmitContext *ctx) const { if (shortCircuit2) { llvm::Value *notTest = ctx->NotOperator(testVal); - lEmitSelectExprCode(ctx, notTest, oldMask, fullMask, expr2, + lEmitSelectExprCode(ctx, notTest, oldMask, fullMask, expr2, expr2Ptr); } else { @@ -3112,10 +3112,10 @@ SelectExpr::GetValue(FunctionEmitContext *ctx) const { // Things that typechecking should have caught AssertPos(pos, vt != NULL); AssertPos(pos, CastType(testType) != NULL && - (CastType(testType)->GetElementCount() == + (CastType(testType)->GetElementCount() == vt->GetElementCount())); - // Do an element-wise select + // Do an element-wise select llvm::Value *result = llvm::UndefValue::get(type->LLVMType(g->ctx)); for (int i = 0; i < vt->GetElementCount(); ++i) { llvm::Value *ti = ctx->ExtractInst(testVal, i); @@ -3153,7 +3153,7 @@ SelectExpr::GetType() const { int expr1VecSize = CastType(expr1Type) != NULL ? 
CastType(expr1Type)->GetElementCount() : 0; AssertPos(pos, !(testVecSize != 0 && expr1VecSize != 0 && testVecSize != expr1VecSize)); - + int vectorSize = std::max(testVecSize, expr1VecSize); return Type::MoreGeneralType(expr1Type, expr2Type, Union(expr1->pos, expr2->pos), "select expression", becomesVarying, vectorSize); @@ -3307,7 +3307,7 @@ SelectExpr::TypeCheck() { int testVecSize = CastType(testType) ? CastType(testType)->GetElementCount() : 0; - const Type *promotedType = + const Type *promotedType = Type::MoreGeneralType(type1, type2, Union(expr1->pos, expr2->pos), "select expression", testType->IsVaryingType(), testVecSize); if (promotedType == NULL) @@ -3347,8 +3347,8 @@ SelectExpr::Print() const { /////////////////////////////////////////////////////////////////////////// // FunctionCallExpr -FunctionCallExpr::FunctionCallExpr(Expr *f, ExprList *a, SourcePos p, - bool il, Expr *lce) +FunctionCallExpr::FunctionCallExpr(Expr *f, ExprList *a, SourcePos p, + bool il, Expr *lce) : Expr(p), isLaunch(il) { func = f; args = a; @@ -3396,7 +3396,7 @@ FunctionCallExpr::GetValue(FunctionEmitContext *ctx) const { // Automatically convert function call args to references if needed. // FIXME: this should move to the TypeCheck() method... (but the // GetLValue call below needs a FunctionEmitContext, which is - // problematic...) + // problematic...) std::vector callargs = args->exprs; bool err = false; @@ -3412,7 +3412,7 @@ FunctionCallExpr::GetValue(FunctionEmitContext *ctx) const { if (argExpr == NULL) continue; - const Type *paramType = ft->GetParameterType(i); + const Type *paramType = ft->GetParameterType(i); const Type *argLValueType = argExpr->GetLValueType(); if (argLValueType != NULL && @@ -3476,7 +3476,7 @@ FunctionCallExpr::GetValue(FunctionEmitContext *ctx) const { ctx->LaunchInst(callee, argVals, launchCount); } else - retVal = ctx->CallInst(callee, ft, argVals, + retVal = ctx->CallInst(callee, ft, argVals, isVoidFunc ? "" : "calltmp"); if (isVoidFunc) @@ -3538,7 +3538,7 @@ FunctionCallExpr::TypeCheck() { const FunctionType *ft = CastType(func->GetType()); if (ft == NULL) { const PointerType *pt = CastType(func->GetType()); - ft = (pt == NULL) ? NULL : + ft = (pt == NULL) ? 
NULL : CastType(pt->GetBaseType()); } @@ -3554,7 +3554,7 @@ FunctionCallExpr::TypeCheck() { if (!launchCountExpr) return NULL; - launchCountExpr = + launchCountExpr = TypeConvertExpr(launchCountExpr, AtomicType::UniformInt32, "task launch count"); if (launchCountExpr == NULL) @@ -3583,7 +3583,7 @@ FunctionCallExpr::TypeCheck() { "function call expression."); return NULL; } - + // Make sure we don't have too many arguments for the function if ((int)argTypes.size() > funcType->GetNumParameters()) { Error(args->pos, "Too many parameter values provided in " @@ -3726,16 +3726,16 @@ ExprList::GetConstant(const Type *type) const { std::string name; if (CastType(type) != NULL) name = "struct"; - else if (CastType(type) != NULL) + else if (CastType(type) != NULL) name = "array"; - else if (CastType(type) != NULL) + else if (CastType(type) != NULL) name = "vector"; - else + else FATAL("Unexpected CollectionType in ExprList::GetConstant()"); if ((int)exprs.size() > collectionType->GetElementCount()) { Error(pos, "Initializer list for %s \"%s\" must have no more than %d " - "elements (has %d).", name.c_str(), + "elements (has %d).", name.c_str(), collectionType->GetString().c_str(), collectionType->GetElementCount(), (int)exprs.size()); return NULL; @@ -3795,7 +3795,7 @@ ExprList::GetConstant(const Type *type) const { } else { llvm::Type *lt = type->LLVMType(g->ctx); - llvm::ArrayType *lat = + llvm::ArrayType *lat = llvm::dyn_cast(lt); if (lat != NULL) return llvm::ConstantArray::get(lat, cv); @@ -3804,7 +3804,7 @@ ExprList::GetConstant(const Type *type) const { AssertPos(pos, type->IsUniformType() && CastType(type) != NULL); - llvm::VectorType *lvt = + llvm::VectorType *lvt = llvm::dyn_cast(lt); AssertPos(pos, lvt != NULL); @@ -3823,7 +3823,7 @@ ExprList::GetConstant(const Type *type) const { // them to fill the native width vectorWidth /= 2; } - + while ((cv.size() % vectorWidth) != 0) { cv.push_back(llvm::UndefValue::get(lvt->getElementType())); } @@ -3856,7 +3856,7 @@ ExprList::Print() const { /////////////////////////////////////////////////////////////////////////// // IndexExpr -IndexExpr::IndexExpr(Expr *a, Expr *i, SourcePos p) +IndexExpr::IndexExpr(Expr *a, Expr *i, SourcePos p) : Expr(p) { baseExpr = a; index = i; @@ -3879,13 +3879,13 @@ IndexExpr::IndexExpr(Expr *a, Expr *i, SourcePos p) sizeof(float) before doing the memory load. For v[index], we need to do the same scaling but also need to add per-lane offsets <0, sizeof(float), 2*sizeof(float), ...> so that the i'th lane loads the - i'th of the varying values at its index value. + i'th of the varying values at its index value. This function handles figuring out when this additional offset is needed and then incorporates it in the varying pointer value. - */ + */ static llvm::Value * -lAddVaryingOffsetsIfNeeded(FunctionEmitContext *ctx, llvm::Value *ptr, +lAddVaryingOffsetsIfNeeded(FunctionEmitContext *ctx, llvm::Value *ptr, const Type *ptrRefType) { if (CastType(ptrRefType) != NULL) // References are uniform pointers, so no offsetting is needed @@ -3905,7 +3905,7 @@ lAddVaryingOffsetsIfNeeded(FunctionEmitContext *ctx, llvm::Value *ptr, return ptr; // Onward: compute the per lane offsets. 
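The per-lane offsets announced in the comment above are materialized one lane at a time in the hunk below. A sketch of the same construction in raw IRBuilder terms, assuming a 4-wide target; builder and int32x4Ty are assumptions, not names from the patch:

// Sketch: build <0, 1, 2, 3>, which is later scaled by the element size
// so that lane i addresses the i'th varying value at its index.
llvm::Value *offsets = llvm::UndefValue::get(int32x4Ty);  // assumed vector type
for (int i = 0; i < 4; ++i)
    offsets = builder.CreateInsertElement(offsets, builder.getInt32(i),
                                          builder.getInt32(i));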
- llvm::Value *varyingOffsets = + llvm::Value *varyingOffsets = llvm::UndefValue::get(LLVMTypes::Int32VectorType); for (int i = 0; i < g->target.vectorWidth; ++i) varyingOffsets = ctx->InsertInst(varyingOffsets, LLVMInt32(i), i, @@ -3983,7 +3983,7 @@ lVaryingStructHasUniformMember(const Type *type, SourcePos pos) { llvm::Value * IndexExpr::GetValue(FunctionEmitContext *ctx) const { const Type *indexType, *returnType; - if (baseExpr == NULL || index == NULL || + if (baseExpr == NULL || index == NULL || ((indexType = index->GetType()) == NULL) || ((returnType = GetType()) == NULL)) { AssertPos(pos, m->errorCount > 0); @@ -3993,7 +3993,7 @@ IndexExpr::GetValue(FunctionEmitContext *ctx) const { // If this is going to be a gather, make sure that the varying return // type can represent the result (i.e. that we don't have a bound // 'uniform' member in a varying struct...) - if (indexType->IsVaryingType() && + if (indexType->IsVaryingType() && lVaryingStructHasUniformMember(returnType, pos)) return NULL; @@ -4013,7 +4013,7 @@ IndexExpr::GetValue(FunctionEmitContext *ctx) const { return NULL; } ctx->SetDebugPos(pos); - llvm::Value *tmpPtr = ctx->AllocaInst(baseExprType->LLVMType(g->ctx), + llvm::Value *tmpPtr = ctx->AllocaInst(baseExprType->LLVMType(g->ctx), "array_tmp"); ctx->StoreInst(val, tmpPtr); @@ -4026,7 +4026,7 @@ IndexExpr::GetValue(FunctionEmitContext *ctx) const { lvType = PointerType::GetUniform(st->GetElementType()); // And do the indexing calculation into the temporary array in memory - ptr = ctx->GetElementPtrInst(tmpPtr, LLVMInt32(0), index->GetValue(ctx), + ptr = ctx->GetElementPtrInst(tmpPtr, LLVMInt32(0), index->GetValue(ctx), PointerType::GetUniform(baseExprType)); ptr = lAddVaryingOffsetsIfNeeded(ctx, ptr, lvType); @@ -4049,7 +4049,7 @@ IndexExpr::GetType() const { return type; const Type *baseExprType, *indexType; - if (!baseExpr || !index || + if (!baseExpr || !index || ((baseExprType = baseExpr->GetType()) == NULL) || ((indexType = index->GetType()) == NULL)) return NULL; @@ -4061,7 +4061,7 @@ IndexExpr::GetType() const { elementType = pointerType->GetBaseType(); else { // sequential type[index] -> element type of the sequential type - const SequentialType *sequentialType = + const SequentialType *sequentialType = CastType(baseExprType->GetReferenceTarget()); // Typechecking should have caught this... AssertPos(pos, sequentialType != NULL); @@ -4103,7 +4103,7 @@ IndexExpr::GetBaseSymbol() const { static llvm::Value * lConvertToSlicePointer(FunctionEmitContext *ctx, llvm::Value *ptr, const PointerType *slicePtrType) { - llvm::Type *llvmSlicePtrType = + llvm::Type *llvmSlicePtrType = slicePtrType->LLVMType(g->ctx); llvm::StructType *sliceStructType = llvm::dyn_cast(llvmSlicePtrType); @@ -4155,10 +4155,10 @@ lCheckIndicesVersusBounds(const Type *baseExprType, Expr *index) { /** Converts the given pointer value to a slice pointer if the pointer - points to SOA'ed data. + points to SOA'ed data. 
*/ static llvm::Value * -lConvertPtrToSliceIfNeeded(FunctionEmitContext *ctx, +lConvertPtrToSliceIfNeeded(FunctionEmitContext *ctx, llvm::Value *ptr, const Type **type) { Assert(*type != NULL); const PointerType *ptrType = CastType(*type); @@ -4175,7 +4175,7 @@ lConvertPtrToSliceIfNeeded(FunctionEmitContext *ctx, llvm::Value * IndexExpr::GetLValue(FunctionEmitContext *ctx) const { const Type *baseExprType; - if (baseExpr == NULL || index == NULL || + if (baseExpr == NULL || index == NULL || ((baseExprType = baseExpr->GetType()) == NULL)) { AssertPos(pos, m->errorCount > 0); return NULL; @@ -4190,7 +4190,7 @@ IndexExpr::GetLValue(FunctionEmitContext *ctx) const { ctx->SetDebugPos(pos); if (CastType(baseExprType) != NULL) { - // We're indexing off of a pointer + // We're indexing off of a pointer llvm::Value *basePtrValue = baseExpr->GetValue(ctx); if (basePtrValue == NULL) { AssertPos(pos, m->errorCount > 0); @@ -4203,7 +4203,7 @@ IndexExpr::GetLValue(FunctionEmitContext *ctx) const { &baseExprType); llvm::Value *ptr = ctx->GetElementPtrInst(basePtrValue, indexValue, - baseExprType, + baseExprType, LLVMGetName(basePtrValue, "_offset")); return lAddVaryingOffsetsIfNeeded(ctx, ptr, GetLValueType()); } @@ -4232,14 +4232,14 @@ IndexExpr::GetLValue(FunctionEmitContext *ctx) const { lCheckIndicesVersusBounds(baseExprType, index); // Convert to a slice pointer if indexing into SOA data - basePtr = lConvertPtrToSliceIfNeeded(ctx, basePtr, + basePtr = lConvertPtrToSliceIfNeeded(ctx, basePtr, (const Type **)&basePtrType); ctx->SetDebugPos(pos); // And do the actual indexing calculation.. - llvm::Value *ptr = - ctx->GetElementPtrInst(basePtr, LLVMInt32(0), indexValue, + llvm::Value *ptr = + ctx->GetElementPtrInst(basePtr, LLVMInt32(0), indexValue, basePtrType, LLVMGetName(basePtr, "_offset")); return lAddVaryingOffsetsIfNeeded(ctx, ptr, GetLValueType()); } @@ -4257,7 +4257,7 @@ IndexExpr::GetLValueType() const { ((indexType = index->GetType()) == NULL)) return NULL; - // regularize to a PointerType + // regularize to a PointerType if (CastType(baseExprLValueType) != NULL) { const Type *refTarget = baseExprLValueType->GetReferenceTarget(); baseExprLValueType = PointerType::GetUniform(refTarget); @@ -4266,12 +4266,12 @@ IndexExpr::GetLValueType() const { // Find the type of thing that we're indexing into const Type *elementType; - const SequentialType *st = + const SequentialType *st = CastType(baseExprLValueType->GetBaseType()); if (st != NULL) elementType = st->GetElementType(); else { - const PointerType *pt = + const PointerType *pt = CastType(baseExprLValueType->GetBaseType()); AssertPos(pos, pt != NULL); elementType = pt->GetBaseType(); @@ -4313,7 +4313,7 @@ IndexExpr::Optimize() { Expr * IndexExpr::TypeCheck() { const Type *indexType; - if (baseExpr == NULL || index == NULL || + if (baseExpr == NULL || index == NULL || ((indexType = index->GetType()) == NULL)) { AssertPos(pos, m->errorCount > 0); return NULL; @@ -4340,7 +4340,7 @@ IndexExpr::TypeCheck() { } } - bool isUniform = (index->GetType()->IsUniformType() && + bool isUniform = (index->GetType()->IsUniformType() && !g->opt.disableUniformMemoryOptimizations); if (!isUniform) { @@ -4361,17 +4361,17 @@ IndexExpr::TypeCheck() { // For 32-bit target: // force the index to 32 bit. // For 64-bit target: - // We don't want to limit the index range. - // We sxt/zxt the index to 64 bit right here because + // We don't want to limit the index range. 
+ // We sxt/zxt the index to 64 bit right here because // LLVM doesn't distinguish unsigned from signed (both are i32) // // However, the index can be still truncated to signed int32 if // the index type is 64 bit and --addressing=32. - bool force_32bit = g->target.is32Bit || - (g->opt.force32BitAddressing && + bool force_32bit = g->target.is32Bit || + (g->opt.force32BitAddressing && Type::EqualIgnoringConst(indexType->GetAsUniformType(), AtomicType::UniformInt64)); - const Type *indexType = force_32bit ? + const Type *indexType = force_32bit ? AtomicType::UniformInt32 : AtomicType::UniformInt64; index = TypeConvertExpr(index, indexType, "array index"); if (index == NULL) @@ -4389,7 +4389,7 @@ IndexExpr::EstimateCost() const { const Type *indexType = index->GetType(); const Type *baseExprType = baseExpr->GetType(); - + if ((indexType != NULL && indexType->IsVaryingType()) || (CastType(baseExprType) != NULL && baseExprType->IsVaryingType())) @@ -4542,7 +4542,7 @@ StructMemberExpr::GetLValueType() const { const PointerType *ptrType = (exprLValueType->IsUniformType() || CastType(exprLValueType) != NULL) ? - PointerType::GetUniform(getElementType()) : + PointerType::GetUniform(getElementType()) : PointerType::GetVarying(getElementType()); // If struct pointer is a slice pointer, the resulting member pointer @@ -4644,7 +4644,7 @@ VectorMemberExpr::VectorMemberExpr(Expr *e, const char *id, SourcePos p, exprVectorType = CastType(pt->GetBaseType()); else { AssertPos(pos, CastType(exprType) != NULL); - exprVectorType = + exprVectorType = CastType(exprType->GetReferenceTarget()); } AssertPos(pos, exprVectorType != NULL); @@ -4663,8 +4663,8 @@ VectorMemberExpr::GetType() const { // type. For n-element expressions, we have a shortvec type // with n > 1 elements. This can be changed when we get // type<1> -> type conversions. - type = (identifier.length() == 1) ? - (const Type *)exprVectorType->GetElementType() : + type = (identifier.length() == 1) ? + (const Type *)exprVectorType->GetElementType() : (const Type *)memberType; const Type *lvType = GetLValueType(); @@ -4725,9 +4725,9 @@ VectorMemberExpr::GetLValueType() const { lvalueType = new ReferenceType(elementType); else { const PointerType *ptrType = exprLValueType->IsUniformType() ? - PointerType::GetUniform(elementType) : + PointerType::GetUniform(elementType) : PointerType::GetVarying(elementType); - // FIXME: replicated logic with structmemberexpr.... + // FIXME: replicated logic with structmemberexpr.... if (CastType(exprLValueType) && CastType(exprLValueType)->IsSlice()) ptrType = ptrType->GetAsFrozenSlice(); @@ -4743,7 +4743,7 @@ llvm::Value * VectorMemberExpr::GetValue(FunctionEmitContext *ctx) const { if (identifier.length() == 1) { return MemberExpr::GetValue(ctx); - } + } else { std::vector indices; @@ -4774,14 +4774,14 @@ VectorMemberExpr::GetValue(FunctionEmitContext *ctx) const { } // Allocate temporary memory to tore the result - llvm::Value *resultPtr = ctx->AllocaInst(memberType->LLVMType(g->ctx), + llvm::Value *resultPtr = ctx->AllocaInst(memberType->LLVMType(g->ctx), "vector_tmp"); - + // FIXME: we should be able to use the internal mask here according // to the same logic where it's used elsewhere llvm::Value *elementMask = ctx->GetFullMask(); - const Type *elementPtrType = basePtrType->IsUniformType() ? + const Type *elementPtrType = basePtrType->IsUniformType() ? 
PointerType::GetUniform(exprVectorType->GetElementType()) : PointerType::GetVarying(exprVectorType->GetElementType()); @@ -4791,7 +4791,7 @@ VectorMemberExpr::GetValue(FunctionEmitContext *ctx) const { llvm::Value *elementPtr = ctx->AddElementOffset(basePtr, indices[i], basePtrType, LLVMGetName(basePtr, idStr)); - llvm::Value *elementValue = + llvm::Value *elementValue = ctx->LoadInst(elementPtr, elementMask, elementPtrType); const char *resultName = LLVMGetName(resultPtr, idStr); @@ -4808,7 +4808,7 @@ int VectorMemberExpr::getElementNumber() const { int elementNumber = lIdentifierToVectorElement(identifier[0]); if (elementNumber == -1) - Error(pos, "Vector element identifier \"%s\" unknown.", + Error(pos, "Vector element identifier \"%s\" unknown.", identifier.c_str()); return elementNumber; } @@ -4849,7 +4849,7 @@ MemberExpr::create(Expr *e, const char *id, SourcePos p, SourcePos idpos, const Type *targetType = exprType->GetReferenceTarget(); if (CastType(targetType) != NULL) Error(p, "Member operator \"->\" can't be applied to non-pointer " - "type \"%s\". Did you mean to use \".\"?", + "type \"%s\". Did you mean to use \".\"?", exprType->GetString().c_str()); else Error(p, "Member operator \"->\" can't be applied to non-struct " @@ -4859,7 +4859,7 @@ MemberExpr::create(Expr *e, const char *id, SourcePos p, SourcePos idpos, if (derefLValue == false && pointerType != NULL && CastType(pointerType->GetBaseType()) != NULL) { Error(p, "Member operator \".\" can't be applied to pointer " - "type \"%s\". Did you mean to use \"->\"?", + "type \"%s\". Did you mean to use \"->\"?", exprType->GetString().c_str()); return NULL; } @@ -4876,7 +4876,7 @@ MemberExpr::create(Expr *e, const char *id, SourcePos p, SourcePos idpos, } else { Error(p, "Member operator \"%s\" can't be used with expression of " - "\"%s\" type.", derefLValue ? "->" : ".", + "\"%s\" type.", derefLValue ? 
"->" : ".", exprType->GetString().c_str()); return NULL; } @@ -4884,7 +4884,7 @@ MemberExpr::create(Expr *e, const char *id, SourcePos p, SourcePos idpos, MemberExpr::MemberExpr(Expr *e, const char *id, SourcePos p, SourcePos idpos, - bool derefLValue) + bool derefLValue) : Expr(p), identifierPos(idpos) { expr = e; identifier = id; @@ -4895,7 +4895,7 @@ MemberExpr::MemberExpr(Expr *e, const char *id, SourcePos p, SourcePos idpos, llvm::Value * MemberExpr::GetValue(FunctionEmitContext *ctx) const { - if (!expr) + if (!expr) return NULL; llvm::Value *lvalue = GetLValue(ctx); @@ -4916,7 +4916,7 @@ MemberExpr::GetValue(FunctionEmitContext *ctx) const { } ctx->SetDebugPos(pos); const Type *exprType = expr->GetType(); - llvm::Value *ptr = ctx->AllocaInst(exprType->LLVMType(g->ctx), + llvm::Value *ptr = ctx->AllocaInst(exprType->LLVMType(g->ctx), "struct_tmp"); ctx->StoreInst(val, ptr); @@ -4924,7 +4924,7 @@ MemberExpr::GetValue(FunctionEmitContext *ctx) const { if (elementNumber == -1) return NULL; - lvalue = ctx->AddElementOffset(ptr, elementNumber, + lvalue = ctx->AddElementOffset(ptr, elementNumber, PointerType::GetUniform(exprType)); lvalueType = PointerType::GetUniform(GetType()); mask = LLVMMaskAllOn; @@ -4937,7 +4937,7 @@ MemberExpr::GetValue(FunctionEmitContext *ctx) const { ctx->SetDebugPos(pos); std::string suffix = std::string("_") + identifier; - return ctx->LoadInst(lvalue, mask, lvalueType, + return ctx->LoadInst(lvalue, mask, lvalueType, LLVMGetName(lvalue, suffix.c_str())); } @@ -4980,7 +4980,7 @@ MemberExpr::GetLValue(FunctionEmitContext *ctx) const { expr->GetLValueType(); ctx->SetDebugPos(pos); llvm::Value *ptr = ctx->AddElementOffset(basePtr, elementNumber, - exprLValueType, + exprLValueType, basePtr->getName().str().c_str()); if (ptr == NULL) { AssertPos(pos, m->errorCount > 0); @@ -5057,7 +5057,7 @@ MemberExpr::getCandidateNearMatches() const { /////////////////////////////////////////////////////////////////////////// // ConstExpr -ConstExpr::ConstExpr(const Type *t, int8_t i, SourcePos p) +ConstExpr::ConstExpr(const Type *t, int8_t i, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); @@ -5066,18 +5066,18 @@ ConstExpr::ConstExpr(const Type *t, int8_t i, SourcePos p) } -ConstExpr::ConstExpr(const Type *t, int8_t *i, SourcePos p) +ConstExpr::ConstExpr(const Type *t, int8_t *i, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - AssertPos(pos, Type::Equal(type, AtomicType::UniformInt8->GetAsConstType()) || + AssertPos(pos, Type::Equal(type, AtomicType::UniformInt8->GetAsConstType()) || Type::Equal(type, AtomicType::VaryingInt8->GetAsConstType())); for (int j = 0; j < Count(); ++j) int8Val[j] = i[j]; } -ConstExpr::ConstExpr(const Type *t, uint8_t u, SourcePos p) +ConstExpr::ConstExpr(const Type *t, uint8_t u, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); @@ -5086,18 +5086,18 @@ ConstExpr::ConstExpr(const Type *t, uint8_t u, SourcePos p) } -ConstExpr::ConstExpr(const Type *t, uint8_t *u, SourcePos p) +ConstExpr::ConstExpr(const Type *t, uint8_t *u, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - AssertPos(pos, Type::Equal(type, AtomicType::UniformUInt8->GetAsConstType()) || + AssertPos(pos, Type::Equal(type, AtomicType::UniformUInt8->GetAsConstType()) || Type::Equal(type, AtomicType::VaryingUInt8->GetAsConstType())); for (int j = 0; j < Count(); ++j) uint8Val[j] = u[j]; } -ConstExpr::ConstExpr(const Type *t, int16_t i, SourcePos p) +ConstExpr::ConstExpr(const Type *t, int16_t i, SourcePos p) : Expr(p) { 
type = t; type = type->GetAsConstType(); @@ -5106,18 +5106,18 @@ ConstExpr::ConstExpr(const Type *t, int16_t i, SourcePos p) } -ConstExpr::ConstExpr(const Type *t, int16_t *i, SourcePos p) +ConstExpr::ConstExpr(const Type *t, int16_t *i, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - AssertPos(pos, Type::Equal(type, AtomicType::UniformInt16->GetAsConstType()) || + AssertPos(pos, Type::Equal(type, AtomicType::UniformInt16->GetAsConstType()) || Type::Equal(type, AtomicType::VaryingInt16->GetAsConstType())); for (int j = 0; j < Count(); ++j) int16Val[j] = i[j]; } -ConstExpr::ConstExpr(const Type *t, uint16_t u, SourcePos p) +ConstExpr::ConstExpr(const Type *t, uint16_t u, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); @@ -5126,18 +5126,18 @@ ConstExpr::ConstExpr(const Type *t, uint16_t u, SourcePos p) } -ConstExpr::ConstExpr(const Type *t, uint16_t *u, SourcePos p) +ConstExpr::ConstExpr(const Type *t, uint16_t *u, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - AssertPos(pos, Type::Equal(type, AtomicType::UniformUInt16->GetAsConstType()) || + AssertPos(pos, Type::Equal(type, AtomicType::UniformUInt16->GetAsConstType()) || Type::Equal(type, AtomicType::VaryingUInt16->GetAsConstType())); for (int j = 0; j < Count(); ++j) uint16Val[j] = u[j]; } -ConstExpr::ConstExpr(const Type *t, int32_t i, SourcePos p) +ConstExpr::ConstExpr(const Type *t, int32_t i, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); @@ -5146,18 +5146,18 @@ ConstExpr::ConstExpr(const Type *t, int32_t i, SourcePos p) } -ConstExpr::ConstExpr(const Type *t, int32_t *i, SourcePos p) +ConstExpr::ConstExpr(const Type *t, int32_t *i, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - AssertPos(pos, Type::Equal(type, AtomicType::UniformInt32->GetAsConstType()) || + AssertPos(pos, Type::Equal(type, AtomicType::UniformInt32->GetAsConstType()) || Type::Equal(type, AtomicType::VaryingInt32->GetAsConstType())); for (int j = 0; j < Count(); ++j) int32Val[j] = i[j]; } -ConstExpr::ConstExpr(const Type *t, uint32_t u, SourcePos p) +ConstExpr::ConstExpr(const Type *t, uint32_t u, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); @@ -5168,11 +5168,11 @@ ConstExpr::ConstExpr(const Type *t, uint32_t u, SourcePos p) } -ConstExpr::ConstExpr(const Type *t, uint32_t *u, SourcePos p) +ConstExpr::ConstExpr(const Type *t, uint32_t *u, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - AssertPos(pos, Type::Equal(type, AtomicType::UniformUInt32->GetAsConstType()) || + AssertPos(pos, Type::Equal(type, AtomicType::UniformUInt32->GetAsConstType()) || Type::Equal(type, AtomicType::VaryingUInt32->GetAsConstType()) || (CastType(type) != NULL)); for (int j = 0; j < Count(); ++j) @@ -5189,18 +5189,18 @@ ConstExpr::ConstExpr(const Type *t, float f, SourcePos p) } -ConstExpr::ConstExpr(const Type *t, float *f, SourcePos p) +ConstExpr::ConstExpr(const Type *t, float *f, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - AssertPos(pos, Type::Equal(type, AtomicType::UniformFloat->GetAsConstType()) || + AssertPos(pos, Type::Equal(type, AtomicType::UniformFloat->GetAsConstType()) || Type::Equal(type, AtomicType::VaryingFloat->GetAsConstType())); for (int j = 0; j < Count(); ++j) floatVal[j] = f[j]; } -ConstExpr::ConstExpr(const Type *t, int64_t i, SourcePos p) +ConstExpr::ConstExpr(const Type *t, int64_t i, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); @@ -5209,18 +5209,18 @@ ConstExpr::ConstExpr(const Type *t, 
int64_t i, SourcePos p) } -ConstExpr::ConstExpr(const Type *t, int64_t *i, SourcePos p) +ConstExpr::ConstExpr(const Type *t, int64_t *i, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - AssertPos(pos, Type::Equal(type, AtomicType::UniformInt64->GetAsConstType()) || + AssertPos(pos, Type::Equal(type, AtomicType::UniformInt64->GetAsConstType()) || Type::Equal(type, AtomicType::VaryingInt64->GetAsConstType())); for (int j = 0; j < Count(); ++j) int64Val[j] = i[j]; } -ConstExpr::ConstExpr(const Type *t, uint64_t u, SourcePos p) +ConstExpr::ConstExpr(const Type *t, uint64_t u, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); @@ -5229,11 +5229,11 @@ ConstExpr::ConstExpr(const Type *t, uint64_t u, SourcePos p) } -ConstExpr::ConstExpr(const Type *t, uint64_t *u, SourcePos p) +ConstExpr::ConstExpr(const Type *t, uint64_t *u, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - AssertPos(pos, Type::Equal(type, AtomicType::UniformUInt64->GetAsConstType()) || + AssertPos(pos, Type::Equal(type, AtomicType::UniformUInt64->GetAsConstType()) || Type::Equal(type, AtomicType::VaryingUInt64->GetAsConstType())); for (int j = 0; j < Count(); ++j) uint64Val[j] = u[j]; @@ -5249,18 +5249,18 @@ ConstExpr::ConstExpr(const Type *t, double f, SourcePos p) } -ConstExpr::ConstExpr(const Type *t, double *f, SourcePos p) +ConstExpr::ConstExpr(const Type *t, double *f, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - AssertPos(pos, Type::Equal(type, AtomicType::UniformDouble->GetAsConstType()) || + AssertPos(pos, Type::Equal(type, AtomicType::UniformDouble->GetAsConstType()) || Type::Equal(type, AtomicType::VaryingDouble->GetAsConstType())); for (int j = 0; j < Count(); ++j) doubleVal[j] = f[j]; } -ConstExpr::ConstExpr(const Type *t, bool b, SourcePos p) +ConstExpr::ConstExpr(const Type *t, bool b, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); @@ -5269,18 +5269,18 @@ ConstExpr::ConstExpr(const Type *t, bool b, SourcePos p) } -ConstExpr::ConstExpr(const Type *t, bool *b, SourcePos p) +ConstExpr::ConstExpr(const Type *t, bool *b, SourcePos p) : Expr(p) { type = t; type = type->GetAsConstType(); - AssertPos(pos, Type::Equal(type, AtomicType::UniformBool->GetAsConstType()) || + AssertPos(pos, Type::Equal(type, AtomicType::UniformBool->GetAsConstType()) || Type::Equal(type, AtomicType::VaryingBool->GetAsConstType())); for (int j = 0; j < Count(); ++j) boolVal[j] = b[j]; } -ConstExpr::ConstExpr(ConstExpr *old, double *v) +ConstExpr::ConstExpr(ConstExpr *old, double *v) : Expr(old->pos) { type = old->type; @@ -5325,7 +5325,7 @@ ConstExpr::ConstExpr(ConstExpr *old, double *v) break; case AtomicType::TYPE_INT64: case AtomicType::TYPE_UINT64: - // For now, this should never be reached + // For now, this should never be reached FATAL("fixme; we need another constructor so that we're not trying to pass " "double values to init an int64 type..."); default: @@ -5377,7 +5377,7 @@ ConstExpr::ConstExpr(ConstExpr *old, SourcePos p) default: FATAL("unimplemented const type"); } - + } @@ -5394,8 +5394,8 @@ ConstExpr::getBasicType() const { const Type * -ConstExpr::GetType() const { - return type; +ConstExpr::GetType() const { + return type; } @@ -5413,34 +5413,34 @@ ConstExpr::GetValue(FunctionEmitContext *ctx) const { else return boolVal[0] ? LLVMTrue : LLVMFalse; case AtomicType::TYPE_INT8: - return isVarying ? LLVMInt8Vector(int8Val) : + return isVarying ? 
LLVMInt8Vector(int8Val) : LLVMInt8(int8Val[0]); case AtomicType::TYPE_UINT8: - return isVarying ? LLVMUInt8Vector(uint8Val) : + return isVarying ? LLVMUInt8Vector(uint8Val) : LLVMUInt8(uint8Val[0]); case AtomicType::TYPE_INT16: - return isVarying ? LLVMInt16Vector(int16Val) : + return isVarying ? LLVMInt16Vector(int16Val) : LLVMInt16(int16Val[0]); case AtomicType::TYPE_UINT16: - return isVarying ? LLVMUInt16Vector(uint16Val) : + return isVarying ? LLVMUInt16Vector(uint16Val) : LLVMUInt16(uint16Val[0]); case AtomicType::TYPE_INT32: - return isVarying ? LLVMInt32Vector(int32Val) : + return isVarying ? LLVMInt32Vector(int32Val) : LLVMInt32(int32Val[0]); case AtomicType::TYPE_UINT32: - return isVarying ? LLVMUInt32Vector(uint32Val) : + return isVarying ? LLVMUInt32Vector(uint32Val) : LLVMUInt32(uint32Val[0]); case AtomicType::TYPE_FLOAT: - return isVarying ? LLVMFloatVector(floatVal) : + return isVarying ? LLVMFloatVector(floatVal) : LLVMFloat(floatVal[0]); case AtomicType::TYPE_INT64: - return isVarying ? LLVMInt64Vector(int64Val) : + return isVarying ? LLVMInt64Vector(int64Val) : LLVMInt64(int64Val[0]); case AtomicType::TYPE_UINT64: - return isVarying ? LLVMUInt64Vector(uint64Val) : + return isVarying ? LLVMUInt64Vector(uint64Val) : LLVMUInt64(uint64Val[0]); case AtomicType::TYPE_DOUBLE: - return isVarying ? LLVMDoubleVector(doubleVal) : + return isVarying ? LLVMDoubleVector(doubleVal) : LLVMDouble(doubleVal[0]); default: FATAL("unimplemented const type"); @@ -5462,7 +5462,7 @@ lConvertElement(From from, To *to) { /** When converting from bool types to numeric types, make sure the result is one or zero. - */ + */ template static inline void lConvertElement(bool from, To *to) { *to = from ? (To)1 : (To)0; @@ -5729,8 +5729,8 @@ ConstExpr::AsUInt32(uint32_t *up, bool forceVarying) const { int -ConstExpr::Count() const { - return GetType()->IsVaryingType() ? g->target.vectorWidth : 1; +ConstExpr::Count() const { + return GetType()->IsVaryingType() ? 
g->target.vectorWidth : 1; } @@ -5742,7 +5742,7 @@ ConstExpr::GetConstant(const Type *type) const { AssertPos(pos, Count() == 1); type = type->GetAsNonConstType(); - if (Type::Equal(type, AtomicType::UniformBool) || + if (Type::Equal(type, AtomicType::UniformBool) || Type::Equal(type, AtomicType::VaryingBool)) { bool bv[ISPC_MAX_NVEC]; AsBool(bv, type->IsVaryingType()); @@ -5760,7 +5760,7 @@ ConstExpr::GetConstant(const Type *type) const { else return LLVMInt8Vector(iv); } - else if (Type::Equal(type, AtomicType::UniformUInt8) || + else if (Type::Equal(type, AtomicType::UniformUInt8) || Type::Equal(type, AtomicType::VaryingUInt8)) { uint8_t uiv[ISPC_MAX_NVEC]; AsUInt8(uiv, type->IsVaryingType()); @@ -5787,7 +5787,7 @@ ConstExpr::GetConstant(const Type *type) const { else return LLVMUInt16Vector(uiv); } - else if (Type::Equal(type, AtomicType::UniformInt32) || + else if (Type::Equal(type, AtomicType::UniformInt32) || Type::Equal(type, AtomicType::VaryingInt32)) { int32_t iv[ISPC_MAX_NVEC]; AsInt32(iv, type->IsVaryingType()); @@ -5796,7 +5796,7 @@ ConstExpr::GetConstant(const Type *type) const { else return LLVMInt32Vector(iv); } - else if (Type::Equal(type, AtomicType::UniformUInt32) || + else if (Type::Equal(type, AtomicType::UniformUInt32) || Type::Equal(type, AtomicType::VaryingUInt32) || CastType(type) != NULL) { uint32_t uiv[ISPC_MAX_NVEC]; @@ -5806,7 +5806,7 @@ ConstExpr::GetConstant(const Type *type) const { else return LLVMUInt32Vector(uiv); } - else if (Type::Equal(type, AtomicType::UniformFloat) || + else if (Type::Equal(type, AtomicType::UniformFloat) || Type::Equal(type, AtomicType::VaryingFloat)) { float fv[ISPC_MAX_NVEC]; AsFloat(fv, type->IsVaryingType()); @@ -5815,7 +5815,7 @@ ConstExpr::GetConstant(const Type *type) const { else return LLVMFloatVector(fv); } - else if (Type::Equal(type, AtomicType::UniformInt64) || + else if (Type::Equal(type, AtomicType::UniformInt64) || Type::Equal(type, AtomicType::VaryingInt64)) { int64_t iv[ISPC_MAX_NVEC]; AsInt64(iv, type->IsVaryingType()); @@ -5940,7 +5940,7 @@ ConstExpr::Print() const { /////////////////////////////////////////////////////////////////////////// // TypeCastExpr -TypeCastExpr::TypeCastExpr(const Type *t, Expr *e, SourcePos p) +TypeCastExpr::TypeCastExpr(const Type *t, Expr *e, SourcePos p) : Expr(p) { type = t; expr = e; @@ -5952,7 +5952,7 @@ TypeCastExpr::TypeCastExpr(const Type *t, Expr *e, SourcePos p) llvm::Value with type toType. */ static llvm::Value * -lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, +lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, const AtomicType *toType, const AtomicType *fromType, SourcePos pos) { llvm::Value *cast = NULL; @@ -5976,12 +5976,12 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, switch (toType->basicType) { case AtomicType::TYPE_FLOAT: { - llvm::Type *targetType = - fromType->IsUniformType() ? LLVMTypes::FloatType : + llvm::Type *targetType = + fromType->IsUniformType() ? LLVMTypes::FloatType : LLVMTypes::FloatVectorType; switch (fromType->basicType) { case AtomicType::TYPE_BOOL: - if (fromType->IsVaryingType() && + if (fromType->IsVaryingType() && LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType) // If we have a bool vector of i32 elements, first truncate // down to a single bit @@ -6020,12 +6020,12 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, break; } case AtomicType::TYPE_DOUBLE: { - llvm::Type *targetType = + llvm::Type *targetType = fromType->IsUniformType() ? 
LLVMTypes::DoubleType : LLVMTypes::DoubleVectorType; switch (fromType->basicType) { case AtomicType::TYPE_BOOL: - if (fromType->IsVaryingType() && + if (fromType->IsVaryingType() && LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType) // truncate i32 bool vector values to i1s exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName); @@ -6058,12 +6058,12 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, break; } case AtomicType::TYPE_INT8: { - llvm::Type *targetType = + llvm::Type *targetType = fromType->IsUniformType() ? LLVMTypes::Int8Type : LLVMTypes::Int8VectorType; switch (fromType->basicType) { case AtomicType::TYPE_BOOL: - if (fromType->IsVaryingType() && + if (fromType->IsVaryingType() && LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType) exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName); cast = ctx->ZExtInst(exprVal, targetType, cOpName); @@ -6094,12 +6094,12 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, break; } case AtomicType::TYPE_UINT8: { - llvm::Type *targetType = + llvm::Type *targetType = fromType->IsUniformType() ? LLVMTypes::Int8Type : LLVMTypes::Int8VectorType; switch (fromType->basicType) { case AtomicType::TYPE_BOOL: - if (fromType->IsVaryingType() && + if (fromType->IsVaryingType() && LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType) exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName); cast = ctx->ZExtInst(exprVal, targetType, cOpName); @@ -6136,12 +6136,12 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, break; } case AtomicType::TYPE_INT16: { - llvm::Type *targetType = + llvm::Type *targetType = fromType->IsUniformType() ? LLVMTypes::Int16Type : LLVMTypes::Int16VectorType; switch (fromType->basicType) { case AtomicType::TYPE_BOOL: - if (fromType->IsVaryingType() && + if (fromType->IsVaryingType() && LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType) exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName); cast = ctx->ZExtInst(exprVal, targetType, cOpName); @@ -6176,12 +6176,12 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, break; } case AtomicType::TYPE_UINT16: { - llvm::Type *targetType = + llvm::Type *targetType = fromType->IsUniformType() ? LLVMTypes::Int16Type : LLVMTypes::Int16VectorType; switch (fromType->basicType) { case AtomicType::TYPE_BOOL: - if (fromType->IsVaryingType() && + if (fromType->IsVaryingType() && LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType) exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName); cast = ctx->ZExtInst(exprVal, targetType, cOpName); @@ -6191,7 +6191,7 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, break; case AtomicType::TYPE_UINT8: cast = ctx->ZExtInst(exprVal, targetType, cOpName); - break; + break; case AtomicType::TYPE_INT16: case AtomicType::TYPE_UINT16: cast = exprVal; @@ -6222,12 +6222,12 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, break; } case AtomicType::TYPE_INT32: { - llvm::Type *targetType = + llvm::Type *targetType = fromType->IsUniformType() ? 
LLVMTypes::Int32Type : LLVMTypes::Int32VectorType; switch (fromType->basicType) { case AtomicType::TYPE_BOOL: - if (fromType->IsVaryingType() && + if (fromType->IsVaryingType() && LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType) exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName); cast = ctx->ZExtInst(exprVal, targetType, cOpName); @@ -6262,12 +6262,12 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, break; } case AtomicType::TYPE_UINT32: { - llvm::Type *targetType = + llvm::Type *targetType = fromType->IsUniformType() ? LLVMTypes::Int32Type : LLVMTypes::Int32VectorType; switch (fromType->basicType) { case AtomicType::TYPE_BOOL: - if (fromType->IsVaryingType() && + if (fromType->IsVaryingType() && LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType) exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName); cast = ctx->ZExtInst(exprVal, targetType, cOpName); @@ -6279,7 +6279,7 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, case AtomicType::TYPE_UINT8: case AtomicType::TYPE_UINT16: cast = ctx->ZExtInst(exprVal, targetType, cOpName); - break; + break; case AtomicType::TYPE_INT32: case AtomicType::TYPE_UINT32: cast = exprVal; @@ -6308,8 +6308,8 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, break; } case AtomicType::TYPE_INT64: { - llvm::Type *targetType = - fromType->IsUniformType() ? LLVMTypes::Int64Type : + llvm::Type *targetType = + fromType->IsUniformType() ? LLVMTypes::Int64Type : LLVMTypes::Int64VectorType; switch (fromType->basicType) { case AtomicType::TYPE_BOOL: @@ -6346,12 +6346,12 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, break; } case AtomicType::TYPE_UINT64: { - llvm::Type *targetType = - fromType->IsUniformType() ? LLVMTypes::Int64Type : + llvm::Type *targetType = + fromType->IsUniformType() ? LLVMTypes::Int64Type : LLVMTypes::Int64VectorType; switch (fromType->basicType) { case AtomicType::TYPE_BOOL: - if (fromType->IsVaryingType() && + if (fromType->IsVaryingType() && LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType) exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName); cast = ctx->ZExtInst(exprVal, targetType, cOpName); @@ -6396,7 +6396,7 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, break; case AtomicType::TYPE_INT8: case AtomicType::TYPE_UINT8: { - llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMInt8(0) : + llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMInt8(0) : (llvm::Value *)LLVMInt8Vector((int8_t)0); cast = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_NE, exprVal, zero, cOpName); @@ -6404,7 +6404,7 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, } case AtomicType::TYPE_INT16: case AtomicType::TYPE_UINT16: { - llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMInt16(0) : + llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMInt16(0) : (llvm::Value *)LLVMInt16Vector((int16_t)0); cast = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_NE, exprVal, zero, cOpName); @@ -6412,14 +6412,14 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, } case AtomicType::TYPE_INT32: case AtomicType::TYPE_UINT32: { - llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMInt32(0) : + llvm::Value *zero = fromType->IsUniformType() ? 
(llvm::Value *)LLVMInt32(0) : (llvm::Value *)LLVMInt32Vector(0); cast = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_NE, exprVal, zero, cOpName); break; } case AtomicType::TYPE_FLOAT: { - llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMFloat(0.f) : + llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMFloat(0.f) : (llvm::Value *)LLVMFloatVector(0.f); cast = ctx->CmpInst(llvm::Instruction::FCmp, llvm::CmpInst::FCMP_ONE, exprVal, zero, cOpName); @@ -6427,14 +6427,14 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, } case AtomicType::TYPE_INT64: case AtomicType::TYPE_UINT64: { - llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMInt64(0) : + llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMInt64(0) : (llvm::Value *)LLVMInt64Vector((int64_t)0); cast = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_NE, exprVal, zero, cOpName); break; } case AtomicType::TYPE_DOUBLE: { - llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMDouble(0.) : + llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMDouble(0.) : (llvm::Value *)LLVMDoubleVector(0.); cast = ctx->CmpInst(llvm::Instruction::FCmp, llvm::CmpInst::FCMP_ONE, exprVal, zero, cOpName); @@ -6445,7 +6445,7 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal, } if (fromType->IsUniformType()) { - if (toType->IsVaryingType() && + if (toType->IsVaryingType() && LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType) { // extend out to i32 bool values from i1 here. then we'll // turn into a vector below, the way it does for everyone @@ -6489,7 +6489,7 @@ lUniformValueToVarying(FunctionEmitContext *ctx, llvm::Value *value, // varying (if needed) and populate the return value. 
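A note on the bool-conversion cases above: every numeric-to-bool cast is emitted as a compare against a zero constant of the source type, ICMP_NE for the integer widths and FCMP_ONE for float and double. FCMP_ONE is "ordered and not equal", so a NaN lane converts to false. A minimal scalar model of those semantics (plain C++, not the emitted IR):

    #include <cmath>
    #include <cstdint>

    // ICMP_NE against zero: any nonzero integer converts to true.
    static bool intToBool(int64_t v) { return v != 0; }

    // FCMP_ONE against zero: ordered-and-not-equal, so NaN -> false.
    static bool floatToBool(double v) { return !std::isnan(v) && v != 0.0; }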
const CollectionType *collectionType = CastType(type); if (collectionType != NULL) { - llvm::Type *llvmType = + llvm::Type *llvmType = type->GetAsVaryingType()->LLVMType(g->ctx); llvm::Value *retValue = llvm::UndefValue::get(llvmType); @@ -6592,18 +6592,18 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const { AssertPos(pos, CastType(toType) != NULL); if (toType->IsBoolType()) { // convert pointer to bool - llvm::Type *lfu = + llvm::Type *lfu = fromType->GetAsUniformType()->LLVMType(g->ctx); - llvm::PointerType *llvmFromUnifType = + llvm::PointerType *llvmFromUnifType = llvm::dyn_cast(lfu); - llvm::Value *nullPtrValue = + llvm::Value *nullPtrValue = llvm::ConstantPointerNull::get(llvmFromUnifType); if (fromType->IsVaryingType()) nullPtrValue = ctx->SmearUniform(nullPtrValue); llvm::Value *exprVal = expr->GetValue(ctx); - llvm::Value *cmp = + llvm::Value *cmp = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_NE, exprVal, nullPtrValue, "ptr_ne_NULL"); @@ -6660,7 +6660,7 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const { if (toArrayType != NULL && fromArrayType != NULL) { // cast array pointer from [n x foo] to [0 x foo] if needed to be able // to pass to a function that takes an unsized array as a parameter - if (toArrayType->GetElementCount() != 0 && + if (toArrayType->GetElementCount() != 0 && (toArrayType->GetElementCount() != fromArrayType->GetElementCount())) Warning(pos, "Type-converting array of length %d to length %d", fromArrayType->GetElementCount(), toArrayType->GetElementCount()); @@ -6682,7 +6682,7 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const { if (toArray && fromArray) { // cast array pointer from [n x foo] to [0 x foo] if needed to be able // to pass to a function that takes an unsized array as a parameter - if(toArray->GetElementCount() != 0 && + if(toArray->GetElementCount() != 0 && (toArray->GetElementCount() != fromArray->GetElementCount())) Warning(pos, "Type-converting array of length %d to length %d", fromArray->GetElementCount(), toArray->GetElementCount()); @@ -6704,7 +6704,7 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const { // The only legal type conversions for structs are to go from a // uniform to a varying instance of the same struct type. 
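The pointer-to-bool branch above builds a ConstantPointerNull of the source's uniform type, smears it across the lanes when the source is varying, and compares lane-wise with ICMP_NE (the "ptr_ne_NULL" value). A sketch of the lane-wise semantics, assuming a hypothetical fixed width kWidth in place of g->target.vectorWidth:

    #include <array>

    constexpr int kWidth = 8;  // hypothetical; really g->target.vectorWidth

    // Varying pointer -> varying bool: NULL is smeared across the lanes
    // and each lane is compared independently.
    static std::array<bool, kWidth> ptrToBool(const std::array<void *, kWidth> &p) {
        std::array<bool, kWidth> result{};
        for (int i = 0; i < kWidth; ++i)
            result[i] = (p[i] != nullptr);
        return result;
    }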
AssertPos(pos, toStruct->IsVaryingType() && fromStruct->IsUniformType() && - Type::EqualIgnoringConst(toStruct, + Type::EqualIgnoringConst(toStruct, fromStruct->GetAsVaryingType())); llvm::Value *origValue = expr->GetValue(ctx); @@ -6735,7 +6735,7 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const { llvm::Value *conv = lTypeConvAtomic(ctx, ei, toVector->GetElementType(), fromVector->GetElementType(), pos); - if (!conv) + if (!conv) return NULL; cast = ctx->InsertInst(cast, conv, i); } @@ -6765,7 +6765,7 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const { // scalar -> short vector conversion llvm::Value *conv = lTypeConvAtomic(ctx, exprVal, toVector->GetElementType(), fromAtomic, pos); - if (!conv) + if (!conv) return NULL; llvm::Value *cast = llvm::UndefValue::get(toType->LLVMType(g->ctx)); @@ -6795,9 +6795,9 @@ TypeCastExpr::GetValue(FunctionEmitContext *ctx) const { const Type * -TypeCastExpr::GetType() const { +TypeCastExpr::GetType() const { AssertPos(pos, type->HasUnboundVariability() == false); - return type; + return type; } @@ -6805,7 +6805,7 @@ static const Type * lDeconstifyType(const Type *t) { const PointerType *pt = CastType(t); if (pt != NULL) - return new PointerType(lDeconstifyType(pt->GetBaseType()), + return new PointerType(lDeconstifyType(pt->GetBaseType()), pt->GetVariability(), false); else return t->GetAsNonConstType(); @@ -6866,8 +6866,8 @@ TypeCastExpr::TypeCheck() { toAtomic->basicType == AtomicType::TYPE_UINT32); if (safeCast == false) Warning(pos, "Pointer type cast of type \"%s\" to integer type " - "\"%s\" may lose information.", - fromType->GetString().c_str(), + "\"%s\" may lose information.", + fromType->GetString().c_str(), toType->GetString().c_str()); return this; } @@ -7004,7 +7004,7 @@ lConvertPointerConstant(llvm::Constant *c, const Type *constType) { // Handle conversion to int and then to vector of int or array of int // (for varying and soa types, respectively) - llvm::Constant *intPtr = + llvm::Constant *intPtr = llvm::ConstantExpr::getPtrToInt(c, LLVMTypes::PointerIntType); Assert(constType->IsVaryingType() || constType->IsSOAType()); int count = constType->IsVaryingType() ? g->target.vectorWidth : @@ -7062,7 +7062,7 @@ ReferenceExpr::GetValue(FunctionEmitContext *ctx) const { AssertPos(pos, m->errorCount > 0); return NULL; } - + llvm::Value *value = expr->GetLValue(ctx); if (value != NULL) return value; @@ -7097,11 +7097,11 @@ ReferenceExpr::GetBaseSymbol() const { const Type * ReferenceExpr::GetType() const { - if (!expr) + if (!expr) return NULL; const Type *type = expr->GetType(); - if (!type) + if (!type) return NULL; return new ReferenceType(type); @@ -7110,11 +7110,11 @@ ReferenceExpr::GetType() const { const Type * ReferenceExpr::GetLValueType() const { - if (!expr) + if (!expr) return NULL; const Type *type = expr->GetType(); - if (!type) + if (!type) return NULL; return PointerType::GetUniform(type); @@ -7166,7 +7166,7 @@ DerefExpr::DerefExpr(Expr *e, SourcePos p) llvm::Value * DerefExpr::GetValue(FunctionEmitContext *ctx) const { - if (expr == NULL) + if (expr == NULL) return NULL; llvm::Value *ptr = expr->GetValue(ctx); if (ptr == NULL) @@ -7179,7 +7179,7 @@ DerefExpr::GetValue(FunctionEmitContext *ctx) const { return NULL; Symbol *baseSym = expr->GetBaseSymbol(); - llvm::Value *mask = baseSym ? lMaskForSymbol(baseSym, ctx) : + llvm::Value *mask = baseSym ? 
lMaskForSymbol(baseSym, ctx) : ctx->GetFullMask(); ctx->SetDebugPos(pos); @@ -7189,7 +7189,7 @@ DerefExpr::GetValue(FunctionEmitContext *ctx) const { llvm::Value * DerefExpr::GetLValue(FunctionEmitContext *ctx) const { - if (expr == NULL) + if (expr == NULL) return NULL; return expr->GetValue(ctx); } @@ -7257,7 +7257,7 @@ PtrDerefExpr::TypeCheck() { } } else { - Error(pos, "Illegal to dereference non-pointer type \"%s\".", + Error(pos, "Illegal to dereference non-pointer type \"%s\".", type->GetString().c_str()); return NULL; } @@ -7310,7 +7310,7 @@ RefDerefExpr::GetType() const { AssertPos(pos, m->errorCount > 0); return NULL; } - + AssertPos(pos, CastType(type) != NULL); return type->GetReferenceTarget(); } @@ -7476,7 +7476,7 @@ AddressOfExpr::GetConstant(const Type *type) const { /////////////////////////////////////////////////////////////////////////// // SizeOfExpr -SizeOfExpr::SizeOfExpr(Expr *e, SourcePos p) +SizeOfExpr::SizeOfExpr(Expr *e, SourcePos p) : Expr(p), expr(e), type(NULL) { } @@ -7504,7 +7504,7 @@ SizeOfExpr::GetValue(FunctionEmitContext *ctx) const { const Type * SizeOfExpr::GetType() const { - return (g->target.is32Bit || g->opt.force32BitAddressing) ? + return (g->target.is32Bit || g->opt.force32BitAddressing) ? AtomicType::UniformUInt32 : AtomicType::UniformUInt64; } @@ -7512,7 +7512,7 @@ SizeOfExpr::GetType() const { void SizeOfExpr::Print() const { printf("Sizeof ("); - if (expr != NULL) + if (expr != NULL) expr->Print(); const Type *t = expr ? expr->GetType() : type; if (t != NULL) @@ -7550,7 +7550,7 @@ SizeOfExpr::EstimateCost() const { /////////////////////////////////////////////////////////////////////////// // SymbolExpr -SymbolExpr::SymbolExpr(Symbol *s, SourcePos p) +SymbolExpr::SymbolExpr(Symbol *s, SourcePos p) : Expr(p) { symbol = s; } @@ -7596,7 +7596,7 @@ SymbolExpr::GetBaseSymbol() const { const Type * -SymbolExpr::GetType() const { +SymbolExpr::GetType() const { return symbol ? 
symbol->type : NULL; } @@ -7633,7 +7633,7 @@ SymbolExpr::Print() const { if (symbol == NULL || GetType() == NULL) return; - printf("[%s] sym: (%s)", GetType()->GetString().c_str(), + printf("[%s] sym: (%s)", GetType()->GetString().c_str(), symbol->name.c_str()); pos.Print(); } @@ -7644,7 +7644,7 @@ SymbolExpr::Print() const { FunctionSymbolExpr::FunctionSymbolExpr(const char *n, const std::vector &candidates, - SourcePos p) + SourcePos p) : Expr(p) { name = n; candidateFunctions = candidates; @@ -7656,7 +7656,7 @@ FunctionSymbolExpr::FunctionSymbolExpr(const char *n, const Type * FunctionSymbolExpr::GetType() const { if (triedToResolve == false && matchingFunc == NULL) { - Error(pos, "Ambiguous use of overloaded function \"%s\".", + Error(pos, "Ambiguous use of overloaded function \"%s\".", name.c_str()); return NULL; } @@ -7727,8 +7727,8 @@ FunctionSymbolExpr::GetConstant(const Type *type) const { static std::string -lGetOverloadCandidateMessage(const std::vector &funcs, - const std::vector &argTypes, +lGetOverloadCandidateMessage(const std::vector &funcs, + const std::vector &argTypes, const std::vector *argCouldBeNULL) { std::string message = "Passed types: ("; for (unsigned int i = 0; i < argTypes.size(); ++i) { @@ -7821,7 +7821,7 @@ lIsMatchWithTypeWidening(const Type *callType, const Type *funcArgType) { */ static bool lIsMatchWithUniformToVarying(const Type *callType, const Type *funcArgType) { - return (callType->IsUniformType() && + return (callType->IsUniformType() && funcArgType->IsVaryingType() && Type::EqualIgnoringConst(callType->GetAsVaryingType(), funcArgType)); } @@ -7846,7 +7846,7 @@ std::vector FunctionSymbolExpr::getCandidateFunctions(int argCount) const { std::vector ret; for (int i = 0; i < (int)candidateFunctions.size(); ++i) { - const FunctionType *ft = + const FunctionType *ft = CastType(candidateFunctions[i]->type); AssertPos(pos, ft != NULL); @@ -7931,7 +7931,7 @@ FunctionSymbolExpr::computeOverloadCost(const FunctionType *ftype, callTypeNC = callType->GetAsNonConstType(); if (CastType(fargType) == NULL) fargTypeNC = fargType->GetAsNonConstType(); - + if (Type::Equal(callTypeNC, fargTypeNC)) // Exact match (after dealing with references, above) costSum += 1 * costScale; @@ -7979,7 +7979,7 @@ FunctionSymbolExpr::ResolveOverloads(SourcePos argPos, // number of arguments as have parameters (including functions that // take more arguments but have defaults starting no later than after // our last parameter). - std::vector actualCandidates = + std::vector actualCandidates = getCandidateFunctions(argTypes.size()); int bestMatchCost = 1<<30; @@ -7991,7 +7991,7 @@ FunctionSymbolExpr::ResolveOverloads(SourcePos argPos, // Compute the cost for calling each of the candidate functions for (int i = 0; i < (int)actualCandidates.size(); ++i) { - const FunctionType *ft = + const FunctionType *ft = CastType(actualCandidates[i]->type); AssertPos(pos, ft != NULL); candidateCosts.push_back(computeOverloadCost(ft, argTypes, @@ -8020,10 +8020,10 @@ FunctionSymbolExpr::ResolveOverloads(SourcePos argPos, } else if (matches.size() > 1) { // Multiple matches: ambiguous - std::string candidateMessage = + std::string candidateMessage = lGetOverloadCandidateMessage(matches, argTypes, argCouldBeNULL); Error(pos, "Multiple overloaded functions matched call to function " - "\"%s\"%s.\n%s", funName, + "\"%s\"%s.\n%s", funName, exactMatchOnly ? 
" only considering exact matches" : "", candidateMessage.c_str()); return false; @@ -8031,10 +8031,10 @@ FunctionSymbolExpr::ResolveOverloads(SourcePos argPos, else { // No matches at all failure: - std::string candidateMessage = + std::string candidateMessage = lGetOverloadCandidateMessage(matches, argTypes, argCouldBeNULL); Error(pos, "Unable to find any matching overload for call to function " - "\"%s\"%s.\n%s", funName, + "\"%s\"%s.\n%s", funName, exactMatchOnly ? " only considering exact matches" : "", candidateMessage.c_str()); return false; @@ -8149,7 +8149,7 @@ NullPointerExpr::EstimateCost() const { /////////////////////////////////////////////////////////////////////////// // NewExpr -NewExpr::NewExpr(int typeQual, const Type *t, Expr *init, Expr *count, +NewExpr::NewExpr(int typeQual, const Type *t, Expr *init, Expr *count, SourcePos tqPos, SourcePos p) : Expr(p) { allocType = t; @@ -8208,7 +8208,7 @@ NewExpr::GetValue(FunctionEmitContext *ctx) const { // Compute the total amount of memory to allocate, allocSize, as the // product of the number of elements to allocate and the size of a // single element. - llvm::Value *eltSize = g->target.SizeOf(allocType->LLVMType(g->ctx), + llvm::Value *eltSize = g->target.SizeOf(allocType->LLVMType(g->ctx), ctx->GetCurrentBasicBlock()); if (isVarying) eltSize = ctx->SmearUniform(eltSize, "smear_size"); @@ -8269,7 +8269,7 @@ NewExpr::GetValue(FunctionEmitContext *ctx) const { // Initialize the memory pointed to by the pointer for the // current lane. ctx->SetCurrentBasicBlock(bbInit); - llvm::Type *ptrType = + llvm::Type *ptrType = retType->GetAsUniformType()->LLVMType(g->ctx); llvm::Value *ptr = ctx->IntToPtrInst(p, ptrType); InitSymbol(ptr, allocType, initExpr, ctx, pos); @@ -8286,7 +8286,7 @@ NewExpr::GetValue(FunctionEmitContext *ctx) const { // pointer of the return type and to run the code for initializers, // if present. llvm::Type *ptrType = retType->LLVMType(g->ctx); - ptrValue = ctx->BitCastInst(ptrValue, ptrType, + ptrValue = ctx->BitCastInst(ptrValue, ptrType, LLVMGetName(ptrValue, "_cast_ptr")); if (initExpr != NULL) diff --git a/expr.h b/expr.h index d65bc8c3..3b1a07e3 100644 --- a/expr.h +++ b/expr.h @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file expr.h @@ -101,7 +101,7 @@ class UnaryExpr : public Expr { public: enum Op { PreInc, ///< Pre-increment - PreDec, ///< Pre-decrement + PreDec, ///< Pre-decrement PostInc, ///< Post-increment PostDec, ///< Post-decrement Negate, ///< Negation @@ -198,7 +198,7 @@ public: }; -/** @brief Selection expression, corresponding to "test ? a : b". +/** @brief Selection expression, corresponding to "test ? a : b". Returns the value of "a" or "b", depending on the value of "test". */ @@ -245,7 +245,7 @@ public: */ class FunctionCallExpr : public Expr { public: - FunctionCallExpr(Expr *func, ExprList *args, SourcePos p, + FunctionCallExpr(Expr *func, ExprList *args, SourcePos p, bool isLaunch = false, Expr *launchCountExpr = NULL); llvm::Value *GetValue(FunctionEmitContext *ctx) const; @@ -266,7 +266,7 @@ public: /** @brief Expression representing indexing into something with an integer offset. - This is used for both array indexing and indexing into VectorTypes. 
+ This is used for both array indexing and indexing into VectorTypes. */ class IndexExpr : public Expr { public: @@ -317,7 +317,7 @@ public: std::string identifier; const SourcePos identifierPos; - MemberExpr(Expr *expr, const char *identifier, SourcePos pos, + MemberExpr(Expr *expr, const char *identifier, SourcePos pos, SourcePos identifierPos, bool derefLValue); /** Indicates whether the expression should be dereferenced before the @@ -330,7 +330,7 @@ protected: }; -/** @brief Expression representing a compile-time constant value. +/** @brief Expression representing a compile-time constant value. This class can currently represent compile-time constants of anything that is an AtomicType or an EnumType; for anything more complex, we @@ -640,7 +640,7 @@ private: /** @brief Expression representing a function symbol in the program (generally used for a function call). - */ + */ class FunctionSymbolExpr : public Expr { public: FunctionSymbolExpr(const char *name, const std::vector &candFuncs, @@ -714,7 +714,7 @@ public: class NullPointerExpr : public Expr { public: NullPointerExpr(SourcePos p) : Expr(p) { } - + llvm::Value *GetValue(FunctionEmitContext *ctx) const; const Type *GetType() const; Expr *TypeCheck(); @@ -726,11 +726,11 @@ public: /** An expression representing a "new" expression, used for dynamically - allocating memory. + allocating memory. */ class NewExpr : public Expr { public: - NewExpr(int typeQual, const Type *type, Expr *initializer, Expr *count, + NewExpr(int typeQual, const Type *type, Expr *initializer, Expr *count, SourcePos tqPos, SourcePos p); llvm::Value *GetValue(FunctionEmitContext *ctx) const; @@ -742,7 +742,7 @@ public: /** Type of object to allocate storage for. */ const Type *allocType; - /** Expression giving the number of elements to allocate, when the + /** Expression giving the number of elements to allocate, when the "new Foo[expr]" form is used. This may be NULL, in which case a single element of the given type will be allocated. */ Expr *countExpr; diff --git a/func.cpp b/func.cpp index cbe3aa88..8c842f6f 100644 --- a/func.cpp +++ b/func.cpp @@ -28,11 +28,11 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file func.cpp - @brief + @brief */ #include "func.h" @@ -85,7 +85,7 @@ Function::Function(Symbol *s, Stmt *c) { code = TypeCheck(code); if (code != NULL && g->debugPrint) { - fprintf(stderr, "After typechecking function \"%s\":\n", + fprintf(stderr, "After typechecking function \"%s\":\n", sym->name.c_str()); code->Print(0); fprintf(stderr, "---------------------\n"); @@ -94,7 +94,7 @@ Function::Function(Symbol *s, Stmt *c) { if (code != NULL) { code = Optimize(code); if (g->debugPrint) { - fprintf(stderr, "After optimizing function \"%s\":\n", + fprintf(stderr, "After optimizing function \"%s\":\n", sym->name.c_str()); code->Print(0); fprintf(stderr, "---------------------\n"); @@ -160,7 +160,7 @@ Function::GetType() const { 'mem2reg' pass will in turn promote to SSA registers.. 
*/ static void -lCopyInTaskParameter(int i, llvm::Value *structArgPtr, const +lCopyInTaskParameter(int i, llvm::Value *structArgPtr, const std::vector &args, FunctionEmitContext *ctx) { // We expect the argument structure to come in as a poitner to a @@ -169,7 +169,7 @@ lCopyInTaskParameter(int i, llvm::Value *structArgPtr, const Assert(llvm::isa(structArgType)); const llvm::PointerType *pt = llvm::dyn_cast(structArgType); Assert(llvm::isa(pt->getElementType())); - const llvm::StructType *argStructType = + const llvm::StructType *argStructType = llvm::dyn_cast(pt->getElementType()); // Get the type of the argument we're copying in and its Symbol pointer @@ -199,8 +199,8 @@ lCopyInTaskParameter(int i, llvm::Value *structArgPtr, const involves wiring up the function parameter values to be available in the function body code. */ -void -Function::emitCode(FunctionEmitContext *ctx, llvm::Function *function, +void +Function::emitCode(FunctionEmitContext *ctx, llvm::Function *function, SourcePos firstStmtPos) { // Connect the __mask builtin to the location in memory that stores its // value @@ -259,7 +259,7 @@ Function::emitCode(FunctionEmitContext *ctx, llvm::Function *function, } else { // Regular, non-task function - llvm::Function::arg_iterator argIter = function->arg_begin(); + llvm::Function::arg_iterator argIter = function->arg_begin(); for (unsigned int i = 0; i < args.size(); ++i, ++argIter) { Symbol *sym = args[i]; if (sym == NULL) @@ -301,14 +301,14 @@ Function::emitCode(FunctionEmitContext *ctx, llvm::Function *function, ctx->AddInstrumentationPoint("function entry"); int costEstimate = EstimateCost(code); - Debug(code->pos, "Estimated cost for function \"%s\" = %d\n", + Debug(code->pos, "Estimated cost for function \"%s\" = %d\n", sym->name.c_str(), costEstimate); // If the body of the function is non-trivial, then we wrap the // entire thing inside code that tests to see if the mask is all // on, all off, or mixed. If this is a simple function, then this // isn't worth the code bloat / overhead. - bool checkMask = (type->isTask == true) || + bool checkMask = (type->isTask == true) || ( #if defined(LLVM_3_1) (function->hasFnAttr(llvm::Attribute::AlwaysInline) == false) @@ -322,7 +322,7 @@ Function::emitCode(FunctionEmitContext *ctx, llvm::Function *function, checkMask &= (type->isUnmasked == false); checkMask &= (g->target.maskingIsFree == false); checkMask &= (g->opt.disableCoherentControlFlow == false); - + if (checkMask) { llvm::Value *mask = ctx->GetFunctionMask(); llvm::Value *allOn = ctx->All(mask); @@ -409,7 +409,7 @@ Function::GenerateIR() { // But if that function has a definition, we don't want to redefine it. 
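The checkMask computation in Function::emitCode above accumulates several independent conditions before deciding whether to wrap the body in the all-on / all-off / mixed mask test. Restated as a single predicate (parameter names are descriptive stand-ins for the fields the diff reads):

    // Tasks always get the mask dispatch; otherwise it is skipped for
    // always-inline functions, and suppressed for unmasked functions,
    // targets where masking is free, and builds that disable coherent
    // control flow.
    static bool shouldCheckMask(bool isTask, bool alwaysInline, bool isUnmasked,
                                bool maskingIsFree, bool coherentCFDisabled) {
        bool check = isTask || !alwaysInline;
        check &= !isUnmasked;
        check &= !maskingIsFree;
        check &= !coherentCFDisabled;
        return check;
    }

The always-inline query itself is version-guarded in the diff (hasFnAttr under LLVM_3_1); the predicate above ignores that detail.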
if (function->empty() == false) { - Error(sym->pos, "Ignoring redefinition of function \"%s\".", + Error(sym->pos, "Ignoring redefinition of function \"%s\".", sym->name.c_str()); return; } @@ -426,7 +426,7 @@ Function::GenerateIR() { firstStmtPos = code->pos; } - // And we can now go ahead and emit the code + // And we can now go ahead and emit the code { FunctionEmitContext ec(this, sym, function, firstStmtPos); emitCode(&ec, function, firstStmtPos); @@ -451,7 +451,7 @@ Function::GenerateIR() { std::string functionName = sym->name; if (g->mangleFunctionsWithTarget) functionName += std::string("_") + g->target.GetISAString(); - llvm::Function *appFunction = + llvm::Function *appFunction = llvm::Function::Create(ftype, linkage, functionName.c_str(), m->module); #if defined(LLVM_3_1) appFunction->setDoesNotThrow(true); @@ -470,7 +470,7 @@ Function::GenerateIR() { emitCode(&ec, appFunction, firstStmtPos); if (m->errorCount == 0) { sym->exportedFunction = appFunction; - if (llvm::verifyFunction(*appFunction, + if (llvm::verifyFunction(*appFunction, llvm::ReturnStatusAction) == true) { if (g->debugPrint) appFunction->dump(); diff --git a/func.h b/func.h index 6d0527fc..ac3e1447 100644 --- a/func.h +++ b/func.h @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file func.h @@ -52,7 +52,7 @@ public: void GenerateIR(); private: - void emitCode(FunctionEmitContext *ctx, llvm::Function *function, + void emitCode(FunctionEmitContext *ctx, llvm::Function *function, SourcePos firstStmtPos); Symbol *sym; diff --git a/ispc.cpp b/ispc.cpp index 3a602ce0..8a3beda3 100644 --- a/ispc.cpp +++ b/ispc.cpp @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file ispc.cpp @@ -136,7 +136,7 @@ lGetSystemISA() { } -static const char *supportedCPUs[] = { +static const char *supportedCPUs[] = { "atom", "penryn", "core2", "corei7", "corei7-avx" #if defined(LLVM_3_2) || defined(LLVM_3_3) , "core-avx-i", "core-avx2" @@ -187,7 +187,7 @@ Target::GetTarget(const char *arch, const char *cpu, const char *isa, } else { bool foundCPU = false; - for (int i = 0; i < int(sizeof(supportedCPUs) / sizeof(supportedCPUs[0])); + for (int i = 0; i < int(sizeof(supportedCPUs) / sizeof(supportedCPUs[0])); ++i) { if (!strcmp(cpu, supportedCPUs[i])) { foundCPU = true; @@ -405,7 +405,7 @@ Target::GetTarget(const char *arch, const char *cpu, const char *isa, #endif } else { - fprintf(stderr, "Target ISA \"%s\" is unknown. Choices are: %s\n", + fprintf(stderr, "Target ISA \"%s\" is unknown. Choices are: %s\n", isa, SupportedTargetISAs()); error = true; } @@ -494,7 +494,7 @@ llvm::TargetMachine * Target::GetTargetMachine() const { std::string triple = GetTripleString(); - llvm::Reloc::Model relocModel = generatePIC ? llvm::Reloc::PIC_ : + llvm::Reloc::Model relocModel = generatePIC ? 
llvm::Reloc::PIC_ : llvm::Reloc::Default; std::string featuresString = attributes; llvm::TargetOptions options; @@ -502,7 +502,7 @@ Target::GetTargetMachine() const { if (g->opt.disableFMA == false) options.AllowFPOpFusion = llvm::FPOpFusion::Fast; #endif // !LLVM_3_1 - llvm::TargetMachine *targetMachine = + llvm::TargetMachine *targetMachine = target->createTargetMachine(triple, cpu, featuresString, options, relocModel); Assert(targetMachine != NULL); @@ -544,12 +544,12 @@ lGenericTypeLayoutIndeterminate(llvm::Type *type) { type == LLVMTypes::Int1VectorType) return true; - llvm::ArrayType *at = + llvm::ArrayType *at = llvm::dyn_cast(type); if (at != NULL) return lGenericTypeLayoutIndeterminate(at->getElementType()); - llvm::PointerType *pt = + llvm::PointerType *pt = llvm::dyn_cast(type); if (pt != NULL) return false; @@ -569,7 +569,7 @@ lGenericTypeLayoutIndeterminate(llvm::Type *type) { llvm::Value * -Target::SizeOf(llvm::Type *type, +Target::SizeOf(llvm::Type *type, llvm::BasicBlock *insertAtEnd) { if (isa == Target::GENERIC && lGenericTypeLayoutIndeterminate(type)) { @@ -577,15 +577,15 @@ Target::SizeOf(llvm::Type *type, llvm::PointerType *ptrType = llvm::PointerType::get(type, 0); llvm::Value *voidPtr = llvm::ConstantPointerNull::get(ptrType); llvm::ArrayRef arrayRef(&index[0], &index[1]); - llvm::Instruction *gep = + llvm::Instruction *gep = llvm::GetElementPtrInst::Create(voidPtr, arrayRef, "sizeof_gep", insertAtEnd); if (is32Bit || g->opt.force32BitAddressing) - return new llvm::PtrToIntInst(gep, LLVMTypes::Int32Type, + return new llvm::PtrToIntInst(gep, LLVMTypes::Int32Type, "sizeof_int", insertAtEnd); else - return new llvm::PtrToIntInst(gep, LLVMTypes::Int64Type, + return new llvm::PtrToIntInst(gep, LLVMTypes::Int64Type, "sizeof_int", insertAtEnd); } @@ -611,25 +611,25 @@ Target::SizeOf(llvm::Type *type, llvm::Value * Target::StructOffset(llvm::Type *type, int element, llvm::BasicBlock *insertAtEnd) { - if (isa == Target::GENERIC && + if (isa == Target::GENERIC && lGenericTypeLayoutIndeterminate(type) == true) { llvm::Value *indices[2] = { LLVMInt32(0), LLVMInt32(element) }; llvm::PointerType *ptrType = llvm::PointerType::get(type, 0); llvm::Value *voidPtr = llvm::ConstantPointerNull::get(ptrType); llvm::ArrayRef arrayRef(&indices[0], &indices[2]); - llvm::Instruction *gep = + llvm::Instruction *gep = llvm::GetElementPtrInst::Create(voidPtr, arrayRef, "offset_gep", insertAtEnd); if (is32Bit || g->opt.force32BitAddressing) - return new llvm::PtrToIntInst(gep, LLVMTypes::Int32Type, + return new llvm::PtrToIntInst(gep, LLVMTypes::Int32Type, "offset_int", insertAtEnd); else - return new llvm::PtrToIntInst(gep, LLVMTypes::Int64Type, + return new llvm::PtrToIntInst(gep, LLVMTypes::Int64Type, "offset_int", insertAtEnd); } - llvm::StructType *structType = + llvm::StructType *structType = llvm::dyn_cast(type); if (structType == NULL || structType->isSized() == false) { Assert(m->errorCount > 0); @@ -699,7 +699,7 @@ Globals::Globals() { enableFuzzTest = false; fuzzTestSeed = -1; mangleFunctionsWithTarget = false; - + ctx = new llvm::LLVMContext; #ifdef ISPC_IS_WINDOWS @@ -739,15 +739,15 @@ SourcePos::GetDIFile() const { void -SourcePos::Print() const { +SourcePos::Print() const { printf(" @ [%s:%d.%d - %d.%d] ", name, first_line, first_column, - last_line, last_column); + last_line, last_column); } bool SourcePos::operator==(const SourcePos &p2) const { - return (!strcmp(name, p2.name) && + return (!strcmp(name, p2.name) && first_line == p2.first_line && first_column == p2.first_column 
&& last_line == p2.last_line && diff --git a/ispc.h b/ispc.h index 1d456087..95523f2b 100644 --- a/ispc.h +++ b/ispc.h @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file ispc.h @@ -158,7 +158,7 @@ extern void DoAssertPos(SourcePos pos, const char *file, int line, const char *e ((void)((expr) ? 0 : ((void)DoAssertPos (pos, __FILE__, __LINE__, #expr), 0))) -/** @brief Structure that defines a compilation target +/** @brief Structure that defines a compilation target This structure defines a compilation target for the ispc compiler. */ @@ -188,7 +188,7 @@ struct Target { /** Returns the LLVM TargetMachine object corresponding to this target. */ llvm::TargetMachine *GetTargetMachine() const; - + /** Returns a string like "avx" encoding the target. */ const char *GetISAString() const; @@ -281,11 +281,11 @@ struct Target { /** @brief Structure that collects optimization options This structure collects all of the options related to optimization of - generated code. + generated code. */ struct Opt { Opt(); - + /** Optimization level. Currently, the only valid values are 0, indicating essentially no optimization, and 1, indicating as much optimization as possible. */ @@ -308,7 +308,7 @@ struct Opt { /** Indicates if addressing math will be done with 32-bit math, even on 64-bit systems. (This is generally noticably more efficient, though at the cost of addressing >2GB). - */ + */ bool force32BitAddressing; /** Indicates whether Assert() statements should be ignored (for @@ -387,7 +387,7 @@ struct Opt { bool disableCoalescing; }; -/** @brief This structure collects together a number of global variables. +/** @brief This structure collects together a number of global variables. This structure collects a number of global variables that mostly represent parameter settings for this compilation run. In particular, @@ -445,12 +445,12 @@ struct Globals { externally-defined program instrumentation function. (See the "Instrumenting your ispc programs" section in the user's manual.) */ - bool emitInstrumentation; + bool emitInstrumentation; /** Indicates whether ispc should generate debugging symbols for the program in its output. */ bool generateDebuggingSymbols; - + /** If true, function names are mangled by appending the target ISA and vector width to them. */ bool mangleFunctionsWithTarget; diff --git a/lex.ll b/lex.ll index 07725b42..f6633fce 100644 --- a/lex.ll +++ b/lex.ll @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ %{ @@ -62,7 +62,7 @@ inline int isatty(int) { return 0; } #include #endif // ISPC_IS_WINDOWS -static int allTokens[] = { +static int allTokens[] = { TOKEN_ASSERT, TOKEN_BOOL, TOKEN_BREAK, TOKEN_CASE, TOKEN_CDO, TOKEN_CFOR, TOKEN_CIF, TOKEN_CWHILE, TOKEN_CONST, TOKEN_CONTINUE, TOKEN_DEFAULT, TOKEN_DO, @@ -74,11 +74,11 @@ static int allTokens[] = { TOKEN_NEW, TOKEN_NULL, TOKEN_PRINT, TOKEN_RETURN, TOKEN_SOA, TOKEN_SIGNED, TOKEN_SIZEOF, TOKEN_STATIC, TOKEN_STRUCT, TOKEN_SWITCH, TOKEN_SYNC, TOKEN_TASK, TOKEN_TRUE, TOKEN_TYPEDEF, TOKEN_UNIFORM, TOKEN_UNMASKED, - TOKEN_UNSIGNED, TOKEN_VARYING, TOKEN_VOID, TOKEN_WHILE, - TOKEN_STRING_C_LITERAL, TOKEN_DOTDOTDOT, + TOKEN_UNSIGNED, TOKEN_VARYING, TOKEN_VOID, TOKEN_WHILE, + TOKEN_STRING_C_LITERAL, TOKEN_DOTDOTDOT, TOKEN_FLOAT_CONSTANT, - TOKEN_INT32_CONSTANT, TOKEN_UINT32_CONSTANT, - TOKEN_INT64_CONSTANT, TOKEN_UINT64_CONSTANT, + TOKEN_INT32_CONSTANT, TOKEN_UINT32_CONSTANT, + TOKEN_INT64_CONSTANT, TOKEN_UINT64_CONSTANT, TOKEN_INC_OP, TOKEN_DEC_OP, TOKEN_LEFT_OP, TOKEN_RIGHT_OP, TOKEN_LE_OP, TOKEN_GE_OP, TOKEN_EQ_OP, TOKEN_NE_OP, TOKEN_AND_OP, TOKEN_OR_OP, TOKEN_MUL_ASSIGN, TOKEN_DIV_ASSIGN, TOKEN_MOD_ASSIGN, TOKEN_ADD_ASSIGN, @@ -406,7 +406,7 @@ while { RT; return TOKEN_WHILE; } L?\"(\\.|[^\\"])*\" { lStringConst(&yylval, &yylloc); return TOKEN_STRING_LITERAL; } -{IDENT} { +{IDENT} { RT; /* We have an identifier--is it a type name or an identifier? The symbol table will straighten us out... */ @@ -414,10 +414,10 @@ L?\"(\\.|[^\\"])*\" { lStringConst(&yylval, &yylloc); return TOKEN_STRING_LITERA if (m->symbolTable->LookupType(yytext) != NULL) return TOKEN_TYPE_NAME; else - return TOKEN_IDENTIFIER; + return TOKEN_IDENTIFIER; } -{INT_NUMBER} { +{INT_NUMBER} { RT; return lParseInteger(false); } @@ -428,16 +428,16 @@ L?\"(\\.|[^\\"])*\" { lStringConst(&yylval, &yylloc); return TOKEN_STRING_LITERA } -{FLOAT_NUMBER} { +{FLOAT_NUMBER} { RT; yylval.floatVal = (float)atof(yytext); - return TOKEN_FLOAT_CONSTANT; + return TOKEN_FLOAT_CONSTANT; } {HEX_FLOAT_NUMBER} { RT; - yylval.floatVal = (float)lParseHexFloat(yytext); - return TOKEN_FLOAT_CONSTANT; + yylval.floatVal = (float)lParseHexFloat(yytext); + return TOKEN_FLOAT_CONSTANT; } "++" { RT; return TOKEN_INC_OP; } @@ -489,17 +489,17 @@ L?\"(\\.|[^\\"])*\" { lStringConst(&yylval, &yylloc); return TOKEN_STRING_LITERA {WHITESPACE} { } \n { - yylloc.last_line++; - yylloc.last_column = 1; + yylloc.last_line++; + yylloc.last_column = 1; } -#(line)?[ ][0-9]+[ ]\"(\\.|[^\\"])*\"[^\n]* { - lHandleCppHash(&yylloc); +#(line)?[ ][0-9]+[ ]\"(\\.|[^\\"])*\"[^\n]* { + lHandleCppHash(&yylloc); } . { Error(yylloc, "Illegal character: %c (0x%x)", yytext[0], int(yytext[0])); - YY_USER_ACTION + YY_USER_ACTION } %% @@ -558,7 +558,7 @@ lParseInteger(bool dotdotdot) { else if (*endPtr == 'M') mega = true; else if (*endPtr == 'G') - giga = true; + giga = true; else if (*endPtr == 'l' || *endPtr == 'L') ls++; else if (*endPtr == 'u' || *endPtr == 'U') @@ -598,7 +598,7 @@ lParseInteger(bool dotdotdot) { return TOKEN_UINT64_CONSTANT; } else { - // No u or l suffix + // No u or l suffix // First, see if we can fit this into a 32-bit integer... if (yylval.intVal <= 0x7fffffffULL) return TOKEN_INT32_CONSTANT; @@ -613,7 +613,7 @@ lParseInteger(bool dotdotdot) { } -/** Handle a C-style comment in the source. +/** Handle a C-style comment in the source. 
*/ static void lCComment(SourcePos *pos) { @@ -750,7 +750,7 @@ lStringConst(YYSTYPE *yylval, SourcePos *pos) char cval = '\0'; p = lEscapeChar(p, &cval, pos); str.push_back(cval); - } + } yylval->stringVal = new std::string(str); } @@ -758,7 +758,7 @@ lStringConst(YYSTYPE *yylval, SourcePos *pos) /** Compute the value 2^n, where the exponent is given as an integer. There are more efficient ways to do this, for example by just slamming the bits into the appropriate bits of the double, but let's just do the - obvious thing. + obvious thing. */ static double ipow2(int exponent) { @@ -777,7 +777,7 @@ ipow2(int exponent) { /** Parse a hexadecimal-formatted floating-point number (C99 hex float - constant-style). + constant-style). */ static double lParseHexFloat(const char *ptr) { diff --git a/llvmutil.cpp b/llvmutil.cpp index 74f395c4..ce89761d 100644 --- a/llvmutil.cpp +++ b/llvmutil.cpp @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file llvmutil.cpp @@ -124,19 +124,19 @@ InitLLVMUtil(llvm::LLVMContext *ctx, Target target) { llvm::VectorType::get(llvm::Type::getInt32Ty(*ctx), target.vectorWidth); } - LLVMTypes::Int1VectorType = + LLVMTypes::Int1VectorType = llvm::VectorType::get(llvm::Type::getInt1Ty(*ctx), target.vectorWidth); - LLVMTypes::Int8VectorType = + LLVMTypes::Int8VectorType = llvm::VectorType::get(LLVMTypes::Int8Type, target.vectorWidth); - LLVMTypes::Int16VectorType = + LLVMTypes::Int16VectorType = llvm::VectorType::get(LLVMTypes::Int16Type, target.vectorWidth); - LLVMTypes::Int32VectorType = + LLVMTypes::Int32VectorType = llvm::VectorType::get(LLVMTypes::Int32Type, target.vectorWidth); - LLVMTypes::Int64VectorType = + LLVMTypes::Int64VectorType = llvm::VectorType::get(LLVMTypes::Int64Type, target.vectorWidth); - LLVMTypes::FloatVectorType = + LLVMTypes::FloatVectorType = llvm::VectorType::get(LLVMTypes::FloatType, target.vectorWidth); - LLVMTypes::DoubleVectorType = + LLVMTypes::DoubleVectorType = llvm::VectorType::get(LLVMTypes::DoubleType, target.vectorWidth); LLVMTypes::Int8VectorPointerType = llvm::PointerType::get(LLVMTypes::Int8VectorType, 0); @@ -441,11 +441,11 @@ LLVMUInt64Vector(const uint64_t *ivec) { llvm::Constant * LLVMBoolVector(bool b) { llvm::Constant *v; - if (LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType) - v = llvm::ConstantInt::get(LLVMTypes::Int32Type, b ? 0xffffffff : 0, + if (LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType) + v = llvm::ConstantInt::get(LLVMTypes::Int32Type, b ? 0xffffffff : 0, false /*unsigned*/); else { - Assert(LLVMTypes::BoolVectorType->getElementType() == + Assert(LLVMTypes::BoolVectorType->getElementType() == llvm::Type::getInt1Ty(*g->ctx)); v = b ? LLVMTrue : LLVMFalse; } @@ -462,11 +462,11 @@ LLVMBoolVector(const bool *bvec) { std::vector vals; for (int i = 0; i < g->target.vectorWidth; ++i) { llvm::Constant *v; - if (LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType) - v = llvm::ConstantInt::get(LLVMTypes::Int32Type, bvec[i] ? 0xffffffff : 0, + if (LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType) + v = llvm::ConstantInt::get(LLVMTypes::Int32Type, bvec[i] ? 
0xffffffff : 0, false /*unsigned*/); else { - Assert(LLVMTypes::BoolVectorType->getElementType() == + Assert(LLVMTypes::BoolVectorType->getElementType() == llvm::Type::getInt1Ty(*g->ctx)); v = bvec[i] ? LLVMTrue : LLVMFalse; } @@ -519,7 +519,7 @@ LLVMUIntAsType(uint64_t val, llvm::Type *type) { vectors definitely are equal. */ static bool -lValuesAreEqual(llvm::Value *v0, llvm::Value *v1, +lValuesAreEqual(llvm::Value *v0, llvm::Value *v1, std::vector &seenPhi0, std::vector &seenPhi1) { // Thanks to the fact that LLVM hashes and returns the same pointer for @@ -571,7 +571,7 @@ lValuesAreEqual(llvm::Value *v0, llvm::Value *v1, // FIXME: should it be ok if the incoming blocks are different, // where we just return faliure in this case? Assert(phi0->getIncomingBlock(i) == phi1->getIncomingBlock(i)); - if (!lValuesAreEqual(phi0->getIncomingValue(i), + if (!lValuesAreEqual(phi0->getIncomingValue(i), phi1->getIncomingValue(i), seenPhi0, seenPhi1)) { anyFailure = true; break; @@ -611,7 +611,7 @@ LLVMFlattenInsertChain(llvm::InsertElementInst *ie, int vectorWidth, Assert(iOffset >= 0 && iOffset < vectorWidth); Assert(elements[iOffset] == NULL); - // Get the scalar value from this insert + // Get the scalar value from this insert elements[iOffset] = ie->getOperand(1); // Do we have another insert? @@ -623,7 +623,7 @@ LLVMFlattenInsertChain(llvm::InsertElementInst *ie, int vectorWidth, // Get the value out of a constant vector if that's what we // have - llvm::ConstantVector *cv = + llvm::ConstantVector *cv = llvm::dyn_cast(insertBase); // FIXME: this assert is a little questionable; we probably @@ -717,7 +717,7 @@ lIsExactMultiple(llvm::Value *val, int baseValue, int vectorLength, // we're good. for (unsigned int i = 0; i < numIncoming; ++i) { llvm::Value *incoming = phi->getIncomingValue(i); - bool mult = lIsExactMultiple(incoming, baseValue, vectorLength, + bool mult = lIsExactMultiple(incoming, baseValue, vectorLength, seenPhis); if (mult == false) { seenPhis.pop_back(); @@ -748,9 +748,9 @@ lIsExactMultiple(llvm::Value *val, int baseValue, int vectorLength, static int lRoundUpPow2(int v) { v--; - v |= v >> 1; + v |= v >> 1; v |= v >> 2; - v |= v >> 4; + v |= v >> 4; v |= v >> 8; v |= v >> 16; return v+1; @@ -804,7 +804,7 @@ lAllDivBaseEqual(llvm::Value *val, int64_t baseValue, int vectorLength, for (unsigned int i = 0; i < numIncoming; ++i) { llvm::Value *incoming = phi->getIncomingValue(i); bool ca = canAdd; - bool mult = lAllDivBaseEqual(incoming, baseValue, vectorLength, + bool mult = lAllDivBaseEqual(incoming, baseValue, vectorLength, seenPhis, ca); if (mult == false) { seenPhis.pop_back(); @@ -816,7 +816,7 @@ lAllDivBaseEqual(llvm::Value *val, int64_t baseValue, int vectorLength, } llvm::BinaryOperator *bop = llvm::dyn_cast(val); - if (bop != NULL && bop->getOpcode() == llvm::Instruction::Add && + if (bop != NULL && bop->getOpcode() == llvm::Instruction::Add && canAdd == true) { llvm::Value *op0 = bop->getOperand(0); llvm::Value *op1 = bop->getOperand(1); @@ -942,9 +942,9 @@ lVectorValuesAllEqual(llvm::Value *v, int vectorLength, llvm::BinaryOperator *bop = llvm::dyn_cast(v); if (bop != NULL) { // Easy case: both operands are all equal -> return true - if (lVectorValuesAllEqual(bop->getOperand(0), vectorLength, + if (lVectorValuesAllEqual(bop->getOperand(0), vectorLength, seenPhis) && - lVectorValuesAllEqual(bop->getOperand(1), vectorLength, + lVectorValuesAllEqual(bop->getOperand(1), vectorLength, seenPhis)) return true; @@ -952,7 +952,7 @@ lVectorValuesAllEqual(llvm::Value *v, int 
vectorLength, // high (surviving) bits of the values are equal. if (bop->getOpcode() == llvm::Instruction::AShr || bop->getOpcode() == llvm::Instruction::LShr) - return lVectorShiftRightAllEqual(bop->getOperand(0), + return lVectorShiftRightAllEqual(bop->getOperand(0), bop->getOperand(1), vectorLength); return false; @@ -960,7 +960,7 @@ lVectorValuesAllEqual(llvm::Value *v, int vectorLength, llvm::CastInst *cast = llvm::dyn_cast(v); if (cast != NULL) - return lVectorValuesAllEqual(cast->getOperand(0), vectorLength, + return lVectorValuesAllEqual(cast->getOperand(0), vectorLength, seenPhis); llvm::InsertElementInst *ie = llvm::dyn_cast(v); @@ -985,7 +985,7 @@ lVectorValuesAllEqual(llvm::Value *v, int vectorLength, std::vector seenPhi0; std::vector seenPhi1; - if (lValuesAreEqual(elements[lastNonNull], elements[i], seenPhi0, + if (lValuesAreEqual(elements[lastNonNull], elements[i], seenPhi0, seenPhi1) == false) return false; lastNonNull = i; @@ -1087,8 +1087,8 @@ lVectorIsLinear(llvm::Value *v, int vectorLength, int stride, elements. */ static bool -lVectorIsLinearConstantInts(llvm::ConstantDataVector *cv, - int vectorLength, +lVectorIsLinearConstantInts(llvm::ConstantDataVector *cv, + int vectorLength, int stride) { // Flatten the vector out into the elements array llvm::SmallVector elements; @@ -1108,7 +1108,7 @@ lVectorIsLinearConstantInts(llvm::ConstantDataVector *cv, // is stride. If not, fail. for (int i = 1; i < vectorLength; ++i) { ci = llvm::dyn_cast(elements[i]); - if (ci == NULL) + if (ci == NULL) return false; int64_t nextVal = ci->getSExtValue(); @@ -1125,7 +1125,7 @@ lVectorIsLinearConstantInts(llvm::ConstantDataVector *cv, vector with values that increase by stride. */ static bool -lCheckMulForLinear(llvm::Value *op0, llvm::Value *op1, int vectorLength, +lCheckMulForLinear(llvm::Value *op0, llvm::Value *op1, int vectorLength, int stride, std::vector &seenPhis) { // Is the first operand a constant integer value splatted across all of // the lanes? @@ -1150,7 +1150,7 @@ lCheckMulForLinear(llvm::Value *op0, llvm::Value *op1, int vectorLength, // Check to see if the other operand is a linear vector with stride // given by stride/splatVal. - return lVectorIsLinear(op1, vectorLength, (int)(stride / splatVal), + return lVectorIsLinear(op1, vectorLength, (int)(stride / splatVal), seenPhis); } @@ -1161,7 +1161,7 @@ lCheckMulForLinear(llvm::Value *op0, llvm::Value *op1, int vectorLength, data. */ static bool -lCheckAndForLinear(llvm::Value *op0, llvm::Value *op1, int vectorLength, +lCheckAndForLinear(llvm::Value *op0, llvm::Value *op1, int vectorLength, int stride, std::vector &seenPhis) { // Require op1 to be a compile-time constant int64_t maskValue[ISPC_MAX_NVEC]; @@ -1359,7 +1359,7 @@ LLVMDumpValue(llvm::Value *v) { static llvm::Value * -lExtractFirstVectorElement(llvm::Value *v, +lExtractFirstVectorElement(llvm::Value *v, std::map &phiMap) { llvm::VectorType *vt = llvm::dyn_cast(v->getType()); @@ -1373,7 +1373,7 @@ lExtractFirstVectorElement(llvm::Value *v, if (llvm::ConstantVector *cv = llvm::dyn_cast(v)) { return cv->getOperand(0); } - if (llvm::ConstantDataVector *cdv = + if (llvm::ConstantDataVector *cdv = llvm::dyn_cast(v)) return cdv->getElementAsConstant(0); @@ -1423,9 +1423,9 @@ lExtractFirstVectorElement(llvm::Value *v, // The insertion point for the new phi node also has to be the // start of the bblock of the original phi node. 
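lVectorIsLinearConstantInts above boils down to an arithmetic-progression test once the constants have been flattened out of the vector. A standalone form of that check:

    #include <cstdint>
    #include <vector>

    // True iff each element exceeds its predecessor by exactly 'stride'.
    static bool isLinearSequence(const std::vector<int64_t> &elts, int64_t stride) {
        for (size_t i = 1; i < elts.size(); ++i)
            if (elts[i] - elts[i - 1] != stride)
                return false;
        return true;
    }

For example, isLinearSequence({0, 1, 2, 3}, 1) holds for a programIndex-style vector, the shape of index that lets a gather be replaced with a cheaper vector load.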
llvm::Instruction *phiInsertPos = phi->getParent()->begin(); - llvm::PHINode *scalarPhi = - llvm::PHINode::Create(vt->getElementType(), - phi->getNumIncomingValues(), + llvm::PHINode *scalarPhi = + llvm::PHINode::Create(vt->getElementType(), + phi->getNumIncomingValues(), newName, phiInsertPos); phiMap[phi] = scalarPhi; @@ -1452,7 +1452,7 @@ lExtractFirstVectorElement(llvm::Value *v, // have here. llvm::Instruction *insertAfter = llvm::dyn_cast(v); Assert(insertAfter != NULL); - llvm::Instruction *ee = + llvm::Instruction *ee = llvm::ExtractElementInst::Create(v, LLVMInt32(0), "first_elt", (llvm::Instruction *)NULL); ee->insertAfter(insertAfter); @@ -1474,7 +1474,7 @@ LLVMExtractFirstVectorElement(llvm::Value *v) { vector. */ llvm::Value * -LLVMConcatVectors(llvm::Value *v1, llvm::Value *v2, +LLVMConcatVectors(llvm::Value *v1, llvm::Value *v2, llvm::Instruction *insertBefore) { Assert(v1->getType() == v2->getType()); diff --git a/llvmutil.h b/llvmutil.h index d6830276..636b76cb 100644 --- a/llvmutil.h +++ b/llvmutil.h @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file llvmutil.h @@ -59,7 +59,7 @@ namespace llvm { /** This structure holds pointers to a variety of LLVM types; code elsewhere can use them from here, ratherthan needing to make more verbose LLVM API calls. - */ + */ struct LLVMTypes { static llvm::Type *VoidType; static llvm::PointerType *VoidPointerType; @@ -257,7 +257,7 @@ extern bool LLVMExtractVectorInts(llvm::Value *v, int64_t ret[], int *nElts); and initializes the provided elements array such that the i'th llvm::Value * in the array is the element that was inserted into the - i'th element of the vector. + i'th element of the vector. When the chain of insertelement instruction comes to an end, the only base case that this function handles is the initial value being a @@ -286,7 +286,7 @@ extern llvm::Value *LLVMExtractFirstVectorElement(llvm::Value *v); /** This function takes two vectors, expected to be the same length, and returns a new vector of twice the length that represents concatenating the two of them. */ -extern llvm::Value *LLVMConcatVectors(llvm::Value *v1, llvm::Value *v2, +extern llvm::Value *LLVMConcatVectors(llvm::Value *v1, llvm::Value *v2, llvm::Instruction *insertBefore); /** This is a utility function for vector shuffling; it takes two vectors diff --git a/main.cpp b/main.cpp index af8e1285..dc0e7cfe 100644 --- a/main.cpp +++ b/main.cpp @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /** @file main.cpp @@ -60,8 +60,8 @@ static void lPrintVersion() { - printf("Intel(r) SPMD Program Compiler (ispc), %s (build %s @ %s, LLVM %s)\n", - ISPC_VERSION, BUILD_VERSION, BUILD_DATE, + printf("Intel(r) SPMD Program Compiler (ispc), %s (build %s @ %s, LLVM %s)\n", + ISPC_VERSION, BUILD_VERSION, BUILD_DATE, #if defined(LLVM_3_1) "3.1" #elif defined(LLVM_3_2) @@ -70,7 +70,7 @@ lPrintVersion() { "3.3" #else #error "Unhandled LLVM version" -#endif +#endif ); } @@ -82,7 +82,7 @@ usage(int ret) { printf(" [--addressing={32,64}]\t\tSelect 32- or 64-bit addressing. (Note that 32-bit\n"); printf(" \t\taddressing calculations are done by default, even\n"); printf(" \t\ton 64-bit target architectures.)\n"); - printf(" [--arch={%s}]\t\tSelect target architecture\n", + printf(" [--arch={%s}]\t\tSelect target architecture\n", Target::SupportedTargetArchs()); printf(" [--c++-include-file=]\t\tSpecify name of file to emit in #include statement in generated C++ code.\n"); #ifndef ISPC_IS_WINDOWS @@ -160,7 +160,7 @@ devUsage(int ret) { /** We take arguments from both the command line as well as from the ISPC_ARGS environment variable. This function returns a new set of arguments representing the ones from those two sources merged together. -*/ +*/ static void lGetAllArgs(int Argc, char *Argv[], int &argc, char *argv[128]) { // Copy over the command line arguments (passed in) for (int i = 0; i < Argc; ++i) @@ -185,7 +185,7 @@ static void lGetAllArgs(int Argc, char *Argv[], int &argc, char *argv[128]) { strncpy(ptr, env, len); ptr[len] = '\0'; - // Add it to the args array and get out of here + // Add it to the args array and get out of here argv[argc++] = ptr; if (*end == '\0') break; @@ -403,7 +403,7 @@ int main(int Argc, char *Argv[]) { g->opt.level = 0; optSet = true; } - else if (!strcmp(argv[i], "-O") || !strcmp(argv[i], "-O1") || + else if (!strcmp(argv[i], "-O") || !strcmp(argv[i], "-O1") || !strcmp(argv[i], "-O2") || !strcmp(argv[i], "-O3")) { g->opt.level = 1; optSet = true; @@ -480,7 +480,7 @@ int main(int Argc, char *Argv[]) { int seed = getpid(); #endif g->fuzzTestSeed = seed; - Warning(SourcePos(), "Using seed %d for fuzz testing", + Warning(SourcePos(), "Using seed %d for fuzz testing", g->fuzzTestSeed); } #ifdef ISPC_IS_WINDOWS @@ -490,7 +490,7 @@ int main(int Argc, char *Argv[]) { #endif } - if (outFileName == NULL && + if (outFileName == NULL && headerFileName == NULL && depsFileName == NULL && hostStubFileName == NULL && @@ -500,9 +500,9 @@ int main(int Argc, char *Argv[]) { "be issued, but no output will be generated."); return Module::CompileAndOutput(file, arch, cpu, target, generatePIC, - ot, - outFileName, - headerFileName, + ot, + outFileName, + headerFileName, includeFileName, depsFileName, hostStubFileName, diff --git a/module.cpp b/module.cpp index 9e4defb8..8e13da61 100644 --- a/module.cpp +++ b/module.cpp @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file module.cpp @@ -163,7 +163,7 @@ lStripUnusedDebugInfo(llvm::Module *module) { // And now loop over the subprograms inside each compile unit. 
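lGetAllArgs above merges the real command line with tokens taken from the ISPC_ARGS environment variable. A rough model of the merge, splitting on whitespace (the actual code splits on spaces by hand and caps the result at 128 entries):

    #include <cstdlib>
    #include <sstream>
    #include <string>
    #include <vector>

    // Command-line arguments first, then ISPC_ARGS tokens in order.
    static std::vector<std::string> gatherArgs(int argc, char *argv[]) {
        std::vector<std::string> all(argv, argv + argc);
        if (const char *env = std::getenv("ISPC_ARGS")) {
            std::istringstream tokens(env);
            for (std::string tok; tokens >> tok;)
                all.push_back(tok);
        }
        return all;
    }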
for (unsigned j = 0, je = subprograms.getNumElements(); j != je; ++j) { - llvm::MDNode *spNode = + llvm::MDNode *spNode = llvm::dyn_cast(subprograms->getOperand(j)); Assert(spNode != NULL); llvm::DISubprogram sp(spNode); @@ -197,7 +197,7 @@ lStripUnusedDebugInfo(llvm::Module *module) { // debugging metadata organization on the LLVM side changed, // here is a bunch of asserting to make sure that element 12 of // the compile unit's MDNode has the subprograms array.... - llvm::MDNode *nodeSPMD = + llvm::MDNode *nodeSPMD = llvm::dyn_cast(cuNode->getOperand(12)); Assert(nodeSPMD != NULL); llvm::MDNode *nodeSPMDArray = @@ -209,9 +209,9 @@ lStripUnusedDebugInfo(llvm::Module *module) { // And now we can go and stuff it into the node with some // confidence... - llvm::Value *usedSubprogramsArray = + llvm::Value *usedSubprogramsArray = m->diBuilder->getOrCreateArray(llvm::ArrayRef(usedSubprograms)); - llvm::MDNode *replNode = + llvm::MDNode *replNode = llvm::MDNode::get(*g->ctx, llvm::ArrayRef(usedSubprogramsArray)); cuNode->replaceOperandWith(12, replNode); } @@ -349,7 +349,7 @@ Module::CompileFile() { else { // No preprocessor, just open up the file if it's not stdin.. FILE* f = NULL; - if (filename == NULL) + if (filename == NULL) f = stdin; else { f = fopen(filename, "r"); @@ -383,7 +383,7 @@ Module::AddTypeDef(const std::string &name, const Type *type, void -Module::AddGlobalVariable(const std::string &name, const Type *type, Expr *initExpr, +Module::AddGlobalVariable(const std::string &name, const Type *type, Expr *initExpr, bool isConst, StorageClass storageClass, SourcePos pos) { // These may be NULL due to errors in parsing; just gracefully return // here if so. @@ -420,7 +420,7 @@ Module::AddGlobalVariable(const std::string &name, const Type *type, Expr *initE "expression."); return; } - + llvm::Type *llvmType = type->LLVMType(g->ctx); if (llvmType == NULL) return; @@ -444,7 +444,7 @@ Module::AddGlobalVariable(const std::string &name, const Type *type, Expr *initE // convert themselves anyway.) if (dynamic_cast(initExpr) == NULL) initExpr = TypeConvertExpr(initExpr, type, "initializer"); - + if (initExpr != NULL) { initExpr = Optimize(initExpr); // Fingers crossed, now let's see if we've got a @@ -481,21 +481,21 @@ Module::AddGlobalVariable(const std::string &name, const Type *type, Expr *initE // If the type doesn't match with the previous one, issue an error. if (!Type::Equal(sym->type, type) || - (sym->storageClass != SC_EXTERN && + (sym->storageClass != SC_EXTERN && sym->storageClass != SC_EXTERN_C && sym->storageClass != storageClass)) { Error(pos, "Definition of variable \"%s\" conflicts with " - "definition at %s:%d.", name.c_str(), + "definition at %s:%d.", name.c_str(), sym->pos.name, sym->pos.first_line); return; } - llvm::GlobalVariable *gv = + llvm::GlobalVariable *gv = llvm::dyn_cast(sym->storagePtr); Assert(gv != NULL); // And issue an error if this is a redefinition of a variable - if (gv->hasInitializer() && + if (gv->hasInitializer() && sym->storageClass != SC_EXTERN && sym->storageClass != SC_EXTERN_C) { Error(pos, "Redefinition of variable \"%s\" is illegal. " "(Previous definition at %s:%d.)", sym->name.c_str(), @@ -521,8 +521,8 @@ Module::AddGlobalVariable(const std::string &name, const Type *type, Expr *initE // Note that the NULL llvmInitializer is what leads to "extern" // declarations coming up extern and not defining storage (a bit // subtle)... 
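For reference, the redeclaration handling in Module::AddGlobalVariable above reduces to two checks: the new declaration must agree with the previous symbol's type (and, for non-extern symbols, its storage class), and a global that already carries an initializer may only be redefined if it was extern. An illustrative simplification (the toy struct stands in for a Symbol and its llvm::GlobalVariable; storage-class matching is folded into isExtern):

    #include <string>

    struct Global {
        std::string type;  // stands in for the ispc Type
        bool isExtern;     // declared SC_EXTERN / SC_EXTERN_C
        bool isDefined;    // GlobalVariable already has an initializer
    };

    // Returns an error string, or nullptr if declaring the same name
    // again with type 'newType' is allowed.
    static const char *checkRedeclaration(const Global &prev,
                                          const std::string &newType) {
        if (prev.type != newType)
            return "conflicting definition";
        if (prev.isDefined && !prev.isExtern)
            return "illegal redefinition";
        return nullptr;
    }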
- sym->storagePtr = new llvm::GlobalVariable(*module, llvmType, isConst, - linkage, llvmInitializer, + sym->storagePtr = new llvm::GlobalVariable(*module, llvmType, isConst, + linkage, llvmInitializer, sym->name.c_str()); // Patch up any references to the previous GlobalVariable (e.g. from a @@ -532,11 +532,11 @@ Module::AddGlobalVariable(const std::string &name, const Type *type, Expr *initE oldGV->removeFromParent(); sym->storagePtr->setName(sym->name.c_str()); } - + if (diBuilder) { llvm::DIFile file = pos.GetDIFile(); llvm::DIGlobalVariable var = - diBuilder->createGlobalVariable(name, + diBuilder->createGlobalVariable(name, file, pos.first_line, sym->type->GetDIType(file), @@ -555,10 +555,10 @@ Module::AddGlobalVariable(const std::string &name, const Type *type, Expr *initE (Note that it's fine for the original struct or a contained struct to be varying, so long as all of its members have bound 'uniform' - variability.) + variability.) This function returns true and issues an error if any illegal - types are found, and returns false otherwise. + types are found, and returns false otherwise. */ static bool lRecursiveCheckValidParamType(const Type *t, bool vectorOk) { @@ -601,7 +601,7 @@ lRecursiveCheckValidParamType(const Type *t, bool vectorOk) { varying parameters is illegal. */ static void -lCheckExportedParameterTypes(const Type *type, const std::string &name, +lCheckExportedParameterTypes(const Type *type, const std::string &name, SourcePos pos) { if (lRecursiveCheckValidParamType(type, false) == false) { if (CastType(type)) @@ -645,8 +645,8 @@ lCheckForStructParameters(const FunctionType *ftype, SourcePos pos) { false if any errors were encountered. */ void -Module::AddFunctionDeclaration(const std::string &name, - const FunctionType *functionType, +Module::AddFunctionDeclaration(const std::string &name, + const FunctionType *functionType, StorageClass storageClass, bool isInline, SourcePos pos) { Assert(functionType != NULL); @@ -666,7 +666,7 @@ Module::AddFunctionDeclaration(const std::string &name, for (unsigned int i = 0; i < overloadFuncs.size(); ++i) { Symbol *overloadFunc = overloadFuncs[i]; - const FunctionType *overloadType = + const FunctionType *overloadType = CastType(overloadFunc->type); if (overloadType == NULL) { Assert(m->errorCount == 0); @@ -688,7 +688,7 @@ Module::AddFunctionDeclaration(const std::string &name, // If all of the parameter types match but the return type is // different, return an error--overloading by return type isn't // allowed.
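Aside: lRecursiveCheckValidParamType (whose body is elided from this hunk) implements a recursive walk; the sketch below shows the shape of that check with a stand-in type node, under the rule stated in the comment above that any 'varying' member makes an exported parameter illegal:

    #include <vector>

    struct TypeNode {                     // stand-in for ispc's Type hierarchy
        bool isVarying, isVector;
        std::vector<const TypeNode *> members;
    };

    // Returns true if an illegal type (varying, or a vector where vectors
    // are not allowed) is found anywhere inside t, per the comment above.
    static bool hasIllegalParamType(const TypeNode *t, bool vectorOk) {
        if (t->isVarying) return true;
        if (t->isVector && !vectorOk) return true;
        for (const TypeNode *m : t->members)
            if (hasIllegalParamType(m, vectorOk)) return true;
        return false;
    }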
- const FunctionType *ofType = + const FunctionType *ofType = CastType(overloadFunc->type); Assert(ofType != NULL); if (ofType->GetNumParameters() == functionType->GetNumParameters()) { @@ -725,7 +725,7 @@ Module::AddFunctionDeclaration(const std::string &name, symbolTable->LookupFunction(name.c_str(), &funcs); if (funcs.size() > 0) { if (funcs.size() > 1) { - // Multiple functions with this name have already been declared; + // Multiple functions with this name have already been declared; // can't overload here Error(pos, "Can't overload extern \"C\" function \"%s\"; " "%d functions with the same name have already been declared.", @@ -747,7 +747,7 @@ Module::AddFunctionDeclaration(const std::string &name, // Get the LLVM FunctionType bool disableMask = (storageClass == SC_EXTERN_C); - llvm::FunctionType *llvmFunctionType = + llvm::FunctionType *llvmFunctionType = functionType->LLVMFunctionType(g->ctx, disableMask); if (llvmFunctionType == NULL) return; @@ -763,13 +763,13 @@ Module::AddFunctionDeclaration(const std::string &name, if (g->mangleFunctionsWithTarget) functionName += g->target.GetISAString(); } - llvm::Function *function = - llvm::Function::Create(llvmFunctionType, linkage, functionName.c_str(), + llvm::Function *function = + llvm::Function::Create(llvmFunctionType, linkage, functionName.c_str(), module); // Set function attributes: we never throw exceptions function->setDoesNotThrow(); - if (storageClass != SC_EXTERN_C && + if (storageClass != SC_EXTERN_C && !g->generateDebuggingSymbols && isInline) #ifdef LLVM_3_2 @@ -778,7 +778,7 @@ Module::AddFunctionDeclaration(const std::string &name, function->addFnAttr(llvm::Attribute::AlwaysInline); #endif if (functionType->isTask) - // This also applies transitively to members I think? + // This also applies transitively to members I think? #if defined(LLVM_3_1) function->setDoesNotAlias(1, true); #else @@ -791,12 +791,12 @@ Module::AddFunctionDeclaration(const std::string &name, // Make sure that the return type isn't 'varying' or vector typed if // the function is 'export'ed. - if (functionType->isExported && + if (functionType->isExported && lRecursiveCheckValidParamType(functionType->GetReturnType(), false) == false) Error(pos, "Illegal to return a \"varying\" or vector type from " "exported function \"%s\"", name.c_str()); - if (functionType->isTask && + if (functionType->isTask && Type::Equal(functionType->GetReturnType(), AtomicType::Void) == false) Error(pos, "Task-qualified functions must have void return type."); @@ -822,7 +822,7 @@ Module::AddFunctionDeclaration(const std::string &name, // specify when this is not the case, but this should be the // default.) Set parameter attributes accordingly. (Only for // uniform pointers, since varying pointers are int vectors...) - if (!functionType->isTask && + if (!functionType->isTask && ((CastType(argType) != NULL && argType->IsUniformType() && // Exclude SOA argument because it is a pair {struct *, int} @@ -849,7 +849,7 @@ Module::AddFunctionDeclaration(const std::string &name, if (symbolTable->LookupFunction(argName.c_str())) Warning(argPos, "Function parameter \"%s\" shadows a function " "declared in global scope.", argName.c_str()); - + if (defaultValue != NULL) seenDefaultArg = true; else if (seenDefaultArg) { @@ -894,7 +894,7 @@ Module::AddFunctionDefinition(const std::string &name, const FunctionType *type, // we need to override the function type here in case the function had // earlier been declared with anonymous parameter names but is now // defined with actual names. 
This is yet another reason we shouldn't - // include the names in FunctionType... + // include the names in FunctionType... sym->type = type; ast->AddFunction(sym, code); @@ -902,14 +902,14 @@ Module::AddFunctionDefinition(const std::string &name, const FunctionType *type, void -Module::AddExportedTypes(const std::vector > &types) { for (int i = 0; i < (int)types.size(); ++i) { if (CastType(types[i].first) == NULL && CastType(types[i].first) == NULL && CastType(types[i].first) == NULL) Error(types[i].second, "Only struct, vector, and enum types, " - "not \"%s\", are allowed in type export lists.", + "not \"%s\", are allowed in type export lists.", types[i].first->GetString().c_str()); else exportedTypes.push_back(types[i]); @@ -996,7 +996,7 @@ Module::writeOutput(OutputType outputType, const char *outFileName, "C++ emission."); return false; } - extern bool WriteCXXFile(llvm::Module *module, const char *fn, + extern bool WriteCXXFile(llvm::Module *module, const char *fn, int vectorWidth, const char *includeName); return WriteCXXFile(module, outFileName, g->target.vectorWidth, includeFileName); @@ -1044,11 +1044,11 @@ Module::writeObjectFileOrAssembly(OutputType outputType, const char *outFileName bool Module::writeObjectFileOrAssembly(llvm::TargetMachine *targetMachine, - llvm::Module *module, OutputType outputType, + llvm::Module *module, OutputType outputType, const char *outFileName) { // Figure out if we're generating object file or assembly output, and // set binary output for object files - llvm::TargetMachine::CodeGenFileType fileType = (outputType == Object) ? + llvm::TargetMachine::CodeGenFileType fileType = (outputType == Object) ? llvm::TargetMachine::CGFT_ObjectFile : llvm::TargetMachine::CGFT_AssemblyFile; bool binary = (fileType == llvm::TargetMachine::CGFT_ObjectFile); unsigned int flags = binary ? llvm::raw_fd_ostream::F_Binary : 0; @@ -1083,7 +1083,7 @@ Module::writeObjectFileOrAssembly(llvm::TargetMachine *targetMachine, // Finally, run the passes to emit the object file/assembly pm.run(*module); - // Success; tell tool_output_file to keep the final output file. + // Success; tell tool_output_file to keep the final output file. of->keep(); return true; @@ -1123,7 +1123,7 @@ lEmitStructDecl(const StructType *st, std::vector *emittedSt // Otherwise first make sure any contained structs have been declared. 
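Aside: the struct-emission logic that begins just above (and continues below) is a dependency-ordered walk; here is a minimal sketch with stand-in types, assuming the only invariant is that element structs are declared before their enclosing struct, each exactly once:

    #include <cstdio>
    #include <vector>

    struct SDecl { const char *name; std::vector<const SDecl *> elementStructs; };

    static void emitStructDeclSketch(const SDecl *st,
                                     std::vector<const SDecl *> *emitted,
                                     FILE *f) {
        for (const SDecl *done : *emitted)
            if (done == st) return;                  // already declared once
        for (const SDecl *elt : st->elementStructs)
            emitStructDeclSketch(elt, emitted, f);   // dependencies first
        fprintf(f, "struct %s;\n", st->name);
        emitted->push_back(st);
    }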
for (int i = 0; i < st->GetElementCount(); ++i) { - const StructType *elementStructType = + const StructType *elementStructType = lGetElementStructType(st->GetElementType(i)); if (elementStructType != NULL) lEmitStructDecl(elementStructType, emittedStructs, file); @@ -1172,14 +1172,14 @@ lEmitEnumDecls(const std::vector &enumTypes, FILE *file) { fprintf(file, "///////////////////////////////////////////////////////////////////////////\n"); fprintf(file, "// Enumerator types with external visibility from ispc code\n"); fprintf(file, "///////////////////////////////////////////////////////////////////////////\n\n"); - + for (unsigned int i = 0; i < enumTypes.size(); ++i) { fprintf(file, "#ifndef __ISPC_ENUM_%s__\n",enumTypes[i]->GetEnumName().c_str()); fprintf(file, "#define __ISPC_ENUM_%s__\n",enumTypes[i]->GetEnumName().c_str()); std::string declaration = enumTypes[i]->GetCDeclaration(""); fprintf(file, "%s {\n", declaration.c_str()); - // Print the individual enumerators + // Print the individual enumerators for (int j = 0; j < enumTypes[i]->GetEnumeratorCount(); ++j) { const Symbol *e = enumTypes[i]->GetEnumerator(j); Assert(e->constValue != NULL); @@ -1233,7 +1233,7 @@ lEmitVectorTypedefs(const std::vector &types, FILE *file) { fprintf(file, "struct %s%d { %s v[%d]; };\n", baseDecl.c_str(), size, baseDecl.c_str(), size); fprintf(file, "#else\n"); - fprintf(file, "struct %s%d { %s v[%d]; } __attribute__ ((aligned(%d)));\n", + fprintf(file, "struct %s%d { %s v[%d]; } __attribute__ ((aligned(%d)));\n", baseDecl.c_str(), size, baseDecl.c_str(), size, align); fprintf(file, "#endif\n"); fprintf(file, "#endif\n\n"); @@ -1265,7 +1265,7 @@ lAddTypeIfNew(const Type *type, std::vector *exportedTypes) { Then, if it's a struct, recursively process its members to do the same. */ static void -lGetExportedTypes(const Type *type, +lGetExportedTypes(const Type *type, std::vector *exportedStructTypes, std::vector *exportedEnumTypes, std::vector *exportedVectorTypes) { @@ -1273,13 +1273,13 @@ lGetExportedTypes(const Type *type, const StructType *structType = CastType(type); if (CastType(type) != NULL) - lGetExportedTypes(type->GetReferenceTarget(), exportedStructTypes, + lGetExportedTypes(type->GetReferenceTarget(), exportedStructTypes, exportedEnumTypes, exportedVectorTypes); else if (CastType(type) != NULL) lGetExportedTypes(type->GetBaseType(), exportedStructTypes, exportedEnumTypes, exportedVectorTypes); else if (arrayType != NULL) - lGetExportedTypes(arrayType->GetElementType(), exportedStructTypes, + lGetExportedTypes(arrayType->GetElementType(), exportedStructTypes, exportedEnumTypes, exportedVectorTypes); else if (structType != NULL) { lAddTypeIfNew(type, exportedStructTypes); @@ -1303,7 +1303,7 @@ lGetExportedTypes(const Type *type, present in the parameters to them. 
*/ static void -lGetExportedParamTypes(const std::vector &funcs, +lGetExportedParamTypes(const std::vector &funcs, std::vector *exportedStructTypes, std::vector *exportedEnumTypes, std::vector *exportedVectorTypes) { @@ -1383,7 +1383,7 @@ std::string emitOffloadParamStruct(const std::string &paramStructName, { std::stringstream out; out << "struct " << paramStructName << " {" << std::endl; - + for (int i=0;i<fct->GetNumParameters();i++) { const Type *orgParamType = fct->GetParameterType(i); if (orgParamType->IsPointerType() || orgParamType->IsArrayType()) { @@ -1405,7 +1405,7 @@ std::string emitOffloadParamStruct(const std::string &paramStructName, } std::string paramName = fct->GetParameterName(i); std::string paramTypeName = paramType->GetString(); - + std::string tmpArgDecl = paramType->GetCDeclaration(paramName); out << " " << tmpArgDecl << ";" << std::endl; } @@ -1415,7 +1415,7 @@ std::string emitOffloadParamStruct(const std::string &paramStructName, } bool -Module::writeDevStub(const char *fn) +Module::writeDevStub(const char *fn) { FILE *file = fopen(fn, "w"); if (!file) { @@ -1427,14 +1427,14 @@ Module::writeDevStub(const char *fn) fprintf(file,"#include \"ispc/dev/offload.h\"\n\n"); fprintf(file, "#include \n\n"); - + // Collect single linear arrays of the *exported* functions (we'll // treat those as "__kernel"s in IVL -- "extern" functions will only // be used for dev-dev function calls; only "export" functions will // get exported to the host std::vector exportedFuncs; m->symbolTable->GetMatchingFunctions(lIsExported, &exportedFuncs); - + // Get all of the struct, vector, and enumerant types used as function // parameters. These vectors may have repeats. std::vector exportedStructTypes; @@ -1442,12 +1442,12 @@ Module::writeDevStub(const char *fn) std::vector exportedVectorTypes; lGetExportedParamTypes(exportedFuncs, &exportedStructTypes, &exportedEnumTypes, &exportedVectorTypes); - + // And print them lEmitVectorTypedefs(exportedVectorTypes, file); lEmitEnumDecls(exportedEnumTypes, file); lEmitStructDecls(exportedStructTypes, file); - + fprintf(file, "#ifdef __cplusplus\n"); fprintf(file, "namespace ispc {\n"); fprintf(file, "#endif // __cplusplus\n"); @@ -1475,7 +1475,7 @@ Module::writeDevStub(const char *fn) Assert(sym); const FunctionType *fct = CastType(sym->type); Assert(fct); - + if (!fct->GetReturnType()->IsVoidType()) { //Error(sym->pos,"When emitting offload-stubs, \"export\"ed functions cannot have non-void return types.\n"); Warning(sym->pos,"When emitting offload-stubs, ignoring \"export\"ed function with non-void return types.\n"); @@ -1505,7 +1505,7 @@ Module::writeDevStub(const char *fn) fprintf(file," struct %s args;\n memcpy(&args,in_pMiscData,sizeof(args));\n", paramStructName.c_str()); std::stringstream funcall; - + funcall << "ispc::" << sym->name << "("; for (int i=0;i<fct->GetNumParameters();i++) { // get param type and make it non-const, so we can write while unpacking @@ -1522,7 +1522,7 @@ Module::writeDevStub(const char *fn) } else { paramType = orgParamType->GetAsNonConstType(); } - + std::string paramName = fct->GetParameterName(i); std::string paramTypeName = paramType->GetString(); @@ -1554,7 +1554,7 @@ Module::writeDevStub(const char *fn) bool -Module::writeHostStub(const char *fn) +Module::writeHostStub(const char *fn) { FILE *file = fopen(fn, "w"); if (!file) { @@ -1568,14 +1568,14 @@ Module::writeHostStub(const char *fn) //fprintf(file,"#ifdef __cplusplus\nextern \"C\" {\n#endif // __cplusplus\n"); fprintf(file, "#ifdef __cplusplus\nnamespace ispc {\n#endif //
__cplusplus\n\n"); - + // Collect single linear arrays of the *exported* functions (we'll // treat those as "__kernel"s in IVL -- "extern" functions will only // be used for dev-dev function calls; only "export" functions will // get exported to the host std::vector exportedFuncs; m->symbolTable->GetMatchingFunctions(lIsExported, &exportedFuncs); - + // Get all of the struct, vector, and enumerant types used as function // parameters. These vectors may have repeats. std::vector exportedStructTypes; @@ -1583,12 +1583,12 @@ Module::writeHostStub(const char *fn) std::vector exportedVectorTypes; lGetExportedParamTypes(exportedFuncs, &exportedStructTypes, &exportedEnumTypes, &exportedVectorTypes); - + // And print them lEmitVectorTypedefs(exportedVectorTypes, file); lEmitEnumDecls(exportedEnumTypes, file); lEmitStructDecls(exportedStructTypes, file); - + fprintf(file, "\n"); fprintf(file, "///////////////////////////////////////////////////////////////////////////\n"); fprintf(file, "// host-side stubs for dev-side ISPC fucntion(s)\n"); @@ -1615,7 +1615,7 @@ Module::writeHostStub(const char *fn) // ------------------------------------------------------- // then, emit a fct stub that unpacks the parameters and pointers // ------------------------------------------------------- - + std::string decl = fct->GetCDeclaration(sym->name); fprintf(file, "extern %s {\n", decl.c_str()); int numPointers = 0; @@ -1661,7 +1661,7 @@ Module::writeHostStub(const char *fn) numPointers); fprintf(file,"}\n\n"); } - + // end extern "C" fprintf(file, "#ifdef __cplusplus\n"); fprintf(file, "}/* namespace */\n"); @@ -1669,7 +1669,7 @@ Module::writeHostStub(const char *fn) // fprintf(file, "#ifdef __cplusplus\n"); // fprintf(file, "}/* end extern C */\n"); // fprintf(file, "#endif // __cplusplus\n"); - + fclose(file); return true; } @@ -1690,9 +1690,9 @@ Module::writeHeader(const char *fn) { std::string guard = "ISPC_"; const char *p = fn; while (*p) { - if (isdigit(*p)) + if (isdigit(*p)) guard += *p; - else if (isalpha(*p)) + else if (isalpha(*p)) guard += toupper(*p); else guard += "_"; @@ -1719,7 +1719,7 @@ Module::writeHeader(const char *fn) { std::vector exportedFuncs, externCFuncs; m->symbolTable->GetMatchingFunctions(lIsExported, &exportedFuncs); m->symbolTable->GetMatchingFunctions(lIsExternC, &externCFuncs); - + // Get all of the struct, vector, and enumerant types used as function // parameters. These vectors may have repeats. 
std::vector exportedStructTypes; @@ -1795,10 +1795,10 @@ Module::execPreprocessor(const char *infilename, llvm::raw_string_ostream *ostre #endif llvm::IntrusiveRefCntPtr diagIDs(new clang::DiagnosticIDs); #if defined(LLVM_3_1) - clang::DiagnosticsEngine *diagEngine = + clang::DiagnosticsEngine *diagEngine = new clang::DiagnosticsEngine(diagIDs, diagPrinter); #else - clang::DiagnosticsEngine *diagEngine = + clang::DiagnosticsEngine *diagEngine = new clang::DiagnosticsEngine(diagIDs, diagOptions, diagPrinter); #endif inst.setDiagnostics(diagEngine); @@ -1843,7 +1843,7 @@ Module::execPreprocessor(const char *infilename, llvm::raw_string_ostream *ostre headerOpts.Verbose = 1; for (int i = 0; i < (int)g->includePath.size(); ++i) { headerOpts.AddPath(g->includePath[i], clang::frontend::Angled, -#if !defined(LLVM_3_3) +#if !defined(LLVM_3_3) true /* is user supplied */, #endif false /* not a framework */, @@ -1884,7 +1884,7 @@ Module::execPreprocessor(const char *infilename, llvm::raw_string_ostream *ostre opts.addMacroDef("ISPC_MINOR_VERSION=3"); if (g->includeStdlib) { - if (g->opt.disableAsserts) + if (g->opt.disableAsserts) opts.addMacroDef("assert(x)="); else opts.addMacroDef("assert(x)=__assert(#x, x)"); @@ -1944,7 +1944,7 @@ lGetTargetFileName(const char *outFileName, const char *isaString) { // Given a comma-delimited string with one or more compilation targets of // the form "sse2,avx-x2", return a vector of strings where each returned // string holds one of the targets from the given string. -static std::vector +static std::vector lExtractTargets(const char *target) { std::vector targets; const char *tstart = target; @@ -1985,14 +1985,14 @@ struct FunctionTargetVariants { // Given the symbol table for a module, return a map from function names to // FunctionTargetVariants for each function that was defined with the // 'export' qualifier in ispc. -static void -lGetExportedFunctions(SymbolTable *symbolTable, +static void +lGetExportedFunctions(SymbolTable *symbolTable, std::map &functions) { std::vector syms; symbolTable->GetMatchingFunctions(lSymbolIsExported, &syms); for (unsigned int i = 0; i < syms.size(); ++i) { FunctionTargetVariants &ftv = functions[syms[i]->name]; - ftv.func[g->target.isa] = syms[i]->exportedFunction; + ftv.func[g->target.isa] = syms[i]->exportedFunction; } } @@ -2016,7 +2016,7 @@ struct RewriteGlobalInfo { // multiple definitions of them, one in each of the target-specific output // files. static void -lExtractAndRewriteGlobals(llvm::Module *module, +lExtractAndRewriteGlobals(llvm::Module *module, std::vector *globals) { llvm::Module::global_iterator iter; for (iter = module->global_begin(); iter != module->global_end(); ++iter) { @@ -2029,7 +2029,7 @@ lExtractAndRewriteGlobals(llvm::Module *module, llvm::Constant *init = gv->getInitializer(); gv->setInitializer(NULL); - Symbol *sym = + Symbol *sym = m->symbolTable->LookupVariable(gv->getName().str().c_str()); Assert(sym != NULL); globals->push_back(RewriteGlobalInfo(gv, init, sym->pos)); @@ -2067,7 +2067,7 @@ lAddExtractedGlobals(llvm::Module *module, // Create a new global in the given model that matches the original // global - llvm::GlobalVariable *newGlobal = + llvm::GlobalVariable *newGlobal = new llvm::GlobalVariable(*module, type, gv->isConstant(), llvm::GlobalValue::ExternalLinkage, initializer, gv->getName()); @@ -2101,7 +2101,7 @@ lAddExtractedGlobals(llvm::Module *module, /** Create the dispatch function for an exported ispc function. 
- This function checks to see which vector ISAs the system the + This function checks to see which vector ISAs the system the code is running on supports and calls out to the best available variant that was generated at compile time. @@ -2119,7 +2119,7 @@ lAddExtractedGlobals(llvm::Module *module, */ static void lCreateDispatchFunction(llvm::Module *module, llvm::Function *setISAFunc, - llvm::Value *systemBestISAPtr, const std::string &name, + llvm::Value *systemBestISAPtr, const std::string &name, FunctionTargetVariants &funcs) { // The llvm::Function pointers in funcs are pointers to functions in // different llvm::Modules, so we can't call them directly. Therefore, @@ -2144,18 +2144,18 @@ lCreateDispatchFunction(llvm::Module *module, llvm::Function *setISAFunc, if (ftype == NULL) ftype = funcs.func[i]->getFunctionType(); - targetFuncs[i] = - llvm::Function::Create(ftype, llvm::GlobalValue::ExternalLinkage, + targetFuncs[i] = + llvm::Function::Create(ftype, llvm::GlobalValue::ExternalLinkage, funcs.func[i]->getName(), module); } bool voidReturn = ftype->getReturnType()->isVoidTy(); // Now we can emit the definition of the dispatch function.. - llvm::Function *dispatchFunc = - llvm::Function::Create(ftype, llvm::GlobalValue::ExternalLinkage, + llvm::Function *dispatchFunc = + llvm::Function::Create(ftype, llvm::GlobalValue::ExternalLinkage, name.c_str(), module); - llvm::BasicBlock *bblock = + llvm::BasicBlock *bblock = llvm::BasicBlock::Create(*g->ctx, "entry", dispatchFunc); // Start by calling out to the function that determines the system's @@ -2163,7 +2163,7 @@ lCreateDispatchFunction(llvm::Module *module, llvm::Function *setISAFunc, llvm::CallInst::Create(setISAFunc, "", bblock); // Now we can load the system's ISA enumerant - llvm::Value *systemISA = + llvm::Value *systemISA = new llvm::LoadInst(systemBestISAPtr, "system_isa", bblock); // Now emit code that works backwards through the available variants of @@ -2179,12 +2179,12 @@ lCreateDispatchFunction(llvm::Module *module, llvm::Function *setISAFunc, // Emit code to see if the system can run the current candidate // variant successfully--"is the system's ISA enumerant value >= // the enumerant value of the current candidate?" - llvm::Value *ok = + llvm::Value *ok = llvm::CmpInst::Create(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_SGE, systemISA, LLVMInt32(i), "isa_ok", bblock); - llvm::BasicBlock *callBBlock = + llvm::BasicBlock *callBBlock = llvm::BasicBlock::Create(*g->ctx, "do_call", dispatchFunc); - llvm::BasicBlock *nextBBlock = + llvm::BasicBlock *nextBBlock = llvm::BasicBlock::Create(*g->ctx, "next_try", dispatchFunc); llvm::BranchInst::Create(callBBlock, nextBBlock, ok, bblock); @@ -2192,7 +2192,7 @@ lCreateDispatchFunction(llvm::Module *module, llvm::Function *setISAFunc, // Just pass through all of the args from the dispatch function to // the target-specific function.
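Aside: the dispatch function assembled above amounts to the following control flow in plain C++. This is a behavioral stand-in, not the emitted code: foo_dispatch and the variant table are hypothetical, while __set_system_isa and __system_best_isa are the real symbols looked up in lCreateDispatchModule below.

    #include <cstdlib>

    extern "C" void __set_system_isa();   // probes the CPU, defined in builtins
    extern int __system_best_isa;         // ISA enumerant it fills in

    typedef void (*FooVariant)(float *, int);
    enum { NUM_ISAS = 8 };                     // stand-in for Target::NUM_ISAS
    static FooVariant fooVariants[NUM_ISAS];   // NULL if no variant for ISA i

    void foo_dispatch(float *a, int n) {
        __set_system_isa();
        // Work backwards from the best variant, as the emitted IR does: call
        // the first candidate whose enumerant the system's ISA can satisfy
        // (the >= test mirrors the ICMP_SGE comparison above).
        for (int i = NUM_ISAS - 1; i >= 0; --i) {
            if (fooVariants[i] != NULL && __system_best_isa >= i) {
                fooVariants[i](a, n);
                return;
            }
        }
        std::abort();                      // no runnable variant found
    }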
std::vector args; - llvm::Function::arg_iterator argIter = dispatchFunc->arg_begin(); + llvm::Function::arg_iterator argIter = dispatchFunc->arg_begin(); for (; argIter != dispatchFunc->arg_end(); ++argIter) args.push_back(argIter); if (voidReturn) { @@ -2200,8 +2200,8 @@ lCreateDispatchFunction(llvm::Module *module, llvm::Function *setISAFunc, llvm::ReturnInst::Create(*g->ctx, callBBlock); } else { - llvm::Value *retValue = - llvm::CallInst::Create(targetFuncs[i], args, "ret_value", + llvm::Value *retValue = + llvm::CallInst::Create(targetFuncs[i], args, "ret_value", callBBlock); llvm::ReturnInst::Create(*g->ctx, retValue, callBBlock); } @@ -2244,13 +2244,13 @@ lCreateDispatchModule(std::map &functions) // First, link in the definitions from the builtins-dispatch.ll file. extern unsigned char builtins_bitcode_dispatch[]; extern int builtins_bitcode_dispatch_length; - AddBitcodeToModule(builtins_bitcode_dispatch, + AddBitcodeToModule(builtins_bitcode_dispatch, builtins_bitcode_dispatch_length, module); // Get pointers to things we need below llvm::Function *setFunc = module->getFunction("__set_system_isa"); Assert(setFunc != NULL); - llvm::Value *systemBestISAPtr = + llvm::Value *systemBestISAPtr = module->getGlobalVariable("__system_best_isa", true); Assert(systemBestISAPtr != NULL); @@ -2272,18 +2272,18 @@ lCreateDispatchModule(std::map &functions) int -Module::CompileAndOutput(const char *srcFile, - const char *arch, - const char *cpu, - const char *target, - bool generatePIC, - OutputType outputType, - const char *outFileName, +Module::CompileAndOutput(const char *srcFile, + const char *arch, + const char *cpu, + const char *target, + bool generatePIC, + OutputType outputType, + const char *outFileName, const char *headerFileName, const char *includeFileName, const char *depsFileName, const char *hostStubFileName, - const char *devStubFileName) + const char *devStubFileName) { if (target == NULL || strchr(target, ',') == NULL) { // We're only compiling to a single target @@ -2368,7 +2368,7 @@ Module::CompileAndOutput(const char *srcFile, std::vector globals[Target::NUM_ISAS]; int errorCount = 0; for (unsigned int i = 0; i < targets.size(); ++i) { - if (!Target::GetTarget(arch, cpu, targets[i].c_str(), generatePIC, + if (!Target::GetTarget(arch, cpu, targets[i].c_str(), generatePIC, &g->target)) return 1; @@ -2393,7 +2393,7 @@ Module::CompileAndOutput(const char *srcFile, if (outFileName != NULL) { const char *isaName = g->target.GetISAString(); - std::string targetOutFileName = + std::string targetOutFileName = lGetTargetFileName(outFileName, isaName); if (!m->writeOutput(outputType, targetOutFileName.c_str())) return 1; @@ -2412,7 +2412,7 @@ Module::CompileAndOutput(const char *srcFile, // we generate the dispatch module's functions... } - llvm::Module *dispatchModule = + llvm::Module *dispatchModule = lCreateDispatchModule(exportedFunctions); lAddExtractedGlobals(dispatchModule, globals); @@ -2434,7 +2434,7 @@ Module::CompileAndOutput(const char *srcFile, writeObjectFileOrAssembly(firstTargetMachine, dispatchModule, outputType, outFileName); } - + return errorCount > 0; } } diff --git a/module.h b/module.h index bcbcab95..577e6b5b 100644 --- a/module.h +++ b/module.h @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file module.h @@ -64,7 +64,7 @@ public: /** Add a new global variable corresponding to the given Symbol to the module. If non-NULL, initExpr gives the initializer expression - for the global's initial value. */ + for the global's initial value. */ void AddGlobalVariable(const std::string &name, const Type *type, Expr *initExpr, bool isConst, StorageClass storageClass, SourcePos pos); @@ -72,7 +72,7 @@ public: /** Add a declaration of the function defined by the given function symbol to the module. */ void AddFunctionDeclaration(const std::string &name, - const FunctionType *ftype, + const FunctionType *ftype, StorageClass sc, bool isInline, SourcePos pos); /** Adds the function described by the declaration information and the @@ -82,7 +82,7 @@ public: /** Adds the given type to the set of types that have their definitions included in automatically generated header files. */ - void AddExportedTypes(const std::vector > &types); /** After a source file has been compiled, output can be generated in a @@ -91,7 +91,7 @@ public: Bitcode, /** Generate LLVM IR bitcode output */ Object, /** Generate a native object file */ CXX, /** Generate a C++ file */ - Header, /** Generate a C/C++ header file with + Header, /** Generate a C/C++ header file with declarations of 'export'ed functions, global variables, and the types used by them. */ Deps, /** generate dependencies */ @@ -122,18 +122,18 @@ public: inclusion from C/C++ code with declarations of types and functions exported from the given ispc source file. - @param includeFileName If non-NULL, gives the filename for the C++ + @param includeFileName If non-NULL, gives the filename for the C++ backend to emit in an #include statement to get definitions of the builtins for the generic target. @return Number of errors encountered when compiling srcFile. */ - static int CompileAndOutput(const char *srcFile, const char *arch, - const char *cpu, const char *targets, - bool generatePIC, - OutputType outputType, - const char *outFileName, + static int CompileAndOutput(const char *srcFile, const char *arch, + const char *cpu, const char *targets, + bool generatePIC, + OutputType outputType, + const char *outFileName, const char *headerFileName, const char *includeFileName, const char *depsFileName, @@ -148,7 +148,7 @@ public: SymbolTable *symbolTable; /** llvm Module object into which globals and functions are added. */ - llvm::Module *module; + llvm::Module *module; /** The diBuilder manages generating debugging information */ llvm::DIBuilder *diBuilder; @@ -171,7 +171,7 @@ private: bool writeHostStub(const char *filename); bool writeObjectFileOrAssembly(OutputType outputType, const char *filename); static bool writeObjectFileOrAssembly(llvm::TargetMachine *targetMachine, - llvm::Module *module, OutputType outputType, + llvm::Module *module, OutputType outputType, const char *outFileName); static bool writeBitcode(llvm::Module *module, const char *outFileName); diff --git a/opt.cpp b/opt.cpp index e23c9fbe..313e8214 100644 --- a/opt.cpp +++ b/opt.cpp @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ /** @file opt.cpp @@ -145,7 +145,7 @@ static llvm::Pass *CreateMakeInternalFuncsStaticPass(); /** This utility routine copies the metadata (if any) attached to the - 'from' instruction in the IR to the 'to' instruction. + 'from' instruction in the IR to the 'to' instruction. For flexibility, this function takes an llvm::Value rather than an llvm::Instruction for the 'to' parameter; at some places in the code @@ -157,9 +157,9 @@ static llvm::Pass *CreateMakeInternalFuncsStaticPass(); static void lCopyMetadata(llvm::Value *vto, const llvm::Instruction *from) { llvm::Instruction *to = llvm::dyn_cast(vto); - if (!to) + if (!to) return; - + llvm::SmallVector, 8> metadata; from->getAllMetadata(metadata); for (unsigned int i = 0; i < metadata.size(); ++i) @@ -202,16 +202,16 @@ lGetSourcePosFromMetadata(const llvm::Instruction *inst, SourcePos *pos) { // expected to have done in its operation llvm::MDString *str = llvm::dyn_cast(filename->getOperand(0)); Assert(str); - llvm::ConstantInt *first_lnum = + llvm::ConstantInt *first_lnum = llvm::dyn_cast(first_line->getOperand(0)); Assert(first_lnum); - llvm::ConstantInt *first_colnum = + llvm::ConstantInt *first_colnum = llvm::dyn_cast(first_column->getOperand(0)); Assert(first_column); - llvm::ConstantInt *last_lnum = + llvm::ConstantInt *last_lnum = llvm::dyn_cast(last_line->getOperand(0)); Assert(last_lnum); - llvm::ConstantInt *last_colnum = + llvm::ConstantInt *last_colnum = llvm::dyn_cast(last_column->getOperand(0)); Assert(last_column); @@ -223,7 +223,7 @@ lGetSourcePosFromMetadata(const llvm::Instruction *inst, SourcePos *pos) { static llvm::Instruction * -lCallInst(llvm::Function *func, llvm::Value *arg0, llvm::Value *arg1, +lCallInst(llvm::Function *func, llvm::Value *arg0, llvm::Value *arg1, const char *name, llvm::Instruction *insertBefore = NULL) { llvm::Value *args[2] = { arg0, arg1 }; llvm::ArrayRef newArgArray(&args[0], &args[2]); @@ -232,7 +232,7 @@ lCallInst(llvm::Function *func, llvm::Value *arg0, llvm::Value *arg1, static llvm::Instruction * -lCallInst(llvm::Function *func, llvm::Value *arg0, llvm::Value *arg1, +lCallInst(llvm::Function *func, llvm::Value *arg0, llvm::Value *arg1, llvm::Value *arg2, const char *name, llvm::Instruction *insertBefore = NULL) { llvm::Value *args[3] = { arg0, arg1, arg2 }; @@ -242,7 +242,7 @@ lCallInst(llvm::Function *func, llvm::Value *arg0, llvm::Value *arg1, static llvm::Instruction * -lCallInst(llvm::Function *func, llvm::Value *arg0, llvm::Value *arg1, +lCallInst(llvm::Function *func, llvm::Value *arg0, llvm::Value *arg1, llvm::Value *arg2, llvm::Value *arg3, const char *name, llvm::Instruction *insertBefore = NULL) { llvm::Value *args[4] = { arg0, arg1, arg2, arg3 }; @@ -251,7 +251,7 @@ lCallInst(llvm::Function *func, llvm::Value *arg0, llvm::Value *arg1, } static llvm::Instruction * -lCallInst(llvm::Function *func, llvm::Value *arg0, llvm::Value *arg1, +lCallInst(llvm::Function *func, llvm::Value *arg0, llvm::Value *arg1, llvm::Value *arg2, llvm::Value *arg3, llvm::Value *arg4, const char *name, llvm::Instruction *insertBefore = NULL) { llvm::Value *args[5] = { arg0, arg1, arg2, arg3, arg4 }; @@ -261,9 +261,9 @@ lCallInst(llvm::Function *func, llvm::Value *arg0, llvm::Value *arg1, static llvm::Instruction * -lCallInst(llvm::Function *func, llvm::Value *arg0, llvm::Value *arg1, +lCallInst(llvm::Function *func, llvm::Value *arg0, llvm::Value *arg1, llvm::Value *arg2, llvm::Value *arg3, llvm::Value *arg4, - llvm::Value *arg5, const char *name, + llvm::Value *arg5, const char *name, 
llvm::Instruction *insertBefore = NULL) { llvm::Value *args[6] = { arg0, arg1, arg2, arg3, arg4, arg5 }; llvm::ArrayRef newArgArray(&args[0], &args[6]); @@ -286,7 +286,7 @@ lGEPInst(llvm::Value *ptr, llvm::Value *offset, const char *name, to the first vector value and so forth. */ static uint64_t -lConstElementsToMask(const llvm::SmallVector &elements) { Assert(elements.size() <= 64); @@ -336,7 +336,7 @@ lGetMask(llvm::Value *factor, uint64_t *mask) { if (cv != NULL) { llvm::SmallVector elements; for (int i = 0; i < (int)cv->getNumOperands(); ++i) { - llvm::Constant *c = + llvm::Constant *c = llvm::dyn_cast(cv->getOperand(i)); if (c == NULL) return false; @@ -523,7 +523,7 @@ Optimize(llvm::Module *module, int optLevel) { g->target.vectorWidth > 1) { optPM.add(llvm::createInstructionCombiningPass()); optPM.add(CreateImproveMemoryOpsPass()); - + if (g->opt.disableCoalescing == false && g->target.isa != Target::GENERIC) { // It is important to run this here to make it easier to @@ -544,10 +544,10 @@ Optimize(llvm::Module *module, int optLevel) { optPM.add(CreateImproveMemoryOpsPass()); } - optPM.add(llvm::createIPSCCPPass()); - optPM.add(llvm::createDeadArgEliminationPass()); + optPM.add(llvm::createIPSCCPPass()); + optPM.add(llvm::createDeadArgEliminationPass()); optPM.add(llvm::createInstructionCombiningPass()); - optPM.add(llvm::createCFGSimplificationPass()); + optPM.add(llvm::createCFGSimplificationPass()); if (g->opt.disableHandlePseudoMemoryOps == false) optPM.add(CreateReplacePseudoMemoryOpsPass()); @@ -555,39 +555,39 @@ Optimize(llvm::Module *module, int optLevel) { optPM.add(CreateVSelMovmskOptPass()); optPM.add(llvm::createFunctionInliningPass()); - optPM.add(llvm::createArgumentPromotionPass()); + optPM.add(llvm::createArgumentPromotionPass()); optPM.add(llvm::createScalarReplAggregatesPass(sr_threshold, false)); - optPM.add(llvm::createInstructionCombiningPass()); - optPM.add(llvm::createCFGSimplificationPass()); - optPM.add(llvm::createReassociatePass()); - optPM.add(llvm::createLoopRotatePass()); - optPM.add(llvm::createLICMPass()); + optPM.add(llvm::createInstructionCombiningPass()); + optPM.add(llvm::createCFGSimplificationPass()); + optPM.add(llvm::createReassociatePass()); + optPM.add(llvm::createLoopRotatePass()); + optPM.add(llvm::createLICMPass()); optPM.add(llvm::createLoopUnswitchPass(false)); optPM.add(llvm::createInstructionCombiningPass()); - optPM.add(llvm::createIndVarSimplifyPass()); - optPM.add(llvm::createLoopIdiomPass()); - optPM.add(llvm::createLoopDeletionPass()); + optPM.add(llvm::createIndVarSimplifyPass()); + optPM.add(llvm::createLoopIdiomPass()); + optPM.add(llvm::createLoopDeletionPass()); if (g->opt.unrollLoops) - optPM.add(llvm::createLoopUnrollPass()); - optPM.add(llvm::createGVNPass()); + optPM.add(llvm::createLoopUnrollPass()); + optPM.add(llvm::createGVNPass()); optPM.add(CreateIsCompileTimeConstantPass(true)); optPM.add(CreateIntrinsicsOptPass()); optPM.add(CreateVSelMovmskOptPass()); - - optPM.add(llvm::createMemCpyOptPass()); - optPM.add(llvm::createSCCPPass()); + + optPM.add(llvm::createMemCpyOptPass()); + optPM.add(llvm::createSCCPPass()); optPM.add(llvm::createInstructionCombiningPass()); - optPM.add(llvm::createJumpThreadingPass()); + optPM.add(llvm::createJumpThreadingPass()); optPM.add(llvm::createCorrelatedValuePropagationPass()); - optPM.add(llvm::createDeadStoreEliminationPass()); - optPM.add(llvm::createAggressiveDCEPass()); - optPM.add(llvm::createCFGSimplificationPass()); - optPM.add(llvm::createInstructionCombiningPass()); - 
optPM.add(llvm::createStripDeadPrototypesPass()); + optPM.add(llvm::createDeadStoreEliminationPass()); + optPM.add(llvm::createAggressiveDCEPass()); + optPM.add(llvm::createCFGSimplificationPass()); + optPM.add(llvm::createInstructionCombiningPass()); + optPM.add(llvm::createStripDeadPrototypesPass()); optPM.add(CreateMakeInternalFuncsStaticPass()); - optPM.add(llvm::createGlobalDCEPass()); - optPM.add(llvm::createConstantMergePass()); + optPM.add(llvm::createGlobalDCEPass()); + optPM.add(llvm::createConstantMergePass()); } // Finish up by making sure we didn't mess anything up in the IR along @@ -614,7 +614,7 @@ Optimize(llvm::Module *module, int optLevel) { operations. @todo The better thing to do would be to submit a patch to LLVM to get - these; they're presumably pretty simple patterns to match. + these; they're presumably pretty simple patterns to match. */ class IntrinsicsOpt : public llvm::BasicBlockPass { public: @@ -638,7 +638,7 @@ private: struct BlendInstruction { BlendInstruction(llvm::Function *f, uint64_t ao, int o0, int o1, int of) : function(f), allOnMask(ao), op0(o0), op1(o1), opFactor(of) { } - /** Function pointer for the blend instruction */ + /** Function pointer for the blend instruction */ llvm::Function *function; /** Mask value for an "all on" mask for this instruction */ uint64_t allOnMask; @@ -661,17 +661,17 @@ private: char IntrinsicsOpt::ID = 0; -IntrinsicsOpt::IntrinsicsOpt() +IntrinsicsOpt::IntrinsicsOpt() : BasicBlockPass(ID) { // All of the mask instructions we may encounter. Note that even if // compiling for AVX, we may still encounter the regular 4-wide SSE // MOVMSK instruction. - llvm::Function *sseMovmsk = + llvm::Function *sseMovmsk = llvm::Intrinsic::getDeclaration(m->module, llvm::Intrinsic::x86_sse_movmsk_ps); maskInstructions.push_back(sseMovmsk); maskInstructions.push_back(m->module->getFunction("__movmsk")); - llvm::Function *avxMovmsk = + llvm::Function *avxMovmsk = llvm::Intrinsic::getDeclaration(m->module, llvm::Intrinsic::x86_avx_movmsk_ps_256); Assert(avxMovmsk != NULL); maskInstructions.push_back(avxMovmsk); @@ -712,13 +712,13 @@ bool IntrinsicsOpt::runOnBasicBlock(llvm::BasicBlock &bb) { DEBUG_START_PASS("IntrinsicsOpt"); - llvm::Function *avxMaskedLoad32 = + llvm::Function *avxMaskedLoad32 = llvm::Intrinsic::getDeclaration(m->module, llvm::Intrinsic::x86_avx_maskload_ps_256); - llvm::Function *avxMaskedLoad64 = + llvm::Function *avxMaskedLoad64 = llvm::Intrinsic::getDeclaration(m->module, llvm::Intrinsic::x86_avx_maskload_pd_256); - llvm::Function *avxMaskedStore32 = + llvm::Function *avxMaskedStore32 = llvm::Intrinsic::getDeclaration(m->module, llvm::Intrinsic::x86_avx_maskstore_ps_256); - llvm::Function *avxMaskedStore64 = + llvm::Function *avxMaskedStore64 = llvm::Intrinsic::getDeclaration(m->module, llvm::Intrinsic::x86_avx_maskstore_pd_256); Assert(avxMaskedLoad32 != NULL && avxMaskedStore32 != NULL); Assert(avxMaskedLoad64 != NULL && avxMaskedStore64 != NULL); @@ -732,13 +732,13 @@ IntrinsicsOpt::runOnBasicBlock(llvm::BasicBlock &bb) { BlendInstruction *blend = matchingBlendInstruction(callInst->getCalledFunction()); if (blend != NULL) { - llvm::Value *v[2] = { callInst->getArgOperand(blend->op0), + llvm::Value *v[2] = { callInst->getArgOperand(blend->op0), callInst->getArgOperand(blend->op1) }; llvm::Value *factor = callInst->getArgOperand(blend->opFactor); // If the values are the same, then no need to blend.. 
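Aside: the blend handling that follows applies a handful of algebraic rewrites. A scalar-flavored sketch of those rules (Val and simplifyBlend are stand-ins; mask is the per-lane bit pattern that lGetMask recovers from compile-time constant mask vectors):

    #include <cstdint>

    struct Val { int id; bool isUndef; };

    // Returns the value a blend(v0, v1, mask) call collapses to, or
    // nullptr when a real blend instruction is still required.
    static const Val *simplifyBlend(const Val *v0, const Val *v1,
                                    uint64_t mask, uint64_t allOnMask) {
        if (v0->id == v1->id) return v0;    // identical inputs: nothing to do
        if (v0->isUndef) return v1;         // undef lanes may take any value
        if (v1->isUndef) return v0;
        if (mask == 0) return v0;           // all-off mask keeps the first input
        if (mask == allOnMask) return v1;   // all-on mask keeps the second
        return nullptr;                     // genuinely needs the blend
    }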
if (v[0] == v[1]) { - llvm::ReplaceInstWithValue(iter->getParent()->getInstList(), + llvm::ReplaceInstWithValue(iter->getParent()->getInstList(), iter, v[0]); modifiedAny = true; goto restart; @@ -751,13 +751,13 @@ IntrinsicsOpt::runOnBasicBlock(llvm::BasicBlock &bb) { // otherwise the result is undefined and any value is fine, // ergo the defined one is an acceptable result.) if (lIsUndef(v[0])) { - llvm::ReplaceInstWithValue(iter->getParent()->getInstList(), + llvm::ReplaceInstWithValue(iter->getParent()->getInstList(), iter, v[1]); modifiedAny = true; goto restart; } if (lIsUndef(v[1])) { - llvm::ReplaceInstWithValue(iter->getParent()->getInstList(), + llvm::ReplaceInstWithValue(iter->getParent()->getInstList(), iter, v[0]); modifiedAny = true; goto restart; @@ -774,7 +774,7 @@ IntrinsicsOpt::runOnBasicBlock(llvm::BasicBlock &bb) { value = v[1]; if (value != NULL) { - llvm::ReplaceInstWithValue(iter->getParent()->getInstList(), + llvm::ReplaceInstWithValue(iter->getParent()->getInstList(), iter, value); modifiedAny = true; goto restart; @@ -817,9 +817,9 @@ IntrinsicsOpt::runOnBasicBlock(llvm::BasicBlock &bb) { Assert(llvm::isa(returnType)); // cast the i8 * to the appropriate type const char *name = LLVMGetName(callInst->getArgOperand(0), "_cast"); - llvm::Value *castPtr = + llvm::Value *castPtr = new llvm::BitCastInst(callInst->getArgOperand(0), - llvm::PointerType::get(returnType, 0), + llvm::PointerType::get(returnType, 0), name, callInst); lCopyMetadata(castPtr, callInst); int align; @@ -828,7 +828,7 @@ IntrinsicsOpt::runOnBasicBlock(llvm::BasicBlock &bb) { else align = callInst->getCalledFunction() == avxMaskedLoad32 ? 4 : 8; name = LLVMGetName(callInst->getArgOperand(0), "_load"); - llvm::Instruction *loadInst = + llvm::Instruction *loadInst = new llvm::LoadInst(castPtr, name, false /* not volatile */, align, (llvm::Instruction *)NULL); lCopyMetadata(loadInst, callInst); @@ -856,13 +856,13 @@ IntrinsicsOpt::runOnBasicBlock(llvm::BasicBlock &bb) { llvm::Type *storeType = rvalue->getType(); const char *name = LLVMGetName(callInst->getArgOperand(0), "_ptrcast"); - llvm::Value *castPtr = + llvm::Value *castPtr = new llvm::BitCastInst(callInst->getArgOperand(0), - llvm::PointerType::get(storeType, 0), + llvm::PointerType::get(storeType, 0), name, callInst); lCopyMetadata(castPtr, callInst); - llvm::StoreInst *storeInst = + llvm::StoreInst *storeInst = new llvm::StoreInst(rvalue, castPtr, (llvm::Instruction *)NULL); int align; if (g->opt.forceAlignedMemory) @@ -919,7 +919,7 @@ CreateIntrinsicsOptPass() { appropriate operand if so. @todo The better thing to do would be to submit a patch to LLVM to get - these; they're presumably pretty simple patterns to match. + these; they're presumably pretty simple patterns to match. 
*/ class VSelMovmskOpt : public llvm::BasicBlockPass { public: @@ -957,7 +957,7 @@ VSelMovmskOpt::runOnBasicBlock(llvm::BasicBlock &bb) { value = selectInst->getOperand(2); if (value != NULL) { - llvm::ReplaceInstWithValue(iter->getParent()->getInstList(), + llvm::ReplaceInstWithValue(iter->getParent()->getInstList(), iter, value); modifiedAny = true; goto restart; @@ -979,7 +979,7 @@ VSelMovmskOpt::runOnBasicBlock(llvm::BasicBlock &bb) { callInst->getArgOperand(0)->dump(); fprintf(stderr, "-----------\n"); #endif - llvm::ReplaceInstWithValue(iter->getParent()->getInstList(), + llvm::ReplaceInstWithValue(iter->getParent()->getInstList(), iter, LLVMInt64(mask)); modifiedAny = true; goto restart; @@ -1042,7 +1042,7 @@ lCheckForActualPointer(llvm::Value *v) { else if (llvm::isa(v)) return v; else { - llvm::ConstantExpr *uce = + llvm::ConstantExpr *uce = llvm::dyn_cast(v); if (uce != NULL && uce->getOpcode() == llvm::Instruction::PtrToInt) @@ -1093,7 +1093,7 @@ lGetBasePointer(llvm::Value *v) { form "base pointer + offset", where op0 is the base pointer and op1 is the offset; if so return the base and the offset. */ static llvm::Constant * -lGetConstantAddExprBaseOffset(llvm::Constant *op0, llvm::Constant *op1, +lGetConstantAddExprBaseOffset(llvm::Constant *op0, llvm::Constant *op1, llvm::Constant **delta) { llvm::ConstantExpr *op = llvm::dyn_cast(op0); if (op == NULL || op->getOpcode() != llvm::Instruction::PtrToInt) @@ -1152,17 +1152,17 @@ lGetBasePtrAndOffsets(llvm::Value *ptrs, llvm::Value **offsets, if (bop != NULL && bop->getOpcode() == llvm::Instruction::Add) { // If we have a common pointer plus something, then we're also // good. - if ((base = lGetBasePtrAndOffsets(bop->getOperand(0), + if ((base = lGetBasePtrAndOffsets(bop->getOperand(0), offsets, insertBefore)) != NULL) { - *offsets = + *offsets = llvm::BinaryOperator::Create(llvm::Instruction::Add, *offsets, bop->getOperand(1), "new_offsets", insertBefore); return base; } - else if ((base = lGetBasePtrAndOffsets(bop->getOperand(1), + else if ((base = lGetBasePtrAndOffsets(bop->getOperand(1), offsets, insertBefore)) != NULL) { - *offsets = + *offsets = llvm::BinaryOperator::Create(llvm::Instruction::Add, *offsets, bop->getOperand(0), "new_offsets", insertBefore); @@ -1176,7 +1176,7 @@ lGetBasePtrAndOffsets(llvm::Value *ptrs, llvm::Value **offsets, // ConstantVectors.. llvm::SmallVector elements; for (int i = 0; i < (int)cv->getNumOperands(); ++i) { - llvm::Constant *c = + llvm::Constant *c = llvm::dyn_cast(cv->getOperand(i)); if (c == NULL) return NULL; @@ -1203,12 +1203,12 @@ lGetBasePtrAndOffsets(llvm::Value *ptrs, llvm::Value **offsets, // Try both orderings of the operands to see if we can get // a pointer+offset out of them.
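Aside: lExtractConstantOffset, used a little further below, maintains the invariant value == constOffset + variableOffset while recursing through adds, multiplies, and sign extensions. The arithmetic identities involved, sketched with plain integers standing in for the vector values:

    struct Split { long long c, v; };   // constant and variable parts

    static Split addSplit(Split a, Split b) {
        return { a.c + b.c, a.v + b.v };   // (c0+v0)+(c1+v1) splits termwise
    }

    static Split mulSplit(Split a, Split b) {
        // (c0+v0)*(c1+v1) = c0*c1 + (c0*v1 + v0*c1 + v0*v1): the first
        // term stays constant, everything else lands in the variable part.
        return { a.c * b.c, a.c * b.v + a.v * b.c + a.v * b.v };
    }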
elementBase = - lGetConstantAddExprBaseOffset(ce->getOperand(0), + lGetConstantAddExprBaseOffset(ce->getOperand(0), ce->getOperand(1), &delta[i]); if (elementBase == NULL) - elementBase = - lGetConstantAddExprBaseOffset(ce->getOperand(1), + elementBase = + lGetConstantAddExprBaseOffset(ce->getOperand(1), ce->getOperand(0), &delta[i]); } @@ -1230,7 +1230,7 @@ lGetBasePtrAndOffsets(llvm::Value *ptrs, llvm::Value **offsets, } Assert(base != NULL); - llvm::ArrayRef deltas(&delta[0], + llvm::ArrayRef deltas(&delta[0], &delta[elements.size()]); *offsets = llvm::ConstantVector::get(deltas); return base; @@ -1263,7 +1263,7 @@ lGetBasePtrAndOffsets(llvm::Value *ptrs, llvm::Value **offsets, */ static void lExtractConstantOffset(llvm::Value *vec, llvm::Value **constOffset, - llvm::Value **variableOffset, + llvm::Value **variableOffset, llvm::Instruction *insertBefore) { if (llvm::isa(vec) || llvm::isa(vec) || @@ -1283,13 +1283,13 @@ lExtractConstantOffset(llvm::Value *vec, llvm::Value **constOffset, if (co == NULL) *constOffset = NULL; else - *constOffset = new llvm::SExtInst(co, sext->getType(), + *constOffset = new llvm::SExtInst(co, sext->getType(), LLVMGetName(co, "_sext"), insertBefore); if (vo == NULL) *variableOffset = NULL; else - *variableOffset = new llvm::SExtInst(vo, sext->getType(), + *variableOffset = new llvm::SExtInst(vo, sext->getType(), LLVMGetName(vo, "_sext"), insertBefore); return; @@ -1312,7 +1312,7 @@ lExtractConstantOffset(llvm::Value *vec, llvm::Value **constOffset, else if (c1 == NULL || llvm::isa(c1)) *constOffset = c0; else - *constOffset = + *constOffset = llvm::BinaryOperator::Create(llvm::Instruction::Add, c0, c1, LLVMGetName("add", c0, c1), insertBefore); @@ -1322,7 +1322,7 @@ lExtractConstantOffset(llvm::Value *vec, llvm::Value **constOffset, else if (v1 == NULL || llvm::isa(v1)) *variableOffset = v0; else - *variableOffset = + *variableOffset = llvm::BinaryOperator::Create(llvm::Instruction::Add, v0, v1, LLVMGetName("add", v0, v1), insertBefore); @@ -1355,7 +1355,7 @@ lExtractConstantOffset(llvm::Value *vec, llvm::Value **constOffset, vc = llvm::BinaryOperator::Create(llvm::Instruction::Mul, v0, v1, LLVMGetName("mul", v0, v1), insertBefore); - + llvm::Value *vab = NULL; if (va != NULL && vb != NULL) vab = llvm::BinaryOperator::Create(llvm::Instruction::Add, va, vb, @@ -1366,7 +1366,7 @@ lExtractConstantOffset(llvm::Value *vec, llvm::Value **constOffset, vab = vb; if (vab != NULL && vc != NULL) - *variableOffset = + *variableOffset = llvm::BinaryOperator::Create(llvm::Instruction::Add, vab, vc, LLVMGetName("add", vab, vc), insertBefore); else if (vab != NULL) @@ -1389,7 +1389,7 @@ lExtractConstantOffset(llvm::Value *vec, llvm::Value **constOffset, *splat, if so). 
*/ static bool lIsIntegerSplat(llvm::Value *v, int *splat) { - llvm::ConstantDataVector *cvec = + llvm::ConstantDataVector *cvec = llvm::dyn_cast(v); if (cvec == NULL) return false; @@ -1398,7 +1398,7 @@ lIsIntegerSplat(llvm::Value *v, int *splat) { if (splatConst == NULL) return false; - llvm::ConstantInt *ci = + llvm::ConstantInt *ci = llvm::dyn_cast(splatConst); if (ci == NULL) return false; @@ -1407,10 +1407,10 @@ lIsIntegerSplat(llvm::Value *v, int *splat) { *splat = (int)splatVal; return true; } - + static llvm::Value * -lExtract248Scale(llvm::Value *splatOperand, int splatValue, +lExtract248Scale(llvm::Value *splatOperand, int splatValue, llvm::Value *otherOperand, llvm::Value **result) { if (splatValue == 2 || splatValue == 4 || splatValue == 8) { *result = otherOperand; @@ -1420,20 +1420,20 @@ lExtract248Scale(llvm::Value *splatOperand, int splatValue, // see if we can pull out that much of the scale anyway; this may in // turn allow other optimizations later. for (int scale = 8; scale >= 2; scale /= 2) { - llvm::Instruction *insertBefore = + llvm::Instruction *insertBefore = llvm::dyn_cast(*result); Assert(insertBefore != NULL); if ((splatValue % scale) == 0) { // *result = otherOperand * splatOperand / scale; - llvm::Value *splatScaleVec = + llvm::Value *splatScaleVec = (splatOperand->getType() == LLVMTypes::Int32VectorType) ? LLVMInt32Vector(scale) : LLVMInt64Vector(scale); - llvm::Value *splatDiv = + llvm::Value *splatDiv = llvm::BinaryOperator::Create(llvm::Instruction::SDiv, splatOperand, splatScaleVec, "div", insertBefore); - *result = + *result = llvm::BinaryOperator::Create(llvm::Instruction::Mul, splatDiv, otherOperand, "mul", insertBefore); @@ -1546,7 +1546,7 @@ lExtractUniforms(llvm::Value **vec, llvm::Instruction *insertBefore) { if (LLVMVectorValuesAllEqual(*vec)) { // FIXME: we may want to redo all of the expression here, in scalar // form (if at all possible), for code quality... - llvm::Value *unif = + llvm::Value *unif = llvm::ExtractElementInst::Create(*vec, LLVMInt32(0), "first_uniform", insertBefore); *vec = NULL; @@ -1572,7 +1572,7 @@ lExtractUniforms(llvm::Value **vec, llvm::Instruction *insertBefore) { *vec = llvm::BinaryOperator::Create(llvm::Instruction::Add, op0, op1, "new_add", insertBefore); - if (s0 == NULL) + if (s0 == NULL) return s1; else if (s1 == NULL) return s0; @@ -1602,8 +1602,8 @@ lExtractUniforms(llvm::Value **vec, llvm::Instruction *insertBefore) { static void -lExtractUniformsFromOffset(llvm::Value **basePtr, llvm::Value **offsetVector, - llvm::Value *offsetScale, +lExtractUniformsFromOffset(llvm::Value **basePtr, llvm::Value **offsetVector, + llvm::Value *offsetScale, llvm::Instruction *insertBefore) { #if 1 (*basePtr)->dump(); @@ -1646,15 +1646,15 @@ lVectorIs32BitInts(llvm::Value *v) { 32-bit values. If so, return true and update the pointed-to llvm::Value *s to be the 32-bit equivalents. 
*/ static bool -lOffsets32BitSafe(llvm::Value **variableOffsetPtr, - llvm::Value **constOffsetPtr, +lOffsets32BitSafe(llvm::Value **variableOffsetPtr, + llvm::Value **constOffsetPtr, llvm::Instruction *insertBefore) { llvm::Value *variableOffset = *variableOffsetPtr; llvm::Value *constOffset = *constOffsetPtr; if (variableOffset->getType() != LLVMTypes::Int32VectorType) { llvm::SExtInst *sext = llvm::dyn_cast(variableOffset); - if (sext != NULL && + if (sext != NULL && sext->getOperand(0)->getType() == LLVMTypes::Int32VectorType) // sext of a 32-bit vector -> the 32-bit vector is good variableOffset = sext->getOperand(0); @@ -1662,7 +1662,7 @@ lOffsets32BitSafe(llvm::Value **variableOffsetPtr, // The only constant vector we should have here is a vector of // all zeros (i.e. a ConstantAggregateZero, but just in case, // do the more general check with lVectorIs32BitInts(). - variableOffset = + variableOffset = new llvm::TruncInst(variableOffset, LLVMTypes::Int32VectorType, LLVMGetName(variableOffset, "_trunc"), insertBefore); @@ -1673,7 +1673,7 @@ lOffsets32BitSafe(llvm::Value **variableOffsetPtr, if (constOffset->getType() != LLVMTypes::Int32VectorType) { if (lVectorIs32BitInts(constOffset)) { // Truncate them so we have a 32-bit vector type for them. - constOffset = + constOffset = new llvm::TruncInst(constOffset, LLVMTypes::Int32VectorType, LLVMGetName(constOffset, "_trunc"), insertBefore); } @@ -1686,7 +1686,7 @@ lOffsets32BitSafe(llvm::Value **variableOffsetPtr, // constant vector out of them, and // llvm::ConstantFoldInstruction() doesn't seem to be doing // enough for us in some cases if we call it from here. - constOffset = + constOffset = new llvm::TruncInst(constOffset, LLVMTypes::Int32VectorType, LLVMGetName(constOffset, "_trunc"), insertBefore); } @@ -1702,7 +1702,7 @@ lOffsets32BitSafe(llvm::Value **variableOffsetPtr, 32-bit values. If so, return true and update the pointed-to llvm::Value * to be the 32-bit equivalent. */ static bool -lOffsets32BitSafe(llvm::Value **offsetPtr, +lOffsets32BitSafe(llvm::Value **offsetPtr, llvm::Instruction *insertBefore) { llvm::Value *offset = *offsetPtr; @@ -1710,7 +1710,7 @@ lOffsets32BitSafe(llvm::Value **offsetPtr, return true; llvm::SExtInst *sext = llvm::dyn_cast(offset); - if (sext != NULL && + if (sext != NULL && sext->getOperand(0)->getType() == LLVMTypes::Int32VectorType) { // sext of a 32-bit vector -> the 32-bit vector is good *offsetPtr = sext->getOperand(0); @@ -1720,7 +1720,7 @@ lOffsets32BitSafe(llvm::Value **offsetPtr, // The only constant vector we should have here is a vector of // all zeros (i.e. a ConstantAggregateZero, but just in case, // do the more general check with lVectorIs32BitInts(). - *offsetPtr = + *offsetPtr = new llvm::TruncInst(offset, LLVMTypes::Int32VectorType, LLVMGetName(offset, "_trunc"), insertBefore); @@ -1734,8 +1734,8 @@ lOffsets32BitSafe(llvm::Value **offsetPtr, static bool lGSToGSBaseOffsets(llvm::CallInst *callInst) { struct GSInfo { - GSInfo(const char *pgFuncName, const char *pgboFuncName, - const char *pgbo32FuncName, bool ig) + GSInfo(const char *pgFuncName, const char *pgboFuncName, + const char *pgbo32FuncName, bool ig) : isGather(ig) { func = m->module->getFunction(pgFuncName); baseOffsetsFunc = m->module->getFunction(pgboFuncName); @@ -1747,7 +1747,7 @@ lGSToGSBaseOffsets(llvm::CallInst *callInst) { }; GSInfo gsFuncs[] = { - GSInfo("__pseudo_gather32_i8", + GSInfo("__pseudo_gather32_i8", g->target.hasGather ? 
"__pseudo_gather_base_offsets32_i8" : "__pseudo_gather_factored_base_offsets32_i8", g->target.hasGather ? "__pseudo_gather_base_offsets32_i8" : @@ -1916,7 +1916,7 @@ lGSToGSBaseOffsets(llvm::CallInst *callInst) { // lGetBasePtrAndOffsets). llvm::Value *ptrs = callInst->getArgOperand(0); llvm::Value *offsetVector = NULL; - llvm::Value *basePtr = lGetBasePtrAndOffsets(ptrs, &offsetVector, + llvm::Value *basePtr = lGetBasePtrAndOffsets(ptrs, &offsetVector, callInst); if (basePtr == NULL || offsetVector == NULL) @@ -1943,7 +1943,7 @@ lGSToGSBaseOffsets(llvm::CallInst *callInst) { // If we're doing 32-bit addressing on a 64-bit target, here we // will see if we can call one of the 32-bit variants of the pseudo // gather/scatter functions. - if (g->opt.force32BitAddressing && + if (g->opt.force32BitAddressing && lOffsets32BitSafe(&offsetVector, callInst)) { gatherScatterFunc = info->baseOffsets32Func; } @@ -1956,7 +1956,7 @@ lGSToGSBaseOffsets(llvm::CallInst *callInst) { // llvm::Instruction to llvm::CallInst::Create; this means that // the instruction isn't inserted into a basic block and that // way we can then call ReplaceInstWithInst(). - llvm::Instruction *newCall = + llvm::Instruction *newCall = lCallInst(gatherScatterFunc, basePtr, offsetScale, offsetVector, mask, callInst->getName().str().c_str(), NULL); @@ -1970,8 +1970,8 @@ lGSToGSBaseOffsets(llvm::CallInst *callInst) { // Generate a new function call to the next pseudo scatter // base+offsets instruction. See above for why passing NULL // for the Instruction * is intended. - llvm::Instruction *newCall = - lCallInst(gatherScatterFunc, basePtr, offsetScale, + llvm::Instruction *newCall = + lCallInst(gatherScatterFunc, basePtr, offsetScale, offsetVector, storeValue, mask, "", NULL); lCopyMetadata(newCall, callInst); llvm::ReplaceInstWithInst(callInst, newCall); @@ -1984,7 +1984,7 @@ lGSToGSBaseOffsets(llvm::CallInst *callInst) { // which in turn allows their implementations to end up emitting // x86 instructions with constant offsets encoded in them. llvm::Value *constOffset, *variableOffset; - lExtractConstantOffset(offsetVector, &constOffset, &variableOffset, + lExtractConstantOffset(offsetVector, &constOffset, &variableOffset, callInst); if (constOffset == NULL) constOffset = LLVMIntAsType(0, offsetVector->getType()); @@ -2000,7 +2000,7 @@ lGSToGSBaseOffsets(llvm::CallInst *callInst) { // If we're doing 32-bit addressing on a 64-bit target, here we // will see if we can call one of the 32-bit variants of the pseudo // gather/scatter functions. - if (g->opt.force32BitAddressing && + if (g->opt.force32BitAddressing && lOffsets32BitSafe(&variableOffset, &constOffset, callInst)) { gatherScatterFunc = info->baseOffsets32Func; } @@ -2013,7 +2013,7 @@ lGSToGSBaseOffsets(llvm::CallInst *callInst) { // llvm::Instruction to llvm::CallInst::Create; this means that // the instruction isn't inserted into a basic block and that // way we can then call ReplaceInstWithInst(). - llvm::Instruction *newCall = + llvm::Instruction *newCall = lCallInst(gatherScatterFunc, basePtr, variableOffset, offsetScale, constOffset, mask, callInst->getName().str().c_str(), NULL); @@ -2027,7 +2027,7 @@ lGSToGSBaseOffsets(llvm::CallInst *callInst) { // Generate a new function call to the next pseudo scatter // base+offsets instruction. See above for why passing NULL // for the Instruction * is intended. 
- llvm::Instruction *newCall = + llvm::Instruction *newCall = lCallInst(gatherScatterFunc, basePtr, variableOffset, offsetScale, constOffset, storeValue, mask, "", NULL); lCopyMetadata(newCall, callInst); @@ -2047,7 +2047,7 @@ lGSToGSBaseOffsets(llvm::CallInst *callInst) { static bool lGSBaseOffsetsGetMoreConst(llvm::CallInst *callInst) { struct GSBOInfo { - GSBOInfo(const char *pgboFuncName, const char *pgbo32FuncName, bool ig) + GSBOInfo(const char *pgboFuncName, const char *pgbo32FuncName, bool ig) : isGather(ig) { baseOffsetsFunc = m->module->getFunction(pgboFuncName); baseOffsets32Func = m->module->getFunction(pgbo32FuncName); @@ -2058,65 +2058,65 @@ lGSBaseOffsetsGetMoreConst(llvm::CallInst *callInst) { GSBOInfo gsFuncs[] = { GSBOInfo(g->target.hasGather ? "__pseudo_gather_base_offsets32_i8" : - "__pseudo_gather_factored_base_offsets32_i8", + "__pseudo_gather_factored_base_offsets32_i8", g->target.hasGather ? "__pseudo_gather_base_offsets32_i8" : - "__pseudo_gather_factored_base_offsets32_i8", + "__pseudo_gather_factored_base_offsets32_i8", true), GSBOInfo(g->target.hasGather ? "__pseudo_gather_base_offsets32_i16" : - "__pseudo_gather_factored_base_offsets32_i16", + "__pseudo_gather_factored_base_offsets32_i16", g->target.hasGather ? "__pseudo_gather_base_offsets32_i16" : - "__pseudo_gather_factored_base_offsets32_i16", + "__pseudo_gather_factored_base_offsets32_i16", true), GSBOInfo(g->target.hasGather ? "__pseudo_gather_base_offsets32_i32" : - "__pseudo_gather_factored_base_offsets32_i32", + "__pseudo_gather_factored_base_offsets32_i32", g->target.hasGather ? "__pseudo_gather_base_offsets32_i32" : - "__pseudo_gather_factored_base_offsets32_i32", + "__pseudo_gather_factored_base_offsets32_i32", true), GSBOInfo(g->target.hasGather ? "__pseudo_gather_base_offsets32_float" : - "__pseudo_gather_factored_base_offsets32_float", + "__pseudo_gather_factored_base_offsets32_float", g->target.hasGather ? "__pseudo_gather_base_offsets32_float" : - "__pseudo_gather_factored_base_offsets32_float", + "__pseudo_gather_factored_base_offsets32_float", true), GSBOInfo(g->target.hasGather ? "__pseudo_gather_base_offsets32_i64" : - "__pseudo_gather_factored_base_offsets32_i64", + "__pseudo_gather_factored_base_offsets32_i64", g->target.hasGather ? "__pseudo_gather_base_offsets32_i64" : - "__pseudo_gather_factored_base_offsets32_i64", + "__pseudo_gather_factored_base_offsets32_i64", true), GSBOInfo(g->target.hasGather ? "__pseudo_gather_base_offsets32_double" : - "__pseudo_gather_factored_base_offsets32_double", + "__pseudo_gather_factored_base_offsets32_double", g->target.hasGather ? "__pseudo_gather_base_offsets32_double" : - "__pseudo_gather_factored_base_offsets32_double", + "__pseudo_gather_factored_base_offsets32_double", true), GSBOInfo( g->target.hasScatter ? "__pseudo_scatter_base_offsets32_i8" : - "__pseudo_scatter_factored_base_offsets32_i8", + "__pseudo_scatter_factored_base_offsets32_i8", g->target.hasScatter ? "__pseudo_scatter_base_offsets32_i8" : - "__pseudo_scatter_factored_base_offsets32_i8", + "__pseudo_scatter_factored_base_offsets32_i8", false), GSBOInfo(g->target.hasScatter ? "__pseudo_scatter_base_offsets32_i16" : - "__pseudo_scatter_factored_base_offsets32_i16", + "__pseudo_scatter_factored_base_offsets32_i16", g->target.hasScatter ? "__pseudo_scatter_base_offsets32_i16" : - "__pseudo_scatter_factored_base_offsets32_i16", + "__pseudo_scatter_factored_base_offsets32_i16", false), GSBOInfo(g->target.hasScatter ? 
"__pseudo_scatter_base_offsets32_i32" : - "__pseudo_scatter_factored_base_offsets32_i32", + "__pseudo_scatter_factored_base_offsets32_i32", g->target.hasScatter ? "__pseudo_scatter_base_offsets32_i32" : - "__pseudo_scatter_factored_base_offsets32_i32", + "__pseudo_scatter_factored_base_offsets32_i32", false), GSBOInfo(g->target.hasScatter ? "__pseudo_scatter_base_offsets32_float" : - "__pseudo_scatter_factored_base_offsets32_float", + "__pseudo_scatter_factored_base_offsets32_float", g->target.hasScatter ? "__pseudo_scatter_base_offsets32_float" : - "__pseudo_scatter_factored_base_offsets32_float", + "__pseudo_scatter_factored_base_offsets32_float", false), GSBOInfo(g->target.hasScatter ? "__pseudo_scatter_base_offsets32_i64" : - "__pseudo_scatter_factored_base_offsets32_i64", + "__pseudo_scatter_factored_base_offsets32_i64", g->target.hasScatter ? "__pseudo_scatter_base_offsets32_i64" : - "__pseudo_scatter_factored_base_offsets32_i64", + "__pseudo_scatter_factored_base_offsets32_i64", false), GSBOInfo(g->target.hasScatter ? "__pseudo_scatter_base_offsets32_double" : - "__pseudo_scatter_factored_base_offsets32_double", + "__pseudo_scatter_factored_base_offsets32_double", g->target.hasScatter ? "__pseudo_scatter_base_offsets32_double" : - "__pseudo_scatter_factored_base_offsets32_double", + "__pseudo_scatter_factored_base_offsets32_double", false), }; @@ -2150,7 +2150,7 @@ lGSBaseOffsetsGetMoreConst(llvm::CallInst *callInst) { // Try to decompose the old variable offset llvm::Value *constOffset, *variableOffset; - lExtractConstantOffset(origVariableOffset, &constOffset, &variableOffset, + lExtractConstantOffset(origVariableOffset, &constOffset, &variableOffset, callInst); // No luck @@ -2163,7 +2163,7 @@ lGSBaseOffsetsGetMoreConst(llvm::CallInst *callInst) { // We need to scale the value we add to the constant offset by the // 2/4/8 scale for the variable offset, if present. - llvm::ConstantInt *varScale = + llvm::ConstantInt *varScale = llvm::dyn_cast(callInst->getArgOperand(2)); Assert(varScale != NULL); @@ -2202,7 +2202,7 @@ lComputeCommonPointer(llvm::Value *base, llvm::Value *offsets, static llvm::Constant * lGetOffsetScaleVec(llvm::Value *offsetScale, llvm::Type *vecType) { - llvm::ConstantInt *offsetScaleInt = + llvm::ConstantInt *offsetScaleInt = llvm::dyn_cast(offsetScale); Assert(offsetScaleInt != NULL); uint64_t scaleValue = offsetScaleInt->getZExtValue(); @@ -2239,7 +2239,7 @@ static bool lGSToLoadStore(llvm::CallInst *callInst) { struct GatherImpInfo { GatherImpInfo(const char *pName, const char *lmName, llvm::Type *st, - int a) + int a) : align(a), isFactored(!g->target.hasGather) { pseudoFunc = m->module->getFunction(pName); loadMaskedFunc = m->module->getFunction(lmName); @@ -2256,45 +2256,45 @@ lGSToLoadStore(llvm::CallInst *callInst) { GatherImpInfo gInfo[] = { GatherImpInfo(g->target.hasGather ? "__pseudo_gather_base_offsets32_i8" : - "__pseudo_gather_factored_base_offsets32_i8", + "__pseudo_gather_factored_base_offsets32_i8", "__masked_load_i8", LLVMTypes::Int8Type, 1), GatherImpInfo(g->target.hasGather ? "__pseudo_gather_base_offsets32_i16" : - "__pseudo_gather_factored_base_offsets32_i16", + "__pseudo_gather_factored_base_offsets32_i16", "__masked_load_i16", LLVMTypes::Int16Type, 2), GatherImpInfo(g->target.hasGather ? "__pseudo_gather_base_offsets32_i32" : - "__pseudo_gather_factored_base_offsets32_i32", + "__pseudo_gather_factored_base_offsets32_i32", "__masked_load_i32", LLVMTypes::Int32Type, 4), GatherImpInfo(g->target.hasGather ? 
"__pseudo_gather_base_offsets32_float" : - "__pseudo_gather_factored_base_offsets32_float", + "__pseudo_gather_factored_base_offsets32_float", "__masked_load_float", LLVMTypes::FloatType, 4), GatherImpInfo(g->target.hasGather ? "__pseudo_gather_base_offsets32_i64" : - "__pseudo_gather_factored_base_offsets32_i64", + "__pseudo_gather_factored_base_offsets32_i64", "__masked_load_i64", LLVMTypes::Int64Type, 8), GatherImpInfo(g->target.hasGather ? "__pseudo_gather_base_offsets32_double" : - "__pseudo_gather_factored_base_offsets32_double", + "__pseudo_gather_factored_base_offsets32_double", "__masked_load_double", LLVMTypes::DoubleType, 8), GatherImpInfo(g->target.hasGather ? "__pseudo_gather_base_offsets64_i8" : - "__pseudo_gather_factored_base_offsets64_i8", + "__pseudo_gather_factored_base_offsets64_i8", "__masked_load_i8", LLVMTypes::Int8Type, 1), GatherImpInfo(g->target.hasGather ? "__pseudo_gather_base_offsets64_i16" : - "__pseudo_gather_factored_base_offsets64_i16", + "__pseudo_gather_factored_base_offsets64_i16", "__masked_load_i16", LLVMTypes::Int16Type, 2), GatherImpInfo(g->target.hasGather ? "__pseudo_gather_base_offsets64_i32" : - "__pseudo_gather_factored_base_offsets64_i32", + "__pseudo_gather_factored_base_offsets64_i32", "__masked_load_i32", LLVMTypes::Int32Type, 4), GatherImpInfo(g->target.hasGather ? "__pseudo_gather_base_offsets64_float" : - "__pseudo_gather_factored_base_offsets64_float", + "__pseudo_gather_factored_base_offsets64_float", "__masked_load_float", LLVMTypes::FloatType, 4), GatherImpInfo(g->target.hasGather ? "__pseudo_gather_base_offsets64_i64" : - "__pseudo_gather_factored_base_offsets64_i64", + "__pseudo_gather_factored_base_offsets64_i64", "__masked_load_i64", LLVMTypes::Int64Type, 8), GatherImpInfo(g->target.hasGather ? "__pseudo_gather_base_offsets64_double" : - "__pseudo_gather_factored_base_offsets64_double", + "__pseudo_gather_factored_base_offsets64_double", "__masked_load_double", LLVMTypes::DoubleType, 8), }; struct ScatterImpInfo { - ScatterImpInfo(const char *pName, const char *msName, + ScatterImpInfo(const char *pName, const char *msName, llvm::Type *vpt, int a) : align(a), isFactored(!g->target.hasScatter) { pseudoFunc = m->module->getFunction(pName); @@ -2308,7 +2308,7 @@ lGSToLoadStore(llvm::CallInst *callInst) { const int align; const bool isFactored; }; - + ScatterImpInfo sInfo[] = { ScatterImpInfo(g->target.hasScatter ? "__pseudo_scatter_base_offsets32_i8" : "__pseudo_scatter_factored_base_offsets32_i8", @@ -2387,10 +2387,10 @@ lGSToLoadStore(llvm::CallInst *callInst) { mask = callInst->getArgOperand((gatherInfo != NULL) ? 
4 : 5); // Compute the full offset vector: offsetScale * varyingOffsets + constOffsets - llvm::Constant *offsetScaleVec = + llvm::Constant *offsetScaleVec = lGetOffsetScaleVec(offsetScale, varyingOffsets->getType()); - llvm::Value *scaledVarying = + llvm::Value *scaledVarying = llvm::BinaryOperator::Create(llvm::Instruction::Mul, offsetScaleVec, varyingOffsets, "scaled_varying", callInst); fullOffsets = @@ -2405,7 +2405,7 @@ lGSToLoadStore(llvm::CallInst *callInst) { llvm::Value *offsetScale = callInst->getArgOperand(1); llvm::Value *offsets = callInst->getArgOperand(2); - llvm::Value *offsetScaleVec = + llvm::Value *offsetScaleVec = lGetOffsetScaleVec(offsetScale, offsets->getType()); fullOffsets = @@ -2413,7 +2413,7 @@ lGSToLoadStore(llvm::CallInst *callInst) { offsets, "scaled_offsets", callInst); } - Debug(SourcePos(), "GSToLoadStore: %s.", + Debug(SourcePos(), "GSToLoadStore: %s.", fullOffsets->getName().str().c_str()); if (LLVMVectorValuesAllEqual(fullOffsets)) { @@ -2441,7 +2441,7 @@ lGSToLoadStore(llvm::CallInst *callInst) { callInst->getName()); } lCopyMetadata(vecValue, callInst); - llvm::ReplaceInstWithInst(callInst, + llvm::ReplaceInstWithInst(callInst, llvm::dyn_cast(vecValue)); return true; } @@ -2476,8 +2476,8 @@ lGSToLoadStore(llvm::CallInst *callInst) { if (gatherInfo != NULL) { Debug(pos, "Transformed gather to unaligned vector load!"); - llvm::Instruction *newCall = - lCallInst(gatherInfo->loadMaskedFunc, ptr, mask, + llvm::Instruction *newCall = + lCallInst(gatherInfo->loadMaskedFunc, ptr, mask, LLVMGetName(ptr, "_masked_load")); lCopyMetadata(newCall, callInst); llvm::ReplaceInstWithInst(callInst, newCall); @@ -2485,10 +2485,10 @@ lGSToLoadStore(llvm::CallInst *callInst) { } else { Debug(pos, "Transformed scatter to unaligned vector store!"); - ptr = new llvm::BitCastInst(ptr, scatterInfo->vecPtrType, "ptrcast", + ptr = new llvm::BitCastInst(ptr, scatterInfo->vecPtrType, "ptrcast", callInst); llvm::Instruction *newCall = - lCallInst(scatterInfo->maskedStoreFunc, ptr, storeValue, + lCallInst(scatterInfo->maskedStoreFunc, ptr, storeValue, mask, ""); lCopyMetadata(newCall, callInst); llvm::ReplaceInstWithInst(callInst, newCall); @@ -2512,7 +2512,7 @@ lGSToLoadStore(llvm::CallInst *callInst) { static bool lImproveMaskedStore(llvm::CallInst *callInst) { struct MSInfo { - MSInfo(const char *name, const int a) + MSInfo(const char *name, const int a) : align(a) { func = m->module->getFunction(name); Assert(func != NULL); @@ -2520,7 +2520,7 @@ lImproveMaskedStore(llvm::CallInst *callInst) { llvm::Function *func; const int align; }; - + MSInfo msInfo[] = { MSInfo("__pseudo_masked_store_i8", 1), MSInfo("__pseudo_masked_store_i16", 2), @@ -2575,7 +2575,7 @@ lImproveMaskedStore(llvm::CallInst *callInst) { lvalue = new llvm::BitCastInst(lvalue, ptrType, "lvalue_to_ptr_type", callInst); lCopyMetadata(lvalue, callInst); - llvm::Instruction *store = + llvm::Instruction *store = new llvm::StoreInst(rvalue, lvalue, false /* not volatile */, g->opt.forceAlignedMemory ? 
0 : info->align); lCopyMetadata(store, callInst); @@ -2591,7 +2591,7 @@ static bool lImproveMaskedLoad(llvm::CallInst *callInst, llvm::BasicBlock::iterator iter) { struct MLInfo { - MLInfo(const char *name, const int a) + MLInfo(const char *name, const int a) : align(a) { func = m->module->getFunction(name); Assert(func != NULL); @@ -2599,7 +2599,7 @@ lImproveMaskedLoad(llvm::CallInst *callInst, llvm::Function *func; const int align; }; - + MLInfo mlInfo[] = { MLInfo("__masked_load_i8", 1), MLInfo("__masked_load_i16", 2), @@ -2636,9 +2636,9 @@ lImproveMaskedLoad(llvm::CallInst *callInst, else if (maskStatus == ALL_ON) { // The mask is all on, so turn this into a regular load llvm::Type *ptrType = llvm::PointerType::get(callInst->getType(), 0); - ptr = new llvm::BitCastInst(ptr, ptrType, "ptr_cast_for_load", + ptr = new llvm::BitCastInst(ptr, ptrType, "ptr_cast_for_load", callInst); - llvm::Instruction *load = + llvm::Instruction *load = new llvm::LoadInst(ptr, callInst->getName(), false /* not volatile */, g->opt.forceAlignedMemory ? 0 : info->align, (llvm::Instruction *)NULL); @@ -2713,7 +2713,7 @@ CreateImproveMemoryOpsPass() { // any of scalar, 2-wide (i.e. 64-bit), 4-wide, or 8-wide loads. Further, // we generate code that shuffles these loads around. Doing fewer, larger // loads in this manner, when possible, can be more efficient. -// +// // Second, this pass can coalesce memory accesses across multiple // gathers. If we have a series of gathers without any memory writes in // the middle, then we try to analyze their reads collectively and choose @@ -2739,9 +2739,9 @@ char GatherCoalescePass::ID = 0; decided to generate. */ struct CoalescedLoadOp { - CoalescedLoadOp(int64_t s, int c) { - start = s; - count = c; + CoalescedLoadOp(int64_t s, int c) { + start = s; + count = c; load = element0 = element1 = NULL; } @@ -2769,7 +2769,7 @@ struct CoalescedLoadOp { load of the given width doesn't make sense, then false is returned. */ static bool -lVectorLoadIsEfficient(std::set::iterator iter, +lVectorLoadIsEfficient(std::set::iterator iter, std::set::iterator end, std::set::iterator *newIter, int vectorWidth) { // We're considering a vector load of width vectorWidth, starting at @@ -2785,7 +2785,7 @@ lVectorLoadIsEfficient(std::set::iterator iter, // // 2. And don't have too large a gap in between them (e.g., it's not // worth generating an 8-wide load for two elements with offsets 0 - // and 7, but no loads requested in between). + // and 7, but no loads requested in between). // // Then we continue moving forward through the elements until we either // fill up the vector or run out of elements. @@ -2844,7 +2844,7 @@ lVectorLoadIsEfficient(std::set::iterator iter, // except under contrived circumstances, but better safe // than sorry.) const int pageSize = 4096; - if (vectorWidth != 2 && + if (vectorWidth != 2 && (lastAccepted - start) > (vectorWidth / 2) && (*iter - lastAccepted) < pageSize) { *newIter = iter; @@ -2854,7 +2854,7 @@ lVectorLoadIsEfficient(std::set::iterator iter, return false; } - // Continue moving forward + // Continue moving forward lastAccepted = *iter; ++iter; } @@ -2898,7 +2898,7 @@ lSelectLoads(const std::vector &loadOffsets, // effective (i.e. would cover a reasonable number of the // offsets that need to be loaded from). 
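The efficiency test that lSelectLoads() delegates to can be modelled compactly. The sketch below is a deliberately simplified standalone version, an assumption about the net effect rather than the function itself: the real code also tracks a lastAccepted offset and refuses loads whose trailing gap crosses a 4096-byte page boundary.

    #include <cstdint>
    #include <set>

    // Accept a vectorWidth-wide load starting at *iter when it would
    // satisfy more than half of the requested offsets inside its span;
    // on success, advance *newIter past the offsets the load covers.
    static bool vectorLoadIsEfficient(std::set<int64_t>::iterator iter,
                                      std::set<int64_t>::iterator end,
                                      std::set<int64_t>::iterator *newIter,
                                      int vectorWidth) {
        const int64_t start = *iter;
        int covered = 0;
        while (iter != end && *iter < start + vectorWidth) {
            ++covered;
            ++iter;
        }
        if (covered > vectorWidth / 2) {   // load at least half full
            *newIter = iter;
            return true;
        }
        return false;
    }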
std::set::iterator newIter; - if (lVectorLoadIsEfficient(iter, allOffsets.end(), &newIter, + if (lVectorLoadIsEfficient(iter, allOffsets.end(), &newIter, vectorWidths[i])) { // Yes: create the corresponding coalesced load and update // the iterator to the returned iterator; doing so skips @@ -2910,7 +2910,7 @@ lSelectLoads(const std::vector &loadOffsets, break; } } - + if (gotOne == false) { // We couldn't find a vector load starting from this offset // that made sense, so emit a scalar load and continue onward. @@ -2958,7 +2958,7 @@ lCoalescePerfInfo(const std::vector &coalesceGroup, for (int i = 0; i < (int)loadOps.size(); ++i) ++loadOpsCount[loadOps[i].count]; - // Generate a string the describes the mix of load ops + // Generate a string the describes the mix of load ops char loadOpsInfo[512]; loadOpsInfo[0] = '\0'; std::map::const_iterator iter = loadOpsCount.begin(); @@ -2973,12 +2973,12 @@ lCoalescePerfInfo(const std::vector &coalesceGroup, if (coalesceGroup.size() == 1) PerformanceWarning(pos, "Coalesced gather into %d load%s (%s).", - (int)loadOps.size(), + (int)loadOps.size(), (loadOps.size() > 1) ? "s" : "", loadOpsInfo); else PerformanceWarning(pos, "Coalesced %d gathers starting here %sinto %d " - "load%s (%s).", (int)coalesceGroup.size(), - otherPositions,(int)loadOps.size(), + "load%s (%s).", (int)coalesceGroup.size(), + otherPositions,(int)loadOps.size(), (loadOps.size() > 1) ? "s" : "", loadOpsInfo); } @@ -3006,11 +3006,11 @@ lGEPAndLoad(llvm::Value *basePtr, int64_t offset, int align, instructions. */ static void -lEmitLoads(llvm::Value *basePtr, std::vector &loadOps, +lEmitLoads(llvm::Value *basePtr, std::vector &loadOps, int elementSize, llvm::Instruction *insertBefore) { Debug(SourcePos(), "Coalesce doing %d loads.", (int)loadOps.size()); for (int i = 0; i < (int)loadOps.size(); ++i) { - Debug(SourcePos(), "Load #%d @ %" PRId64 ", %d items", i, loadOps[i].start, + Debug(SourcePos(), "Load #%d @ %" PRId64 ", %d items", i, loadOps[i].start, loadOps[i].count); // basePtr is an i8 *, so the offset from it should be in terms of @@ -3030,7 +3030,7 @@ lEmitLoads(llvm::Value *basePtr, std::vector &loadOps, loadOps[i].load = lGEPAndLoad(basePtr, start, align, insertBefore, LLVMTypes::Int64Type); // element0 = (int32)value; - loadOps[i].element0 = + loadOps[i].element0 = new llvm::TruncInst(loadOps[i].load, LLVMTypes::Int32Type, "load64_elt0", insertBefore); // element1 = (int32)(value >> 32) @@ -3038,7 +3038,7 @@ lEmitLoads(llvm::Value *basePtr, std::vector &loadOps, llvm::BinaryOperator::Create(llvm::Instruction::LShr, loadOps[i].load, LLVMInt64(32), "load64_shift", insertBefore); - loadOps[i].element1 = + loadOps[i].element1 = new llvm::TruncInst(shift, LLVMTypes::Int32Type, "load64_elt1", insertBefore); break; @@ -3055,7 +3055,7 @@ lEmitLoads(llvm::Value *basePtr, std::vector &loadOps, // 8-wide vector load llvm::VectorType *vt = llvm::VectorType::get(LLVMTypes::Int32Type, 8); - loadOps[i].load = lGEPAndLoad(basePtr, start, align, + loadOps[i].load = lGEPAndLoad(basePtr, start, align, insertBefore, vt); break; } @@ -3072,7 +3072,7 @@ lEmitLoads(llvm::Value *basePtr, std::vector &loadOps, load operations. */ static std::vector -lSplit8WideLoads(const std::vector &loadOps, +lSplit8WideLoads(const std::vector &loadOps, llvm::Instruction *insertBefore) { std::vector ret; for (unsigned int i = 0; i < loadOps.size(); ++i) { @@ -3102,7 +3102,7 @@ lSplit8WideLoads(const std::vector &loadOps, vector for any and all elements for which it applies. 
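The count == 2 case in lEmitLoads() above is the one nonobvious step: two requested 32-bit elements are fetched with a single 64-bit load and then peeled apart with a truncate and a shift-plus-truncate. The same bit manipulation in plain, runnable C++:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // One 64-bit load standing in for two adjacent 32-bit elements.
        uint64_t load = ((uint64_t)0x22222222u << 32) | 0x11111111u;
        uint32_t element0 = (uint32_t)load;          // trunc: low half
        uint32_t element1 = (uint32_t)(load >> 32);  // lshr 32, then trunc
        std::printf("%08x %08x\n", element0, element1);  // 11111111 22222222
        return 0;
    }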
*/ static llvm::Value * -lApplyLoad1(llvm::Value *result, const CoalescedLoadOp &load, +lApplyLoad1(llvm::Value *result, const CoalescedLoadOp &load, const int64_t offsets[4], bool set[4], llvm::Instruction *insertBefore) { for (int elt = 0; elt < 4; ++elt) { @@ -3113,7 +3113,7 @@ lApplyLoad1(llvm::Value *result, const CoalescedLoadOp &load, // If this load gives one of the values that we need, then we // can just insert it in directly Assert(set[elt] == false); - result = + result = llvm::InsertElementInst::Create(result, load.load, LLVMInt32(elt), "insert_load", insertBefore); set[elt] = true; @@ -3128,7 +3128,7 @@ lApplyLoad1(llvm::Value *result, const CoalescedLoadOp &load, elements that they apply to. */ static llvm::Value * lApplyLoad2(llvm::Value *result, const CoalescedLoadOp &load, - const int64_t offsets[4], bool set[4], + const int64_t offsets[4], bool set[4], llvm::Instruction *insertBefore) { for (int elt = 0; elt < 4; ++elt) { // First, try to do a 64-bit-wide insert into the result vector. @@ -3145,7 +3145,7 @@ lApplyLoad2(llvm::Value *result, const CoalescedLoadOp &load, Assert(set[elt] == false && set[elt+1] == false); // In this case, we bitcast from a 4xi32 to a 2xi64 vector - llvm::Type *vec2x64Type = + llvm::Type *vec2x64Type = llvm::VectorType::get(LLVMTypes::Int64Type, 2); result = new llvm::BitCastInst(result, vec2x64Type, "to2x64", insertBefore); @@ -3153,11 +3153,11 @@ lApplyLoad2(llvm::Value *result, const CoalescedLoadOp &load, // And now we can insert the 64-bit wide value into the // appropriate elment result = llvm::InsertElementInst::Create(result, load.load, - LLVMInt32(elt/2), + LLVMInt32(elt/2), "insert64", insertBefore); - + // And back to 4xi32. - llvm::Type *vec4x32Type = + llvm::Type *vec4x32Type = llvm::VectorType::get(LLVMTypes::Int32Type, 4); result = new llvm::BitCastInst(result, vec4x32Type, "to4x32", insertBefore); @@ -3167,7 +3167,7 @@ lApplyLoad2(llvm::Value *result, const CoalescedLoadOp &load, // elements ++elt; } - else if (offsets[elt] >= load.start && + else if (offsets[elt] >= load.start && offsets[elt] < load.start + load.count) { Debug(SourcePos(), "Load 2 @ %" PRId64 " matches for element #%d " "(value %" PRId64 ")", load.start, elt, offsets[elt]); @@ -3176,7 +3176,7 @@ lApplyLoad2(llvm::Value *result, const CoalescedLoadOp &load, Assert(set[elt] == false); llvm::Value *toInsert = (offsets[elt] == load.start) ? 
load.element0 : load.element1; - result = + result = llvm::InsertElementInst::Create(result, toInsert, LLVMInt32(elt), "insert_load", insertBefore); set[elt] = true; @@ -3195,7 +3195,7 @@ lApplyLoad2(llvm::Value *result, const CoalescedLoadOp &load, /** And handle a 4-wide load */ static llvm::Value * lApplyLoad4(llvm::Value *result, const CoalescedLoadOp &load, - const int64_t offsets[4], bool set[4], + const int64_t offsets[4], bool set[4], llvm::Instruction *insertBefore) { // Conceptually, we're doing to consider doing a shuffle vector with // the 4-wide load and the 4-wide result we have so far to generate a @@ -3204,11 +3204,11 @@ lApplyLoad4(llvm::Value *result, const CoalescedLoadOp &load, int32_t shuf[4] = { 4, 5, 6, 7 }; for (int elt = 0; elt < 4; ++elt) { - if (offsets[elt] >= load.start && + if (offsets[elt] >= load.start && offsets[elt] < load.start + load.count) { Debug(SourcePos(), "Load 4 @ %" PRId64 " matches for element #%d " "(value %" PRId64 ")", load.start, elt, offsets[elt]); - + // If the current element falls within the range of locations // that the 4-wide load covers, then compute the appropriate // shuffle index that extracts the appropriate element from the @@ -3235,13 +3235,13 @@ lApplyLoad4(llvm::Value *result, const CoalescedLoadOp &load, for the four elements of the result. */ static llvm::Value * -lAssemble4Vector(const std::vector &loadOps, +lAssemble4Vector(const std::vector &loadOps, const int64_t offsets[4], llvm::Instruction *insertBefore) { - llvm::Type *returnType = + llvm::Type *returnType = llvm::VectorType::get(LLVMTypes::Int32Type, 4); llvm::Value *result = llvm::UndefValue::get(returnType); - Debug(SourcePos(), "Starting search for loads [%" PRId64 " %" PRId64 " %" + Debug(SourcePos(), "Starting search for loads [%" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 "].", offsets[0], offsets[1], offsets[2], offsets[3]); // Track whether we have found a valid value for each of the four @@ -3281,7 +3281,7 @@ lAssemble4Vector(const std::vector &loadOps, static llvm::Value * lApplyLoad4s(llvm::Value *result, const std::vector &loadOps, - const int64_t offsets[4], bool set[4], + const int64_t offsets[4], bool set[4], llvm::Instruction *insertBefore) { int32_t firstMatchElements[4] = { -1, -1, -1, -1 }; const CoalescedLoadOp *firstMatch = NULL; @@ -3296,7 +3296,7 @@ lApplyLoad4s(llvm::Value *result, const std::vector &loadOps, int32_t matchElements[4] = { -1, -1, -1, -1 }; bool anyMatched = false; for (int elt = 0; elt < 4; ++elt) { - if (offsets[elt] >= loadop.start && + if (offsets[elt] >= loadop.start && offsets[elt] < loadop.start + loadop.count) { Debug(SourcePos(), "Load 4 @ %" PRId64 " matches for element #%d " "(value %" PRId64 ")", loadop.start, elt, offsets[elt]); @@ -3350,8 +3350,8 @@ lApplyLoad4s(llvm::Value *result, const std::vector &loadOps, static llvm::Value * -lApplyLoad12s(llvm::Value *result, const std::vector &loadOps, - const int64_t offsets[4], bool set[4], +lApplyLoad12s(llvm::Value *result, const std::vector &loadOps, + const int64_t offsets[4], bool set[4], llvm::Instruction *insertBefore) { // Loop over all of the loads and check each one to see if it provides // a value that's applicable to the result @@ -3375,13 +3375,13 @@ lApplyLoad12s(llvm::Value *result, const std::vector &loadOps, for the four elements of the result. 
*/ static llvm::Value * -lAssemble4Vector(const std::vector &loadOps, +lAssemble4Vector(const std::vector &loadOps, const int64_t offsets[4], llvm::Instruction *insertBefore) { - llvm::Type *returnType = + llvm::Type *returnType = llvm::VectorType::get(LLVMTypes::Int32Type, 4); llvm::Value *result = llvm::UndefValue::get(returnType); - Debug(SourcePos(), "Starting search for loads [%" PRId64 " %" PRId64 " %" + Debug(SourcePos(), "Starting search for loads [%" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 "].", offsets[0], offsets[1], offsets[2], offsets[3]); // Track whether we have found a valid value for each of the four @@ -3407,7 +3407,7 @@ lAssemble4Vector(const std::vector &loadOps, result vector. */ static void -lAssembleResultVectors(const std::vector &loadOps, +lAssembleResultVectors(const std::vector &loadOps, const std::vector &constOffsets, std::vector &results, llvm::Instruction *insertBefore) { @@ -3460,7 +3460,7 @@ lComputeBasePtr(llvm::CallInst *gatherInst, llvm::Instruction *insertBefore) { llvm::Value *basePtr = gatherInst->getArgOperand(0); llvm::Value *variableOffsets = gatherInst->getArgOperand(1); llvm::Value *offsetScale = gatherInst->getArgOperand(2); - + // All of the variable offsets values should be the same, due to // checking for this in GatherCoalescePass::runOnBasicBlock(). Thus, // extract the first value and use that as a scalar. @@ -3468,8 +3468,8 @@ lComputeBasePtr(llvm::CallInst *gatherInst, llvm::Instruction *insertBefore) { if (variable->getType() == LLVMTypes::Int64Type) offsetScale = new llvm::ZExtInst(offsetScale, LLVMTypes::Int64Type, "scale_to64", insertBefore); - llvm::Value *offset = - llvm::BinaryOperator::Create(llvm::Instruction::Mul, variable, + llvm::Value *offset = + llvm::BinaryOperator::Create(llvm::Instruction::Mul, variable, offsetScale, "offset", insertBefore); return lGEPInst(basePtr, offset, "new_base", insertBefore); @@ -3566,7 +3566,7 @@ lCoalesceGathers(const std::vector &coalesceGroup) { llvm::Type *origType = coalesceGroup[i]->getType(); if (origType != ir->getType()) - ir = new llvm::BitCastInst(ir, origType, ir->getName(), + ir = new llvm::BitCastInst(ir, origType, ir->getName(), coalesceGroup[i]); // Previously, all of the instructions to compute the final result @@ -3803,7 +3803,7 @@ lIsSafeToBlend(llvm::Value *lvalue) { llvm::AllocaInst *ai = llvm::dyn_cast(lvalue); if (ai) { llvm::Type *type = ai->getType(); - llvm::PointerType *pt = + llvm::PointerType *pt = llvm::dyn_cast(type); assert(pt != NULL); type = pt->getElementType(); @@ -3811,13 +3811,13 @@ lIsSafeToBlend(llvm::Value *lvalue) { while ((at = llvm::dyn_cast(type))) { type = at->getElementType(); } - llvm::VectorType *vt = + llvm::VectorType *vt = llvm::dyn_cast(type); - return (vt != NULL && + return (vt != NULL && (int)vt->getNumElements() == g->target.vectorWidth); } else { - llvm::GetElementPtrInst *gep = + llvm::GetElementPtrInst *gep = llvm::dyn_cast(lvalue); if (gep != NULL) return lIsSafeToBlend(gep->getOperand(0)); @@ -3835,7 +3835,7 @@ lReplacePseudoMaskedStore(llvm::CallInst *callInst) { pseudoFunc = m->module->getFunction(pname); blendFunc = m->module->getFunction(bname); maskedStoreFunc = m->module->getFunction(msname); - Assert(pseudoFunc != NULL && blendFunc != NULL && + Assert(pseudoFunc != NULL && blendFunc != NULL && maskedStoreFunc != NULL); } llvm::Function *pseudoFunc; @@ -3844,17 +3844,17 @@ lReplacePseudoMaskedStore(llvm::CallInst *callInst) { }; LMSInfo msInfo[] = { - LMSInfo("__pseudo_masked_store_i8", "__masked_store_blend_i8", + 
LMSInfo("__pseudo_masked_store_i8", "__masked_store_blend_i8", "__masked_store_i8"), - LMSInfo("__pseudo_masked_store_i16", "__masked_store_blend_i16", + LMSInfo("__pseudo_masked_store_i16", "__masked_store_blend_i16", "__masked_store_i16"), - LMSInfo("__pseudo_masked_store_i32", "__masked_store_blend_i32", + LMSInfo("__pseudo_masked_store_i32", "__masked_store_blend_i32", "__masked_store_i32"), - LMSInfo("__pseudo_masked_store_float", "__masked_store_blend_float", + LMSInfo("__pseudo_masked_store_float", "__masked_store_blend_float", "__masked_store_float"), - LMSInfo("__pseudo_masked_store_i64", "__masked_store_blend_i64", + LMSInfo("__pseudo_masked_store_i64", "__masked_store_blend_i64", "__masked_store_i64"), - LMSInfo("__pseudo_masked_store_double", "__masked_store_blend_double", + LMSInfo("__pseudo_masked_store_double", "__masked_store_blend_double", "__masked_store_double") }; @@ -3891,7 +3891,7 @@ lReplacePseudoMaskedStore(llvm::CallInst *callInst) { } -static bool +static bool lReplacePseudoGS(llvm::CallInst *callInst) { struct LowerGSInfo { LowerGSInfo(const char *pName, const char *aName, bool ig) @@ -3919,56 +3919,56 @@ lReplacePseudoGS(llvm::CallInst *callInst) { LowerGSInfo("__pseudo_gather64_i64", "__gather64_i64", true), LowerGSInfo("__pseudo_gather64_double", "__gather64_double", true), - LowerGSInfo("__pseudo_gather_factored_base_offsets32_i8", + LowerGSInfo("__pseudo_gather_factored_base_offsets32_i8", "__gather_factored_base_offsets32_i8", true), - LowerGSInfo("__pseudo_gather_factored_base_offsets32_i16", + LowerGSInfo("__pseudo_gather_factored_base_offsets32_i16", "__gather_factored_base_offsets32_i16", true), - LowerGSInfo("__pseudo_gather_factored_base_offsets32_i32", + LowerGSInfo("__pseudo_gather_factored_base_offsets32_i32", "__gather_factored_base_offsets32_i32", true), LowerGSInfo("__pseudo_gather_factored_base_offsets32_float", "__gather_factored_base_offsets32_float", true), - LowerGSInfo("__pseudo_gather_factored_base_offsets32_i64", + LowerGSInfo("__pseudo_gather_factored_base_offsets32_i64", "__gather_factored_base_offsets32_i64", true), - LowerGSInfo("__pseudo_gather_factored_base_offsets32_double", + LowerGSInfo("__pseudo_gather_factored_base_offsets32_double", "__gather_factored_base_offsets32_double", true), - LowerGSInfo("__pseudo_gather_factored_base_offsets64_i8", + LowerGSInfo("__pseudo_gather_factored_base_offsets64_i8", "__gather_factored_base_offsets64_i8", true), - LowerGSInfo("__pseudo_gather_factored_base_offsets64_i16", + LowerGSInfo("__pseudo_gather_factored_base_offsets64_i16", "__gather_factored_base_offsets64_i16", true), - LowerGSInfo("__pseudo_gather_factored_base_offsets64_i32", + LowerGSInfo("__pseudo_gather_factored_base_offsets64_i32", "__gather_factored_base_offsets64_i32", true), - LowerGSInfo("__pseudo_gather_factored_base_offsets64_float", + LowerGSInfo("__pseudo_gather_factored_base_offsets64_float", "__gather_factored_base_offsets64_float", true), - LowerGSInfo("__pseudo_gather_factored_base_offsets64_i64", + LowerGSInfo("__pseudo_gather_factored_base_offsets64_i64", "__gather_factored_base_offsets64_i64", true), - LowerGSInfo("__pseudo_gather_factored_base_offsets64_double", + LowerGSInfo("__pseudo_gather_factored_base_offsets64_double", "__gather_factored_base_offsets64_double", true), - LowerGSInfo("__pseudo_gather_base_offsets32_i8", + LowerGSInfo("__pseudo_gather_base_offsets32_i8", "__gather_base_offsets32_i8", true), - LowerGSInfo("__pseudo_gather_base_offsets32_i16", + LowerGSInfo("__pseudo_gather_base_offsets32_i16", 
"__gather_base_offsets32_i16", true), - LowerGSInfo("__pseudo_gather_base_offsets32_i32", + LowerGSInfo("__pseudo_gather_base_offsets32_i32", "__gather_base_offsets32_i32", true), LowerGSInfo("__pseudo_gather_base_offsets32_float", "__gather_base_offsets32_float", true), - LowerGSInfo("__pseudo_gather_base_offsets32_i64", + LowerGSInfo("__pseudo_gather_base_offsets32_i64", "__gather_base_offsets32_i64", true), - LowerGSInfo("__pseudo_gather_base_offsets32_double", + LowerGSInfo("__pseudo_gather_base_offsets32_double", "__gather_base_offsets32_double", true), - LowerGSInfo("__pseudo_gather_base_offsets64_i8", + LowerGSInfo("__pseudo_gather_base_offsets64_i8", "__gather_base_offsets64_i8", true), - LowerGSInfo("__pseudo_gather_base_offsets64_i16", + LowerGSInfo("__pseudo_gather_base_offsets64_i16", "__gather_base_offsets64_i16", true), - LowerGSInfo("__pseudo_gather_base_offsets64_i32", + LowerGSInfo("__pseudo_gather_base_offsets64_i32", "__gather_base_offsets64_i32", true), - LowerGSInfo("__pseudo_gather_base_offsets64_float", + LowerGSInfo("__pseudo_gather_base_offsets64_float", "__gather_base_offsets64_float", true), - LowerGSInfo("__pseudo_gather_base_offsets64_i64", + LowerGSInfo("__pseudo_gather_base_offsets64_i64", "__gather_base_offsets64_i64", true), - LowerGSInfo("__pseudo_gather_base_offsets64_double", + LowerGSInfo("__pseudo_gather_base_offsets64_double", "__gather_base_offsets64_double", true), LowerGSInfo("__pseudo_scatter32_i8", "__scatter32_i8", false), @@ -3985,57 +3985,57 @@ lReplacePseudoGS(llvm::CallInst *callInst) { LowerGSInfo("__pseudo_scatter64_i64", "__scatter64_i64", false), LowerGSInfo("__pseudo_scatter64_double", "__scatter64_double", false), - LowerGSInfo("__pseudo_scatter_factored_base_offsets32_i8", + LowerGSInfo("__pseudo_scatter_factored_base_offsets32_i8", "__scatter_factored_base_offsets32_i8", false), - LowerGSInfo("__pseudo_scatter_factored_base_offsets32_i16", + LowerGSInfo("__pseudo_scatter_factored_base_offsets32_i16", "__scatter_factored_base_offsets32_i16", false), - LowerGSInfo("__pseudo_scatter_factored_base_offsets32_i32", + LowerGSInfo("__pseudo_scatter_factored_base_offsets32_i32", "__scatter_factored_base_offsets32_i32", false), - LowerGSInfo("__pseudo_scatter_factored_base_offsets32_float", + LowerGSInfo("__pseudo_scatter_factored_base_offsets32_float", "__scatter_factored_base_offsets32_float", false), - LowerGSInfo("__pseudo_scatter_factored_base_offsets32_i64", + LowerGSInfo("__pseudo_scatter_factored_base_offsets32_i64", "__scatter_factored_base_offsets32_i64", false), LowerGSInfo("__pseudo_scatter_factored_base_offsets32_double", "__scatter_factored_base_offsets32_double", false), - LowerGSInfo("__pseudo_scatter_factored_base_offsets64_i8", + LowerGSInfo("__pseudo_scatter_factored_base_offsets64_i8", "__scatter_factored_base_offsets64_i8", false), - LowerGSInfo("__pseudo_scatter_factored_base_offsets64_i16", + LowerGSInfo("__pseudo_scatter_factored_base_offsets64_i16", "__scatter_factored_base_offsets64_i16", false), - LowerGSInfo("__pseudo_scatter_factored_base_offsets64_i32", + LowerGSInfo("__pseudo_scatter_factored_base_offsets64_i32", "__scatter_factored_base_offsets64_i32", false), LowerGSInfo("__pseudo_scatter_factored_base_offsets64_float", "__scatter_factored_base_offsets64_float", false), - LowerGSInfo("__pseudo_scatter_factored_base_offsets64_i64", + LowerGSInfo("__pseudo_scatter_factored_base_offsets64_i64", "__scatter_factored_base_offsets64_i64", false), - LowerGSInfo("__pseudo_scatter_factored_base_offsets64_double", + 
LowerGSInfo("__pseudo_scatter_factored_base_offsets64_double", "__scatter_factored_base_offsets64_double", false), - LowerGSInfo("__pseudo_scatter_base_offsets32_i8", + LowerGSInfo("__pseudo_scatter_base_offsets32_i8", "__scatter_base_offsets32_i8", false), - LowerGSInfo("__pseudo_scatter_base_offsets32_i16", + LowerGSInfo("__pseudo_scatter_base_offsets32_i16", "__scatter_base_offsets32_i16", false), - LowerGSInfo("__pseudo_scatter_base_offsets32_i32", + LowerGSInfo("__pseudo_scatter_base_offsets32_i32", "__scatter_base_offsets32_i32", false), - LowerGSInfo("__pseudo_scatter_base_offsets32_float", + LowerGSInfo("__pseudo_scatter_base_offsets32_float", "__scatter_base_offsets32_float", false), - LowerGSInfo("__pseudo_scatter_base_offsets32_i64", + LowerGSInfo("__pseudo_scatter_base_offsets32_i64", "__scatter_base_offsets32_i64", false), LowerGSInfo("__pseudo_scatter_base_offsets32_double", "__scatter_base_offsets32_double", false), - LowerGSInfo("__pseudo_scatter_base_offsets64_i8", + LowerGSInfo("__pseudo_scatter_base_offsets64_i8", "__scatter_base_offsets64_i8", false), - LowerGSInfo("__pseudo_scatter_base_offsets64_i16", + LowerGSInfo("__pseudo_scatter_base_offsets64_i16", "__scatter_base_offsets64_i16", false), - LowerGSInfo("__pseudo_scatter_base_offsets64_i32", + LowerGSInfo("__pseudo_scatter_base_offsets64_i32", "__scatter_base_offsets64_i32", false), LowerGSInfo("__pseudo_scatter_base_offsets64_float", "__scatter_base_offsets64_float", false), - LowerGSInfo("__pseudo_scatter_base_offsets64_i64", + LowerGSInfo("__pseudo_scatter_base_offsets64_i64", "__scatter_base_offsets64_i64", false), - LowerGSInfo("__pseudo_scatter_base_offsets64_double", + LowerGSInfo("__pseudo_scatter_base_offsets64_double", "__scatter_base_offsets64_double", false), }; @@ -4118,7 +4118,7 @@ CreateReplacePseudoMemoryOpsPass() { This pass resolves these calls into either 'true' or 'false' values so that later optimization passes can operate with these as constants. - See stdlib.m4 for a number of uses of this idiom. + See stdlib.m4 for a number of uses of this idiom. 
*/ class IsCompileTimeConstantPass : public llvm::BasicBlockPass { @@ -4159,7 +4159,7 @@ IsCompileTimeConstantPass::runOnBasicBlock(llvm::BasicBlock &bb) { int j; int nFuncs = sizeof(funcs) / sizeof(funcs[0]); for (j = 0; j < nFuncs; ++j) { - if (funcs[j] != NULL && callInst->getCalledFunction() == funcs[j]) + if (funcs[j] != NULL && callInst->getCalledFunction() == funcs[j]) break; } if (j == nFuncs) @@ -4242,7 +4242,7 @@ char MakeInternalFuncsStaticPass::ID = 0; bool MakeInternalFuncsStaticPass::runOnModule(llvm::Module &module) { const char *names[] = { - "__fast_masked_vload", + "__fast_masked_vload", "__gather_factored_base_offsets32_i8", "__gather_factored_base_offsets32_i16", "__gather_factored_base_offsets32_i32", "__gather_factored_base_offsets32_i64", "__gather_factored_base_offsets32_float", "__gather_factored_base_offsets32_double", @@ -4261,12 +4261,12 @@ MakeInternalFuncsStaticPass::runOnModule(llvm::Module &module) { "__gather64_i8", "__gather64_i16", "__gather64_i32", "__gather64_i64", "__gather64_float", "__gather64_double", - "__gather_elt32_i8", "__gather_elt32_i16", - "__gather_elt32_i32", "__gather_elt32_i64", - "__gather_elt32_float", "__gather_elt32_double", - "__gather_elt64_i8", "__gather_elt64_i16", - "__gather_elt64_i32", "__gather_elt64_i64", - "__gather_elt64_float", "__gather_elt64_double", + "__gather_elt32_i8", "__gather_elt32_i16", + "__gather_elt32_i32", "__gather_elt32_i64", + "__gather_elt32_float", "__gather_elt32_double", + "__gather_elt64_i8", "__gather_elt64_i16", + "__gather_elt64_i32", "__gather_elt64_i64", + "__gather_elt64_float", "__gather_elt64_double", "__masked_load_i8", "__masked_load_i16", "__masked_load_i32", "__masked_load_i64", "__masked_load_float", "__masked_load_double", @@ -4288,12 +4288,12 @@ MakeInternalFuncsStaticPass::runOnModule(llvm::Module &module) { "__scatter_base_offsets64_i8", "__scatter_base_offsets64_i16", "__scatter_base_offsets64_i32", "__scatter_base_offsets64_i64", "__scatter_base_offsets64_float", "__scatter_base_offsets64_double", - "__scatter_elt32_i8", "__scatter_elt32_i16", - "__scatter_elt32_i32", "__scatter_elt32_i64", - "__scatter_elt32_float", "__scatter_elt32_double", - "__scatter_elt64_i8", "__scatter_elt64_i16", - "__scatter_elt64_i32", "__scatter_elt64_i64", - "__scatter_elt64_float", "__scatter_elt64_double", + "__scatter_elt32_i8", "__scatter_elt32_i16", + "__scatter_elt32_i32", "__scatter_elt32_i64", + "__scatter_elt32_float", "__scatter_elt32_double", + "__scatter_elt64_i8", "__scatter_elt64_i16", + "__scatter_elt64_i32", "__scatter_elt64_i64", + "__scatter_elt64_float", "__scatter_elt64_double", "__scatter32_i8", "__scatter32_i16", "__scatter32_i32", "__scatter32_i64", "__scatter32_float", "__scatter32_double", diff --git a/opt.h b/opt.h index 7584fd71..63c5d5b4 100644 --- a/opt.h +++ b/opt.h @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file opt.h @@ -43,7 +43,7 @@ /** Optimize the functions in the given module, applying the specified level of optimization. optLevel zero corresponds to essentially no optimization--just enough to generate correct code, while level one - corresponds to full optimization. + corresponds to full optimization. 
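For the function list in MakeInternalFuncsStaticPass::runOnModule() above, the per-function work amounts to a linkage change, so that each builtin can be inlined and then dropped once unused. A minimal sketch against the LLVM C++ API (the header path varies across the LLVM versions this code supports):

    #include "llvm/IR/Module.h"   // "llvm/Module.h" on older releases

    // Give one internal builtin static (internal) linkage.
    static void lMakeFuncStatic(llvm::Module &module, const char *name) {
        llvm::Function *f = module.getFunction(name);
        if (f != NULL)
            f->setLinkage(llvm::GlobalValue::InternalLinkage);
    }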
*/ void Optimize(llvm::Module *module, int optLevel); diff --git a/parse.yy b/parse.yy index ed040146..d50ec680 100644 --- a/parse.yy +++ b/parse.yy @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ %locations @@ -125,14 +125,14 @@ static const char *lBuiltinTokens[] = { "foreach_unique", "goto", "if", "in", "inline", "int", "int8", "int16", "int32", "int64", "launch", "new", "NULL", "print", "return", "signed", "sizeof", "static", "struct", "switch", - "sync", "task", "true", "typedef", "uniform", "unmasked", "unsigned", - "varying", "void", "while", NULL + "sync", "task", "true", "typedef", "uniform", "unmasked", "unsigned", + "varying", "void", "while", NULL }; static const char *lParamListTokens[] = { "bool", "const", "double", "enum", "false", "float", "int", "int8", "int16", "int32", "int64", "signed", "struct", "true", - "uniform", "unsigned", "varying", "void", NULL + "uniform", "unsigned", "varying", "void", NULL }; struct ForeachDimension { @@ -179,27 +179,27 @@ struct ForeachDimension { } -%token TOKEN_INT32_CONSTANT TOKEN_UINT32_CONSTANT -%token TOKEN_INT64_CONSTANT TOKEN_UINT64_CONSTANT -%token TOKEN_INT32DOTDOTDOT_CONSTANT TOKEN_UINT32DOTDOTDOT_CONSTANT +%token TOKEN_INT32_CONSTANT TOKEN_UINT32_CONSTANT +%token TOKEN_INT64_CONSTANT TOKEN_UINT64_CONSTANT +%token TOKEN_INT32DOTDOTDOT_CONSTANT TOKEN_UINT32DOTDOTDOT_CONSTANT %token TOKEN_INT64DOTDOTDOT_CONSTANT TOKEN_UINT64DOTDOTDOT_CONSTANT %token TOKEN_FLOAT_CONSTANT TOKEN_STRING_C_LITERAL %token TOKEN_IDENTIFIER TOKEN_STRING_LITERAL TOKEN_TYPE_NAME TOKEN_NULL -%token TOKEN_PTR_OP TOKEN_INC_OP TOKEN_DEC_OP TOKEN_LEFT_OP TOKEN_RIGHT_OP +%token TOKEN_PTR_OP TOKEN_INC_OP TOKEN_DEC_OP TOKEN_LEFT_OP TOKEN_RIGHT_OP %token TOKEN_LE_OP TOKEN_GE_OP TOKEN_EQ_OP TOKEN_NE_OP -%token TOKEN_AND_OP TOKEN_OR_OP TOKEN_MUL_ASSIGN TOKEN_DIV_ASSIGN TOKEN_MOD_ASSIGN -%token TOKEN_ADD_ASSIGN TOKEN_SUB_ASSIGN TOKEN_LEFT_ASSIGN TOKEN_RIGHT_ASSIGN +%token TOKEN_AND_OP TOKEN_OR_OP TOKEN_MUL_ASSIGN TOKEN_DIV_ASSIGN TOKEN_MOD_ASSIGN +%token TOKEN_ADD_ASSIGN TOKEN_SUB_ASSIGN TOKEN_LEFT_ASSIGN TOKEN_RIGHT_ASSIGN %token TOKEN_AND_ASSIGN TOKEN_OR_ASSIGN TOKEN_XOR_ASSIGN %token TOKEN_SIZEOF TOKEN_NEW TOKEN_DELETE TOKEN_IN %token TOKEN_EXTERN TOKEN_EXPORT TOKEN_STATIC TOKEN_INLINE TOKEN_TASK TOKEN_DECLSPEC %token TOKEN_UNIFORM TOKEN_VARYING TOKEN_TYPEDEF TOKEN_SOA TOKEN_UNMASKED %token TOKEN_CHAR TOKEN_INT TOKEN_SIGNED TOKEN_UNSIGNED TOKEN_FLOAT TOKEN_DOUBLE -%token TOKEN_INT8 TOKEN_INT16 TOKEN_INT64 TOKEN_CONST TOKEN_VOID TOKEN_BOOL +%token TOKEN_INT8 TOKEN_INT16 TOKEN_INT64 TOKEN_CONST TOKEN_VOID TOKEN_BOOL %token TOKEN_ENUM TOKEN_STRUCT TOKEN_TRUE TOKEN_FALSE %token TOKEN_CASE TOKEN_DEFAULT TOKEN_IF TOKEN_ELSE TOKEN_SWITCH -%token TOKEN_WHILE TOKEN_DO TOKEN_LAUNCH TOKEN_FOREACH TOKEN_FOREACH_TILED +%token TOKEN_WHILE TOKEN_DO TOKEN_LAUNCH TOKEN_FOREACH TOKEN_FOREACH_TILED %token TOKEN_FOREACH_UNIQUE TOKEN_FOREACH_ACTIVE TOKEN_DOTDOTDOT %token TOKEN_FOR TOKEN_GOTO TOKEN_CONTINUE TOKEN_BREAK TOKEN_RETURN %token TOKEN_CIF TOKEN_CDO TOKEN_CFOR TOKEN_CWHILE @@ -221,7 +221,7 @@ struct ForeachDimension { %type assert_statement sync_statement delete_statement unmasked_statement %type declaration parameter_declaration -%type init_declarator_list +%type 
init_declarator_list %type parameter_list parameter_type_list %type declarator pointer reference %type init_declarator direct_declarator struct_declarator @@ -244,10 +244,10 @@ struct ForeachDimension { %type type_qualifier type_qualifier_list %type storage_class_specifier -%type declaration_specifiers +%type declaration_specifiers -%type string_constant -%type struct_or_union_name enum_identifier goto_identifier +%type string_constant +%type struct_or_union_name enum_identifier goto_identifier %type foreach_unique_identifier %type int_constant soa_width_specifier rate_qualified_new @@ -277,7 +277,7 @@ primary_expression Symbol *s = m->symbolTable->LookupVariable(name); $$ = NULL; if (s) - $$ = new SymbolExpr(s, @1); + $$ = new SymbolExpr(s, @1); else { std::vector funs; m->symbolTable->LookupFunction(name, &funs); @@ -285,7 +285,7 @@ primary_expression $$ = new FunctionSymbolExpr(name, funs, @1); } if ($$ == NULL) { - std::vector alternates = + std::vector alternates = m->symbolTable->ClosestVariableOrFunctionMatch(name); std::string alts = lGetAlternates(alternates); Error(@1, "Undeclared symbol \"%s\".%s", name, alts.c_str()); @@ -293,23 +293,23 @@ primary_expression } | TOKEN_INT32_CONSTANT { $$ = new ConstExpr(AtomicType::UniformInt32->GetAsConstType(), - (int32_t)yylval.intVal, @1); + (int32_t)yylval.intVal, @1); } | TOKEN_UINT32_CONSTANT { $$ = new ConstExpr(AtomicType::UniformUInt32->GetAsConstType(), - (uint32_t)yylval.intVal, @1); + (uint32_t)yylval.intVal, @1); } | TOKEN_INT64_CONSTANT { $$ = new ConstExpr(AtomicType::UniformInt64->GetAsConstType(), - (int64_t)yylval.intVal, @1); + (int64_t)yylval.intVal, @1); } | TOKEN_UINT64_CONSTANT { $$ = new ConstExpr(AtomicType::UniformUInt64->GetAsConstType(), - (uint64_t)yylval.intVal, @1); + (uint64_t)yylval.intVal, @1); } | TOKEN_FLOAT_CONSTANT { $$ = new ConstExpr(AtomicType::UniformFloat->GetAsConstType(), - (float)yylval.floatVal, @1); + (float)yylval.floatVal, @1); } | TOKEN_TRUE { $$ = new ConstExpr(AtomicType::UniformBool->GetAsConstType(), true, @1); @@ -328,7 +328,7 @@ primary_expression launch_expression : TOKEN_LAUNCH postfix_expression '(' argument_expression_list ')' - { + { ConstExpr *oneExpr = new ConstExpr(AtomicType::UniformInt32, (int32_t)1, @2); $$ = new FunctionCallExpr($2, $4, Union(@2, @5), true, oneExpr); } @@ -343,7 +343,7 @@ launch_expression { $$ = new FunctionCallExpr($5, new ExprList(Union(@5,@6)), Union(@5,@7), true, $3); } | TOKEN_LAUNCH '<' postfix_expression '(' argument_expression_list ')' '>' - { + { Error(Union(@2, @7), "\"launch\" expressions no longer take '<' '>' " "around function call expression."); $$ = NULL; @@ -412,21 +412,21 @@ argument_expression_list unary_expression : funcall_expression - | TOKEN_INC_OP unary_expression + | TOKEN_INC_OP unary_expression { $$ = new UnaryExpr(UnaryExpr::PreInc, $2, Union(@1, @2)); } - | TOKEN_DEC_OP unary_expression + | TOKEN_DEC_OP unary_expression { $$ = new UnaryExpr(UnaryExpr::PreDec, $2, Union(@1, @2)); } | '&' unary_expression { $$ = new AddressOfExpr($2, Union(@1, @2)); } | '*' unary_expression { $$ = new PtrDerefExpr($2, Union(@1, @2)); } - | '+' cast_expression + | '+' cast_expression { $$ = $2; } - | '-' cast_expression + | '-' cast_expression { $$ = new UnaryExpr(UnaryExpr::Negate, $2, Union(@1, @2)); } - | '~' cast_expression + | '~' cast_expression { $$ = new UnaryExpr(UnaryExpr::BitNot, $2, Union(@1, @2)); } - | '!' cast_expression + | '!' 
cast_expression { $$ = new UnaryExpr(UnaryExpr::LogicalNot, $2, Union(@1, @2)); } | TOKEN_SIZEOF unary_expression { $$ = new SizeOfExpr($2, Union(@1, @2)); } @@ -438,7 +438,7 @@ cast_expression : unary_expression | '(' type_name ')' cast_expression { - $$ = new TypeCastExpr($2, $4, Union(@1,@4)); + $$ = new TypeCastExpr($2, $4, Union(@1,@4)); } ; @@ -630,7 +630,7 @@ constant_expression ; declaration_statement - : declaration + : declaration { if ($1 == NULL) { AssertPos(@1, m->errorCount > 0); @@ -810,11 +810,11 @@ init_declarator_list init_declarator : declarator - | declarator '=' initializer + | declarator '=' initializer { if ($1 != NULL) - $1->initExpr = $3; - $$ = $1; + $1->initExpr = $3; + $$ = $1; } ; @@ -829,7 +829,7 @@ type_specifier : atomic_var_type_specifier { $$ = $1; } | TOKEN_TYPE_NAME { - const Type *t = m->symbolTable->LookupType(yytext); + const Type *t = m->symbolTable->LookupType(yytext); $$ = t; } | struct_or_union_specifier { $$ = $1; } @@ -837,7 +837,7 @@ type_specifier ; type_specifier_list - : type_specifier + : type_specifier { if ($1 == NULL) $$ = NULL; @@ -883,8 +883,8 @@ struct_or_union_name struct_or_union_and_name : struct_or_union struct_or_union_name - { - const Type *st = m->symbolTable->LookupType($2); + { + const Type *st = m->symbolTable->LookupType($2); if (st == NULL) { st = new UndefinedStructType($2, Variability::Unbound, false, @2); m->symbolTable->AddType($2, st, @2); @@ -905,7 +905,7 @@ struct_or_union_and_name struct_or_union_specifier : struct_or_union_and_name - | struct_or_union_and_name '{' struct_declaration_list '}' + | struct_or_union_and_name '{' struct_declaration_list '}' { if ($3 != NULL) { llvm::SmallVector elementTypes; @@ -924,7 +924,7 @@ struct_or_union_specifier else $$ = NULL; } - | struct_or_union '{' struct_declaration_list '}' + | struct_or_union '{' struct_declaration_list '}' { if ($3 != NULL) { llvm::SmallVector elementTypes; @@ -938,7 +938,7 @@ struct_or_union_specifier else $$ = NULL; } - | struct_or_union '{' '}' + | struct_or_union '{' '}' { llvm::SmallVector elementTypes; llvm::SmallVector elementNames; @@ -946,7 +946,7 @@ struct_or_union_specifier $$ = new StructType("", elementTypes, elementNames, elementPositions, false, Variability::Unbound, @1); } - | struct_or_union_and_name '{' '}' + | struct_or_union_and_name '{' '}' { llvm::SmallVector elementTypes; llvm::SmallVector elementNames; @@ -963,18 +963,18 @@ struct_or_union_specifier ; struct_or_union - : TOKEN_STRUCT + : TOKEN_STRUCT ; struct_declaration_list - : struct_declaration - { + : struct_declaration + { std::vector *sdl = new std::vector; if ($1 != NULL) sdl->push_back($1); $$ = sdl; } - | struct_declaration_list struct_declaration + | struct_declaration_list struct_declaration { std::vector *sdl = (std::vector *)$1; if (sdl == NULL) { @@ -988,7 +988,7 @@ struct_declaration_list ; struct_declaration - : specifier_qualifier_list struct_declarator_list ';' + : specifier_qualifier_list struct_declarator_list ';' { $$ = ($1 != NULL && $2 != NULL) ? 
new StructDeclaration($1, $2) : NULL; } ; @@ -996,7 +996,7 @@ specifier_qualifier_list : type_specifier specifier_qualifier_list | type_specifier | short_vec_specifier - | type_qualifier specifier_qualifier_list + | type_qualifier specifier_qualifier_list { if ($2 != NULL) { if ($1 == TYPEQUAL_UNIFORM) { @@ -1033,7 +1033,7 @@ specifier_qualifier_list $2->ResolveUnboundVariability(Variability::Varying)->GetString().c_str()); $$ = $2; } - } + } else if ($1 == TYPEQUAL_INLINE) { Error(@1, "\"inline\" qualifier is illegal outside of " "function declarations."); @@ -1059,7 +1059,7 @@ specifier_qualifier_list } else { if (m->errorCount == 0) - Error(@1, "Lost type qualifier in parser."); + Error(@1, "Lost type qualifier in parser."); $$ = NULL; } } @@ -1067,14 +1067,14 @@ specifier_qualifier_list struct_declarator_list - : struct_declarator + : struct_declarator { std::vector *sdl = new std::vector; if ($1 != NULL) sdl->push_back($1); $$ = sdl; } - | struct_declarator_list ',' struct_declarator + | struct_declarator_list ',' struct_declarator { std::vector *sdl = (std::vector *)$1; if (sdl == NULL) { @@ -1099,19 +1099,19 @@ enum_identifier : TOKEN_IDENTIFIER { $$ = strdup(yytext); } enum_specifier - : TOKEN_ENUM '{' enumerator_list '}' + : TOKEN_ENUM '{' enumerator_list '}' { $$ = lCreateEnumType(NULL, $3, @1); } - | TOKEN_ENUM enum_identifier '{' enumerator_list '}' + | TOKEN_ENUM enum_identifier '{' enumerator_list '}' { $$ = lCreateEnumType($2, $4, @2); } - | TOKEN_ENUM '{' enumerator_list ',' '}' + | TOKEN_ENUM '{' enumerator_list ',' '}' { $$ = lCreateEnumType(NULL, $3, @1); } - | TOKEN_ENUM enum_identifier '{' enumerator_list ',' '}' + | TOKEN_ENUM enum_identifier '{' enumerator_list ',' '}' { $$ = lCreateEnumType($2, $4, @2); } @@ -1138,12 +1138,12 @@ enum_specifier ; enumerator_list - : enumerator + : enumerator { if ($1 == NULL) $$ = NULL; else { - std::vector *el = new std::vector; + std::vector *el = new std::vector; el->push_back($1); $$ = el; } @@ -1198,7 +1198,7 @@ type_qualifier_list { $$ = $1; } - | type_qualifier_list type_qualifier + | type_qualifier_list type_qualifier { $$ = $1 | $2; } @@ -1243,9 +1243,9 @@ direct_declarator d->name = yytext; $$ = d; } - | '(' declarator ')' + | '(' declarator ')' { - $$ = $2; + $$ = $2; } | direct_declarator '[' constant_expression ']' { @@ -1310,9 +1310,9 @@ direct_declarator pointer - : '*' + : '*' { - $$ = new Declarator(DK_POINTER, @1); + $$ = new Declarator(DK_POINTER, @1); } | '*' type_qualifier_list { @@ -1337,9 +1337,9 @@ pointer reference - : '&' + : '&' { - $$ = new Declarator(DK_REFERENCE, @1); + $$ = new Declarator(DK_REFERENCE, @1); } ; @@ -1375,10 +1375,10 @@ parameter_list parameter_declaration : declaration_specifiers declarator { - $$ = new Declaration($1, $2); + $$ = new Declaration($1, $2); } | declaration_specifiers declarator '=' initializer - { + { if ($1 != NULL && $2 != NULL) { $2->initExpr = $4; $$ = new Declaration($1, $2); @@ -1398,11 +1398,11 @@ parameter_declaration if ($1 == NULL) $$ = NULL; else - $$ = new Declaration($1); + $$ = new Declaration($1); } ; -/* K&R? +/* K&R? 
identifier_list : IDENTIFIER | identifier_list ',' IDENTIFIER @@ -1588,9 +1588,9 @@ labeled_statement $$ = new LabeledStmt($1, $3, @1); } | TOKEN_CASE constant_expression ':' statement - { + { int value; - if ($2 != NULL && + if ($2 != NULL && lGetConstantInt($2, &value, @2, "Case statement value")) { $$ = new CaseStmt(value, $4, Union(@1, @2)); } @@ -1700,19 +1700,19 @@ foreach_active_identifier integer_dotdotdot : TOKEN_INT32DOTDOTDOT_CONSTANT { $$ = new ConstExpr(AtomicType::UniformInt32->GetAsConstType(), - (int32_t)yylval.intVal, @1); + (int32_t)yylval.intVal, @1); } | TOKEN_UINT32DOTDOTDOT_CONSTANT { $$ = new ConstExpr(AtomicType::UniformUInt32->GetAsConstType(), - (uint32_t)yylval.intVal, @1); + (uint32_t)yylval.intVal, @1); } | TOKEN_INT64DOTDOTDOT_CONSTANT { $$ = new ConstExpr(AtomicType::UniformInt64->GetAsConstType(), - (int64_t)yylval.intVal, @1); + (int64_t)yylval.intVal, @1); } | TOKEN_UINT64DOTDOTDOT_CONSTANT { $$ = new ConstExpr(AtomicType::UniformUInt64->GetAsConstType(), - (uint64_t)yylval.intVal, @1); + (uint64_t)yylval.intVal, @1); } ; @@ -1749,7 +1749,7 @@ foreach_dimension_list foreach_unique_scope : TOKEN_FOREACH_UNIQUE { m->symbolTable->PushScope(); } ; - + foreach_unique_identifier : TOKEN_IDENTIFIER { $$ = yylval.stringVal->c_str(); } ; @@ -1764,11 +1764,11 @@ iteration_statement | TOKEN_CDO statement TOKEN_WHILE '(' expression ')' ';' { $$ = new DoStmt($5, $2, true, @1); } | for_scope '(' for_init_statement for_test ')' statement - { $$ = new ForStmt($3, $4, NULL, $6, false, @1); + { $$ = new ForStmt($3, $4, NULL, $6, false, @1); m->symbolTable->PopScope(); } | for_scope '(' for_init_statement for_test expression ')' statement - { $$ = new ForStmt($3, $4, new ExprStmt($5, @5), $7, false, @1); + { $$ = new ForStmt($3, $4, new ExprStmt($5, @5), $7, false, @1); m->symbolTable->PopScope(); } | cfor_scope '(' for_init_statement for_test ')' statement @@ -1847,7 +1847,7 @@ iteration_statement m->symbolTable->PopScope(); } | foreach_unique_scope '(' foreach_unique_identifier TOKEN_IN - expression ')' + expression ')' { Expr *expr = $5; const Type *type; @@ -1905,11 +1905,11 @@ unmasked_statement print_statement : TOKEN_PRINT '(' string_constant ')' ';' { - $$ = new PrintStmt(*$3, NULL, @1); + $$ = new PrintStmt(*$3, NULL, @1); } | TOKEN_PRINT '(' string_constant ',' argument_expression_list ')' ';' { - $$ = new PrintStmt(*$3, $5, @1); + $$ = new PrintStmt(*$3, $5, @1); } ; @@ -1934,8 +1934,8 @@ external_declaration if ($3 != NULL) m->AddExportedTypes(*$3); } - | declaration - { + | declaration + { if ($1 != NULL) for (unsigned int i = 0; i < $1->declarators.size(); ++i) lAddDeclaration($1->declSpecs, $1->declarators[i]); @@ -1944,14 +1944,14 @@ external_declaration ; function_definition - : declaration_specifiers declarator + : declaration_specifiers declarator { lAddDeclaration($1, $2); - lAddFunctionParams($2); + lAddFunctionParams($2); lAddMaskToSymbolTable(@2); if ($1->typeQualifiers & TYPEQUAL_TASK) lAddThreadIndexCountToSymbolTable(@2); - } + } compound_statement { if ($2 != NULL) { @@ -1970,7 +1970,7 @@ function_definition m->symbolTable->PopScope(); // push in lAddFunctionParams(); } /* function with no declared return type?? -func(...) +func(...) 
| declarator { lAddFunctionParams($1); } compound_statement { m->AddFunction(new DeclSpecs(XXX, $1, $3); @@ -2002,34 +2002,34 @@ lYYTNameErr (char *yyres, const char *yystr) else return yystpcpy(yyres, n.c_str()) - yyres; } - + if (*yystr == '"') { YYSIZE_T yyn = 0; char const *yyp = yystr; for (;;) - switch (*++yyp) - { - case '\'': - case ',': - goto do_not_strip_quotes; + switch (*++yyp) + { + case '\'': + case ',': + goto do_not_strip_quotes; - case '\\': - if (*++yyp != '\\') - goto do_not_strip_quotes; - /* Fall through. */ - default: - if (yyres) - yyres[yyn] = *yyp; - yyn++; - break; + case '\\': + if (*++yyp != '\\') + goto do_not_strip_quotes; + /* Fall through. */ + default: + if (yyres) + yyres[yyn] = *yyp; + yyn++; + break; - case '"': - if (yyres) - yyres[yyn] = '\0'; - return yyn; - } + case '"': + if (yyres) + yyres[yyn] = '\0'; + return yyn; + } do_not_strip_quotes: ; } @@ -2085,7 +2085,7 @@ lAddDeclaration(DeclSpecs *ds, Declarator *decl) { } decl->type = decl->type->ResolveUnboundVariability(Variability::Varying); - + const FunctionType *ft = CastType(decl->type); if (ft != NULL) { bool isInline = (ds->typeQualifiers & TYPEQUAL_INLINE); @@ -2113,7 +2113,7 @@ lAddFunctionParams(Declarator *decl) { return; } - // walk down to the declarator for the function itself + // walk down to the declarator for the function itself while (decl->kind != DK_FUNCTION && decl->child != NULL) decl = decl->child; if (decl->kind != DK_FUNCTION) { @@ -2265,13 +2265,13 @@ lCreateEnumType(const char *name, std::vector *enums, SourcePos pos) { /** Given an array of enumerator symbols, make sure each of them has a - ConstExpr * in their Symbol::constValue member that stores their + ConstExpr * in their Symbol::constValue member that stores their unsigned integer value. Symbols that had values explicitly provided in the source file will already have ConstExpr * set; we just need to set the values for the others here. */ static void -lFinalizeEnumeratorSymbols(std::vector &enums, +lFinalizeEnumeratorSymbols(std::vector &enums, const EnumType *enumType) { enumType = enumType->GetAsConstType(); enumType = enumType->GetAsUniformType(); @@ -2304,7 +2304,7 @@ lFinalizeEnumeratorSymbols(std::vector &enums, AssertPos(enums[i]->pos, enums[i]->constValue != NULL); } else { - enums[i]->constValue = new ConstExpr(enumType, nextVal++, + enums[i]->constValue = new ConstExpr(enumType, nextVal++, enums[i]->pos); } } diff --git a/stmt.cpp b/stmt.cpp index fd0ebfc5..4f4d9671 100644 --- a/stmt.cpp +++ b/stmt.cpp @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /** @file stmt.cpp @@ -81,18 +81,18 @@ Stmt::Optimize() { /////////////////////////////////////////////////////////////////////////// // ExprStmt -ExprStmt::ExprStmt(Expr *e, SourcePos p) +ExprStmt::ExprStmt(Expr *e, SourcePos p) : Stmt(p) { expr = e; } void ExprStmt::EmitCode(FunctionEmitContext *ctx) const { - if (!ctx->GetCurrentBasicBlock()) + if (!ctx->GetCurrentBasicBlock()) return; ctx->SetDebugPos(pos); - if (expr) + if (expr) expr->GetValue(ctx); } @@ -105,7 +105,7 @@ ExprStmt::TypeCheck() { void ExprStmt::Print(int indent) const { - if (!expr) + if (!expr) return; printf("%*c", indent, ' '); @@ -141,11 +141,11 @@ lHasUnsizedArrays(const Type *type) { else return lHasUnsizedArrays(at->GetElementType()); } - + void DeclStmt::EmitCode(FunctionEmitContext *ctx) const { - if (!ctx->GetCurrentBasicBlock()) + if (!ctx->GetCurrentBasicBlock()) return; for (unsigned int i = 0; i < vars.size(); ++i) { @@ -236,13 +236,13 @@ DeclStmt::EmitCode(FunctionEmitContext *ctx) const { // Allocate space for the static variable in global scope, so // that it persists across function calls sym->storagePtr = - new llvm::GlobalVariable(*m->module, llvmType, + new llvm::GlobalVariable(*m->module, llvmType, sym->type->IsConstType(), llvm::GlobalValue::InternalLinkage, cinit, llvm::Twine("static.") + - llvm::Twine(sym->pos.first_line) + + llvm::Twine(sym->pos.first_line) + llvm::Twine(".") + sym->name.c_str()); - // Tell the FunctionEmitContext about the variable + // Tell the FunctionEmitContext about the variable ctx->EmitVariableDebugInfo(sym); } else { @@ -282,7 +282,7 @@ DeclStmt::Optimize() { // computing array sizes from non-trivial expressions is // consequently limited. Symbol *sym = vars[i].sym; - if (sym->type && sym->type->IsConstType() && + if (sym->type && sym->type->IsConstType() && Type::Equal(init->GetType(), sym->type)) sym->constValue = dynamic_cast(init); } @@ -329,7 +329,7 @@ DeclStmt::Print(int indent) const { printf("%*cDecl Stmt:", indent, ' '); pos.Print(); for (unsigned int i = 0; i < vars.size(); ++i) { - printf("%*cVariable %s (%s)", indent+4, ' ', + printf("%*cVariable %s (%s)", indent+4, ' ', vars[i].sym->name.c_str(), vars[i].sym->type->GetString().c_str()); if (vars[i].init != NULL) { @@ -351,8 +351,8 @@ DeclStmt::EstimateCost() const { /////////////////////////////////////////////////////////////////////////// // IfStmt -IfStmt::IfStmt(Expr *t, Stmt *ts, Stmt *fs, bool checkCoherence, SourcePos p) - : Stmt(p), test(t), trueStmts(ts), falseStmts(fs), +IfStmt::IfStmt(Expr *t, Stmt *ts, Stmt *fs, bool checkCoherence, SourcePos p) + : Stmt(p), test(t), trueStmts(ts), falseStmts(fs), doAllCheck(checkCoherence && !g->opt.disableCoherentControlFlow) { } @@ -399,9 +399,9 @@ IfStmt::EmitCode(FunctionEmitContext *ctx) const { // First check all of the things that might happen due to errors // earlier in compilation and bail out if needed so that we don't // dereference NULL pointers in the below... 
- if (!ctx->GetCurrentBasicBlock()) + if (!ctx->GetCurrentBasicBlock()) return; - if (!test) + if (!test) return; const Type *testType = test->GetType(); if (!testType) @@ -433,7 +433,7 @@ IfStmt::EmitCode(FunctionEmitContext *ctx) const { // Emit code for the 'true' case ctx->SetCurrentBasicBlock(bthen); lEmitIfStatements(ctx, trueStmts, "true"); - if (ctx->GetCurrentBasicBlock()) + if (ctx->GetCurrentBasicBlock()) ctx->BranchInst(bexit); // Emit code for the 'false' case @@ -466,10 +466,10 @@ IfStmt::TypeCheck() { if (test != NULL) { const Type *testType = test->GetType(); if (testType != NULL) { - bool isUniform = (testType->IsUniformType() && + bool isUniform = (testType->IsUniformType() && !g->opt.disableUniformControlFlow); - test = TypeConvertExpr(test, isUniform ? AtomicType::UniformBool : - AtomicType::VaryingBool, + test = TypeConvertExpr(test, isUniform ? AtomicType::UniformBool : + AtomicType::VaryingBool, "\"if\" statement test"); if (test == NULL) return NULL; @@ -509,17 +509,17 @@ IfStmt::Print(int indent) const { /** Emit code to run both the true and false statements for the if test, - with the mask set appropriately before running each one. + with the mask set appropriately before running each one. */ void -IfStmt::emitMaskedTrueAndFalse(FunctionEmitContext *ctx, llvm::Value *oldMask, +IfStmt::emitMaskedTrueAndFalse(FunctionEmitContext *ctx, llvm::Value *oldMask, llvm::Value *test) const { if (trueStmts) { ctx->SetInternalMaskAnd(oldMask, test); lEmitIfStatements(ctx, trueStmts, "if: expr mixed, true statements"); // under varying control flow, returns can't stop instruction // emission, so this better be non-NULL... - AssertPos(ctx->GetDebugPos(), ctx->GetCurrentBasicBlock()); + AssertPos(ctx->GetDebugPos(), ctx->GetCurrentBasicBlock()); } if (falseStmts) { ctx->SetInternalMaskAndNot(oldMask, test); @@ -544,14 +544,14 @@ IfStmt::emitVaryingIf(FunctionEmitContext *ctx, llvm::Value *ltest) const { llvm::BasicBlock *bMixedOn = ctx->CreateBasicBlock("cif_mask_mixed"); llvm::BasicBlock *bDone = ctx->CreateBasicBlock("cif_done"); - // Jump to either bAllOn or bMixedOn, depending on the mask's value + // Jump to either bAllOn or bMixedOn, depending on the mask's value llvm::Value *maskAllQ = ctx->All(ctx->GetFullMask()); ctx->BranchInst(bAllOn, bMixedOn, maskAllQ); // Emit code for the 'mask all on' case ctx->SetCurrentBasicBlock(bAllOn); emitMaskAllOn(ctx, ltest, bDone); - + // And emit code for the mixed mask case ctx->SetCurrentBasicBlock(bMixedOn); emitMaskMixed(ctx, oldMask, ltest, bDone); @@ -580,7 +580,7 @@ IfStmt::emitVaryingIf(FunctionEmitContext *ctx, llvm::Value *ltest) const { // // where our use of blend for conditional assignments doesn't check // for the 'all lanes' off case. - int trueFalseCost = (::EstimateCost(trueStmts) + + int trueFalseCost = (::EstimateCost(trueStmts) + ::EstimateCost(falseStmts)); bool costIsAcceptable = (trueFalseCost < PREDICATE_SAFE_IF_STATEMENT_COST); @@ -591,7 +591,7 @@ IfStmt::emitVaryingIf(FunctionEmitContext *ctx, llvm::Value *ltest) const { Debug(pos, "If statement: true cost %d (safe %d), false cost %d (safe %d).", ::EstimateCost(trueStmts), (int)SafeToRunWithMaskAllOff(trueStmts), ::EstimateCost(falseStmts), (int)SafeToRunWithMaskAllOff(falseStmts)); - + if (safeToRunWithAllLanesOff && (costIsAcceptable || g->opt.disableCoherentControlFlow)) { ctx->StartVaryingIf(oldMask); @@ -612,7 +612,7 @@ IfStmt::emitVaryingIf(FunctionEmitContext *ctx, llvm::Value *ltest) const { mask is all on going into the 'if'.
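A minimal sketch of the lowering decision made in emitVaryingIf() above, restated as a standalone function; trueCost and falseCost stand in for ::EstimateCost() results, safeWithAllLanesOff for SafeToRunWithMaskAllOff(), and safeCostLimit for PREDICATE_SAFE_IF_STATEMENT_COST (chooseIfLowering itself is hypothetical, not ispc API):

enum class IfLowering { PredicateBothSides, CoherentBranch };

// If running with all lanes off is safe and the two sides are cheap (or
// coherent control flow is disabled), just run both sides under
// complementary masks; otherwise emit the branchy all-on / mixed version.
static IfLowering
chooseIfLowering(int trueCost, int falseCost, bool safeWithAllLanesOff,
                 bool coherentCFDisabled, int safeCostLimit) {
    bool costIsAcceptable = (trueCost + falseCost) < safeCostLimit;
    if (safeWithAllLanesOff && (costIsAcceptable || coherentCFDisabled))
        return IfLowering::PredicateBothSides;  // emitMaskedTrueAndFalse()
    return IfLowering::CoherentBranch;          // emitMaskAllOn() / emitMaskMixed()
}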
*/ void -IfStmt::emitMaskAllOn(FunctionEmitContext *ctx, llvm::Value *ltest, +IfStmt::emitMaskAllOn(FunctionEmitContext *ctx, llvm::Value *ltest, llvm::BasicBlock *bDone) const { // We start by explicitly storing "all on" into the mask. Note // that this doesn't change its actual value, but doing so lets the @@ -682,7 +682,7 @@ IfStmt::emitMaskAllOn(FunctionEmitContext *ctx, llvm::Value *ltest, on/off going into it. */ void -IfStmt::emitMaskMixed(FunctionEmitContext *ctx, llvm::Value *oldMask, +IfStmt::emitMaskMixed(FunctionEmitContext *ctx, llvm::Value *oldMask, llvm::Value *ltest, llvm::BasicBlock *bDone) const { ctx->StartVaryingIf(oldMask); llvm::BasicBlock *bNext = ctx->CreateBasicBlock("safe_if_after_true"); @@ -699,7 +699,7 @@ IfStmt::emitMaskMixed(FunctionEmitContext *ctx, llvm::Value *oldMask, ctx->SetCurrentBasicBlock(bRunTrue); if (trueStmts != NULL) lEmitIfStatements(ctx, trueStmts, "if: expr mixed, true statements"); - AssertPos(pos, ctx->GetCurrentBasicBlock()); + AssertPos(pos, ctx->GetCurrentBasicBlock()); ctx->BranchInst(bNext); ctx->SetCurrentBasicBlock(bNext); @@ -801,7 +801,7 @@ lVaryingBCPostFunc(ASTNode *node, void *d) { flow. We need to detect this case for loops since what might otherwise look like a 'uniform' loop needs to have code emitted to do all of the lane management stuff if this is the case. - */ + */ static bool lHasVaryingBreakOrContinue(Stmt *stmt) { VaryingBCCheckInfo info; @@ -810,8 +810,8 @@ lHasVaryingBreakOrContinue(Stmt *stmt) { } -DoStmt::DoStmt(Expr *t, Stmt *s, bool cc, SourcePos p) - : Stmt(p), testExpr(t), bodyStmts(s), +DoStmt::DoStmt(Expr *t, Stmt *s, bool cc, SourcePos p) + : Stmt(p), testExpr(t), bodyStmts(s), doCoherentCheck(cc && !g->opt.disableCoherentControlFlow) { } @@ -819,9 +819,9 @@ DoStmt::DoStmt(Expr *t, Stmt *s, bool cc, SourcePos p) void DoStmt::EmitCode(FunctionEmitContext *ctx) const { // Check for things that could be NULL due to earlier errors during // compilation.
- if (!ctx->GetCurrentBasicBlock()) + if (!ctx->GetCurrentBasicBlock()) return; - if (!testExpr || !testExpr->GetType()) + if (!testExpr || !testExpr->GetType()) return; bool uniformTest = testExpr->GetType()->IsUniformType(); @@ -984,15 +984,15 @@ DoStmt::Print(int indent) const { /////////////////////////////////////////////////////////////////////////// // ForStmt -ForStmt::ForStmt(Stmt *i, Expr *t, Stmt *s, Stmt *st, bool cc, SourcePos p) - : Stmt(p), init(i), test(t), step(s), stmts(st), +ForStmt::ForStmt(Stmt *i, Expr *t, Stmt *s, Stmt *st, bool cc, SourcePos p) + : Stmt(p), init(i), test(t), step(s), stmts(st), doCoherentCheck(cc && !g->opt.disableCoherentControlFlow) { } void ForStmt::EmitCode(FunctionEmitContext *ctx) const { - if (!ctx->GetCurrentBasicBlock()) + if (!ctx->GetCurrentBasicBlock()) return; llvm::BasicBlock *btest = ctx->CreateBasicBlock("for_test"); @@ -1176,14 +1176,14 @@ ForStmt::Print(int indent) const { /////////////////////////////////////////////////////////////////////////// // BreakStmt -BreakStmt::BreakStmt(SourcePos p) +BreakStmt::BreakStmt(SourcePos p) : Stmt(p) { } void BreakStmt::EmitCode(FunctionEmitContext *ctx) const { - if (!ctx->GetCurrentBasicBlock()) + if (!ctx->GetCurrentBasicBlock()) return; ctx->SetDebugPos(pos); @@ -1214,14 +1214,14 @@ BreakStmt::Print(int indent) const { /////////////////////////////////////////////////////////////////////////// // ContinueStmt -ContinueStmt::ContinueStmt(SourcePos p) +ContinueStmt::ContinueStmt(SourcePos p) : Stmt(p) { } void ContinueStmt::EmitCode(FunctionEmitContext *ctx) const { - if (!ctx->GetCurrentBasicBlock()) + if (!ctx->GetCurrentBasicBlock()) return; ctx->SetDebugPos(pos); @@ -1252,9 +1252,9 @@ ContinueStmt::Print(int indent) const { /////////////////////////////////////////////////////////////////////////// // ForeachStmt -ForeachStmt::ForeachStmt(const std::vector &lvs, - const std::vector &se, - const std::vector &ee, +ForeachStmt::ForeachStmt(const std::vector &lvs, + const std::vector &se, + const std::vector &ee, Stmt *s, bool t, SourcePos pos) : Stmt(pos), dimVariables(lvs), startExprs(se), endExprs(ee), isTiled(t), stmts(s) { @@ -1266,16 +1266,16 @@ ForeachStmt::ForeachStmt(const std::vector &lvs, values for use within the loop body. */ static llvm::Value * -lUpdateVaryingCounter(int dim, int nDims, FunctionEmitContext *ctx, +lUpdateVaryingCounter(int dim, int nDims, FunctionEmitContext *ctx, llvm::Value *uniformCounterPtr, llvm::Value *varyingCounterPtr, const std::vector &spans) { // Smear the uniform counter value out to be varying llvm::Value *counter = ctx->LoadInst(uniformCounterPtr); - llvm::Value *smearCounter = + llvm::Value *smearCounter = llvm::UndefValue::get(LLVMTypes::Int32VectorType); for (int i = 0; i < g->target.vectorWidth; ++i) - smearCounter = + smearCounter = ctx->InsertInst(smearCounter, counter, i, "smear_counter"); // Figure out the offsets; this is a little bit tricky. As an example, @@ -1300,7 +1300,7 @@ lUpdateVaryingCounter(int dim, int nDims, FunctionEmitContext *ctx, // Add the deltas to compute the varying counter values; store the // result to memory and then return it directly as well. - llvm::Value *varyingCounter = + llvm::Value *varyingCounter = ctx->BinaryOperator(llvm::Instruction::Add, smearCounter, LLVMInt32Vector(delta), "iter_val"); ctx->StoreInst(varyingCounter, varyingCounterPtr); @@ -1349,7 +1349,7 @@ lGetSpans(int dimsLeft, int nDims, int itemsLeft, bool isTiled, int *a) { // 16-wide. 
*a = 4; else - // Otherwise give this dimension a span of two. + // Otherwise give this dimension a span of two. *a = 2; lGetSpans(dimsLeft-1, nDims, itemsLeft / *a, isTiled, a+1); @@ -1364,7 +1364,7 @@ lGetSpans(int dimsLeft, int nDims, int itemsLeft, bool isTiled, int *a) { */ void ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { - if (ctx->GetCurrentBasicBlock() == NULL || stmts == NULL) + if (ctx->GetCurrentBasicBlock() == NULL || stmts == NULL) return; llvm::BasicBlock *bbFullBody = ctx->CreateBasicBlock("foreach_full_body"); @@ -1381,7 +1381,7 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { ctx->SetFunctionMask(LLVMMaskAllOn); // This should be caught during typechecking - AssertPos(pos, startExprs.size() == dimVariables.size() && + AssertPos(pos, startExprs.size() == dimVariables.size() && endExprs.size() == dimVariables.size()); int nDims = (int)dimVariables.size(); @@ -1413,7 +1413,7 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { endVals.push_back(ev); // nItems = endVal - startVal - llvm::Value *nItems = + llvm::Value *nItems = ctx->BinaryOperator(llvm::Instruction::Sub, ev, sv, "nitems"); // nExtras = nItems % (span for this dimension) @@ -1432,15 +1432,15 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { // goes from startVal to endVal, in steps of the span for this // dimension. Its value is only used internally here for looping // logic and isn't directly available in the user's program code. - uniformCounterPtrs.push_back(ctx->AllocaInst(LLVMTypes::Int32Type, + uniformCounterPtrs.push_back(ctx->AllocaInst(LLVMTypes::Int32Type, "counter")); ctx->StoreInst(startVals[i], uniformCounterPtrs[i]); // There is also a varying variable that holds the set of index // values for each dimension in the current loop iteration; this is // the value that is program-visible. 
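The value lUpdateVaryingCounter() produces for each lane can be modeled on plain integers: smear the uniform counter across the vector, then add a per-lane delta derived from the span layout. A rough sketch under those assumptions (varyingCounterValue, span, and vectorWidth are illustrative stand-ins, not ispc API):

#include <vector>

// For dimension 'dim', lane i's index is the smeared uniform counter plus
// a delta: divide the lane number by the product of the spans of all
// inner dimensions, then take it modulo this dimension's span.
static std::vector<int>
varyingCounterValue(int uniformCounter, int dim, int nDims,
                    const std::vector<int> &span, int vectorWidth) {
    int innerSpan = 1;
    for (int j = dim + 1; j < nDims; ++j)
        innerSpan *= span[j];
    std::vector<int> lanes(vectorWidth);
    for (int i = 0; i < vectorWidth; ++i)
        lanes[i] = uniformCounter + (i / innerSpan) % span[dim];
    return lanes;
}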
- dimVariables[i]->storagePtr = - ctx->AllocaInst(LLVMTypes::Int32VectorType, + dimVariables[i]->storagePtr = + ctx->AllocaInst(LLVMTypes::Int32VectorType, dimVariables[i]->name.c_str()); dimVariables[i]->parentFunction = ctx->GetFunction(); ctx->EmitVariableDebugInfo(dimVariables[i]); @@ -1482,7 +1482,7 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { for (int i = 0; i < nDims-1; ++i) { ctx->SetCurrentBasicBlock(bbStep[i]); llvm::Value *counter = ctx->LoadInst(uniformCounterPtrs[i]); - llvm::Value *newCounter = + llvm::Value *newCounter = ctx->BinaryOperator(llvm::Instruction::Add, counter, LLVMInt32(span[i]), "new_counter"); ctx->StoreInst(newCounter, uniformCounterPtrs[i]); @@ -1495,15 +1495,15 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { for (int i = 0; i < nDims-1; ++i) { ctx->SetCurrentBasicBlock(bbTest[i]); - llvm::Value *haveExtras = + llvm::Value *haveExtras = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_SGT, endVals[i], alignedEnd[i], "have_extras"); llvm::Value *counter = ctx->LoadInst(uniformCounterPtrs[i], "counter"); - llvm::Value *atAlignedEnd = + llvm::Value *atAlignedEnd = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_EQ, counter, alignedEnd[i], "at_aligned_end"); - llvm::Value *inEx = + llvm::Value *inEx = ctx->BinaryOperator(llvm::Instruction::And, haveExtras, atAlignedEnd, "in_extras"); @@ -1513,8 +1513,8 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { inExtras.push_back(ctx->BinaryOperator(llvm::Instruction::Or, inEx, inExtras[i-1], "in_extras_all")); - llvm::Value *varyingCounter = - lUpdateVaryingCounter(i, nDims, ctx, uniformCounterPtrs[i], + llvm::Value *varyingCounter = + lUpdateVaryingCounter(i, nDims, ctx, uniformCounterPtrs[i], dimVariables[i]->storagePtr, span); llvm::Value *smearEnd = llvm::UndefValue::get(LLVMTypes::Int32VectorType); @@ -1522,7 +1522,7 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { smearEnd = ctx->InsertInst(smearEnd, endVals[i], j, "smear_end"); // Do a vector compare of its value to the end value to generate a // mask for this last bit of work. - llvm::Value *emask = + llvm::Value *emask = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_SLT, varyingCounter, smearEnd); emask = ctx->I1VecToBoolVec(emask); @@ -1537,7 +1537,7 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { ctx->StoreInst(newMask, extrasMaskPtrs[i]); } - llvm::Value *notAtEnd = + llvm::Value *notAtEnd = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_SLT, counter, endVals[i]); ctx->BranchInst(bbTest[i+1], bbReset[i], notAtEnd); @@ -1576,9 +1576,9 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { // whether any of the enclosing dimensions is partially active // (i.e. processing extra elements that don't exactly fit into a // vector). - llvm::BasicBlock *bbOuterInExtras = + llvm::BasicBlock *bbOuterInExtras = ctx->CreateBasicBlock("outer_in_extras"); - llvm::BasicBlock *bbOuterNotInExtras = + llvm::BasicBlock *bbOuterNotInExtras = ctx->CreateBasicBlock("outer_not_in_extras"); ctx->SetCurrentBasicBlock(bbTest[nDims-1]); @@ -1609,12 +1609,12 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { ctx->SetCurrentBasicBlock(bbOuterInExtras); { // Update the varying counter value here, since all subsequent // blocks along this path need it. 
- lUpdateVaryingCounter(nDims-1, nDims, ctx, uniformCounterPtrs[nDims-1], + lUpdateVaryingCounter(nDims-1, nDims, ctx, uniformCounterPtrs[nDims-1], dimVariables[nDims-1]->storagePtr, span); // here we just check to see if counter < alignedEnd llvm::Value *counter = ctx->LoadInst(uniformCounterPtrs[nDims-1], "counter"); - llvm::Value *beforeAlignedEnd = + llvm::Value *beforeAlignedEnd = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_SLT, counter, alignedEnd[nDims-1], "before_aligned_end"); ctx->BranchInst(bbAllInnerPartialOuter, bbPartial, beforeAlignedEnd); @@ -1624,7 +1624,7 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { // case where the mask is partially but not fully on. This same block // runs in multiple cases: both for handling any ragged extra data for // the innermost dimension but also when outer dimensions have set the - // mask to be partially on. + // mask to be partially on. // // The value stored in stepIndexAfterMaskedBodyPtr is used after each // execution of the body code to determine whether the innermost index @@ -1660,12 +1660,12 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { // We need to include the effect of the innermost dimension in the mask // for the final bits here ctx->SetCurrentBasicBlock(bbPartial); { - llvm::Value *varyingCounter = + llvm::Value *varyingCounter = ctx->LoadInst(dimVariables[nDims-1]->storagePtr); llvm::Value *smearEnd = llvm::UndefValue::get(LLVMTypes::Int32VectorType); for (int j = 0; j < g->target.vectorWidth; ++j) smearEnd = ctx->InsertInst(smearEnd, endVals[nDims-1], j, "smear_end"); - llvm::Value *emask = + llvm::Value *emask = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_SLT, varyingCounter, smearEnd); emask = ctx->I1VecToBoolVec(emask); @@ -1701,7 +1701,7 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { ctx->CreateBasicBlock("partial_inner_all_outer"); ctx->SetCurrentBasicBlock(bbOuterNotInExtras); { llvm::Value *counter = ctx->LoadInst(uniformCounterPtrs[nDims-1], "counter"); - llvm::Value *beforeAlignedEnd = + llvm::Value *beforeAlignedEnd = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_SLT, counter, alignedEnd[nDims-1], "before_aligned_end"); ctx->BranchInst(bbFullBody, bbPartialInnerAllOuter, @@ -1714,12 +1714,12 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { // on'. This ends up being relatively straightforward: just update the // value of the varying loop counter and have the statements in the // loop body emit their code. 
- llvm::BasicBlock *bbFullBodyContinue = + llvm::BasicBlock *bbFullBodyContinue = ctx->CreateBasicBlock("foreach_full_continue"); ctx->SetCurrentBasicBlock(bbFullBody); { ctx->SetInternalMask(LLVMMaskAllOn); ctx->SetBlockEntryMask(LLVMMaskAllOn); - lUpdateVaryingCounter(nDims-1, nDims, ctx, uniformCounterPtrs[nDims-1], + lUpdateVaryingCounter(nDims-1, nDims, ctx, uniformCounterPtrs[nDims-1], dimVariables[nDims-1]->storagePtr, span); ctx->SetContinueTarget(bbFullBodyContinue); ctx->AddInstrumentationPoint("foreach loop body (all on)"); @@ -1730,7 +1730,7 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { ctx->SetCurrentBasicBlock(bbFullBodyContinue); { ctx->RestoreContinuedLanes(); llvm::Value *counter = ctx->LoadInst(uniformCounterPtrs[nDims-1]); - llvm::Value *newCounter = + llvm::Value *newCounter = ctx->BinaryOperator(llvm::Instruction::Add, counter, LLVMInt32(span[nDims-1]), "new_counter"); ctx->StoreInst(newCounter, uniformCounterPtrs[nDims-1]); @@ -1741,11 +1741,11 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { // We're done running blocks with the mask all on; see if the counter is // less than the end value, in which case we need to run the body one // more time to get the extra bits. - llvm::BasicBlock *bbSetInnerMask = + llvm::BasicBlock *bbSetInnerMask = ctx->CreateBasicBlock("partial_inner_only"); ctx->SetCurrentBasicBlock(bbPartialInnerAllOuter); { llvm::Value *counter = ctx->LoadInst(uniformCounterPtrs[nDims-1], "counter"); - llvm::Value *beforeFullEnd = + llvm::Value *beforeFullEnd = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_SLT, counter, endVals[nDims-1], "before_full_end"); ctx->BranchInst(bbSetInnerMask, bbReset[nDims-1], beforeFullEnd); @@ -1755,13 +1755,13 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { // The outer dimensions are all on, so the mask is just given by the // mask for the innermost dimension ctx->SetCurrentBasicBlock(bbSetInnerMask); { - llvm::Value *varyingCounter = - lUpdateVaryingCounter(nDims-1, nDims, ctx, uniformCounterPtrs[nDims-1], + llvm::Value *varyingCounter = + lUpdateVaryingCounter(nDims-1, nDims, ctx, uniformCounterPtrs[nDims-1], dimVariables[nDims-1]->storagePtr, span); llvm::Value *smearEnd = llvm::UndefValue::get(LLVMTypes::Int32VectorType); for (int j = 0; j < g->target.vectorWidth; ++j) smearEnd = ctx->InsertInst(smearEnd, endVals[nDims-1], j, "smear_end"); - llvm::Value *emask = + llvm::Value *emask = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_SLT, varyingCounter, smearEnd); emask = ctx->I1VecToBoolVec(emask); @@ -1778,9 +1778,9 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { // of the statements' code, since the code above is emitted with the // mask known to be all-on, which in turn leads to more efficient code // for that case. - llvm::BasicBlock *bbStepInnerIndex = + llvm::BasicBlock *bbStepInnerIndex = ctx->CreateBasicBlock("step_inner_index"); - llvm::BasicBlock *bbMaskedBodyContinue = + llvm::BasicBlock *bbMaskedBodyContinue = ctx->CreateBasicBlock("foreach_masked_continue"); ctx->SetCurrentBasicBlock(bbMaskedBody); { ctx->AddInstrumentationPoint("foreach loop body (masked)"); @@ -1802,7 +1802,7 @@ ForeachStmt::EmitCode(FunctionEmitContext *ctx) const { // innermost for loop over full vectors. 
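Stripped of the basic-block plumbing, the innermost foreach dimension being emitted here has a simple scalar shape: full vectors run with the mask all on, then a single masked pass covers the ragged remainder. A sketch, with runBody standing in for the emitted loop body (one mask bit per lane):

#include <cstdint>

static void
innermostForeach(int start, int end, int span,
                 void (*runBody)(uint64_t mask, int baseIndex)) {
    int alignedEnd = end - (end - start) % span;
    int counter = start;
    for (; counter < alignedEnd; counter += span)   // "foreach_full_body"
        runBody(~uint64_t(0), counter);             // mask all on
    if (counter < end) {                            // ragged remainder
        uint64_t mask = 0;
        for (int lane = 0; lane < span; ++lane)
            if (counter + lane < end)
                mask |= uint64_t(1) << lane;
        runBody(mask, counter);                     // "partial_inner_only"
    }
}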
ctx->SetCurrentBasicBlock(bbStepInnerIndex); { llvm::Value *counter = ctx->LoadInst(uniformCounterPtrs[nDims-1]); - llvm::Value *newCounter = + llvm::Value *newCounter = ctx->BinaryOperator(llvm::Instruction::Add, counter, LLVMInt32(span[nDims-1]), "new_counter"); ctx->StoreInst(newCounter, uniformCounterPtrs[nDims-1]); @@ -1826,8 +1826,8 @@ ForeachStmt::TypeCheck() { bool anyErrors = false; for (unsigned int i = 0; i < startExprs.size(); ++i) { if (startExprs[i] != NULL) - startExprs[i] = TypeConvertExpr(startExprs[i], - AtomicType::UniformInt32, + startExprs[i] = TypeConvertExpr(startExprs[i], + AtomicType::UniformInt32, "foreach starting value"); anyErrors |= (startExprs[i] == NULL); } @@ -1875,12 +1875,12 @@ ForeachStmt::Print(int indent) const { printf("%*cForeach Stmt", indent, ' '); pos.Print(); printf("\n"); - + for (unsigned int i = 0; i < dimVariables.size(); ++i) if (dimVariables[i] != NULL) - printf("%*cVar %d: %s\n", indent+4, ' ', i, + printf("%*cVar %d: %s\n", indent+4, ' ', i, dimVariables[i]->name.c_str()); - else + else printf("%*cVar %d: NULL\n", indent+4, ' ', i); printf("Start values:\n"); @@ -1917,7 +1917,7 @@ ForeachStmt::Print(int indent) const { /////////////////////////////////////////////////////////////////////////// // ForeachActiveStmt -ForeachActiveStmt::ForeachActiveStmt(Symbol *s, Stmt *st, SourcePos pos) +ForeachActiveStmt::ForeachActiveStmt(Symbol *s, Stmt *st, SourcePos pos) : Stmt(pos) { sym = s; stmts = st; @@ -1926,7 +1926,7 @@ ForeachActiveStmt::ForeachActiveStmt(Symbol *s, Stmt *st, SourcePos pos) void ForeachActiveStmt::EmitCode(FunctionEmitContext *ctx) const { - if (!ctx->GetCurrentBasicBlock()) + if (!ctx->GetCurrentBasicBlock()) return; // Allocate storage for the symbol that we'll use for the uniform @@ -1936,7 +1936,7 @@ ForeachActiveStmt::EmitCode(FunctionEmitContext *ctx) const { Assert(m->errorCount > 0); return; } - Assert(Type::Equal(sym->type, + Assert(Type::Equal(sym->type, AtomicType::UniformInt64->GetAsConstType())); sym->storagePtr = ctx->AllocaInst(LLVMTypes::Int64Type, sym->name.c_str()); @@ -1944,22 +1944,22 @@ ForeachActiveStmt::EmitCode(FunctionEmitContext *ctx) const { ctx->EmitVariableDebugInfo(sym); // The various basic blocks that we'll need in the below - llvm::BasicBlock *bbFindNext = + llvm::BasicBlock *bbFindNext = ctx->CreateBasicBlock("foreach_active_find_next"); llvm::BasicBlock *bbBody = ctx->CreateBasicBlock("foreach_active_body"); - llvm::BasicBlock *bbCheckForMore = + llvm::BasicBlock *bbCheckForMore = ctx->CreateBasicBlock("foreach_active_check_for_more"); llvm::BasicBlock *bbDone = ctx->CreateBasicBlock("foreach_active_done"); // Save the old mask so that we can restore it at the end llvm::Value *oldInternalMask = ctx->GetInternalMask(); - + // Now, *maskBitsPtr will maintain a bitmask for the lanes that remain // to be processed by a pass through the loop body. It starts out with // the current execution mask (which should never be all off going in // to this)... 
llvm::Value *oldFullMask = ctx->GetFullMask(); - llvm::Value *maskBitsPtr = + llvm::Value *maskBitsPtr = ctx->AllocaInst(LLVMTypes::Int64Type, "mask_bits"); llvm::Value *movmsk = ctx->LaneMask(oldFullMask); ctx->StoreInst(movmsk, maskBitsPtr); @@ -1971,13 +1971,13 @@ ForeachActiveStmt::EmitCode(FunctionEmitContext *ctx) const { // Onward to find the first set of program instances to run the loop for ctx->BranchInst(bbFindNext); - + ctx->SetCurrentBasicBlock(bbFindNext); { // Load the bitmask of the lanes left to be processed llvm::Value *remainingBits = ctx->LoadInst(maskBitsPtr, "remaining_bits"); // Find the index of the first set bit in the mask - llvm::Function *ctlzFunc = + llvm::Function *ctlzFunc = m->module->getFunction("__count_trailing_zeros_i64"); Assert(ctlzFunc != NULL); llvm::Value *firstSet = ctx->CallInst(ctlzFunc, NULL, remainingBits, @@ -1993,20 +1993,20 @@ ForeachActiveStmt::EmitCode(FunctionEmitContext *ctx) const { // math...) // Get the "program index" vector value - llvm::Value *programIndex = + llvm::Value *programIndex = llvm::UndefValue::get(LLVMTypes::Int32VectorType); for (int i = 0; i < g->target.vectorWidth; ++i) programIndex = ctx->InsertInst(programIndex, LLVMInt32(i), i, "prog_index"); // And smear the current lane out to a vector - llvm::Value *firstSet32 = + llvm::Value *firstSet32 = ctx->TruncInst(firstSet, LLVMTypes::Int32Type, "first_set32"); llvm::Value *firstSet32Smear = ctx->SmearUniform(firstSet32); // Now set the execution mask based on doing a vector compare of // these two - llvm::Value *iterMask = + llvm::Value *iterMask = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_EQ, firstSet32Smear, programIndex); iterMask = ctx->I1VecToBoolVec(iterMask); @@ -2015,12 +2015,12 @@ ForeachActiveStmt::EmitCode(FunctionEmitContext *ctx) const { // Also update the bitvector of lanes left to turn off the bit for // the lane we're about to run. - llvm::Value *setMask = + llvm::Value *setMask = ctx->BinaryOperator(llvm::Instruction::Shl, LLVMInt64(1), firstSet, "set_mask"); llvm::Value *notSetMask = ctx->NotOperator(setMask); - llvm::Value *newRemaining = - ctx->BinaryOperator(llvm::Instruction::And, remainingBits, + llvm::Value *newRemaining = + ctx->BinaryOperator(llvm::Instruction::And, remainingBits, notSetMask, "new_remaining"); ctx->StoreInst(newRemaining, maskBitsPtr); @@ -2046,7 +2046,7 @@ ForeachActiveStmt::EmitCode(FunctionEmitContext *ctx) const { // the loop that jumps to the end, see if there are any lanes left // to be processed.
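The blocks above implement a standard one-lane-at-a-time scan of a bitmask. The same loop in plain C++, with __builtin_ctzll standing in for the __count_trailing_zeros_i64 builtin and body for the emitted loop body (both stand-ins, not ispc API):

#include <cstdint>

static void
foreachActive(uint64_t executionMask, void (*body)(int lane)) {
    uint64_t remaining = executionMask;          // "mask_bits"
    while (remaining != 0) {                     // "remaining_ne_zero"
        int first = __builtin_ctzll(remaining);  // first set bit: next lane
        remaining &= ~(uint64_t(1) << first);    // retire that lane
        body(first);                             // run with only this lane on
    }
}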
llvm::Value *remainingBits = ctx->LoadInst(maskBitsPtr, "remaining_bits"); - llvm::Value *nonZero = + llvm::Value *nonZero = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_NE, remainingBits, LLVMInt64(0), "remaining_ne_zero"); ctx->BranchInst(bbFindNext, bbDone, nonZero); @@ -2102,8 +2102,8 @@ ForeachActiveStmt::EstimateCost() const { /////////////////////////////////////////////////////////////////////////// // ForeachUniqueStmt -ForeachUniqueStmt::ForeachUniqueStmt(const char *iterName, Expr *e, - Stmt *s, SourcePos pos) +ForeachUniqueStmt::ForeachUniqueStmt(const char *iterName, Expr *e, + Stmt *s, SourcePos pos) : Stmt(pos) { sym = m->symbolTable->LookupVariable(iterName); expr = e; @@ -2113,7 +2113,7 @@ ForeachUniqueStmt::ForeachUniqueStmt(const char *iterName, Expr *e, void ForeachUniqueStmt::EmitCode(FunctionEmitContext *ctx) const { - if (!ctx->GetCurrentBasicBlock()) + if (!ctx->GetCurrentBasicBlock()) return; // First, allocate local storage for the symbol that we'll use for the @@ -2167,7 +2167,7 @@ ForeachUniqueStmt::EmitCode(FunctionEmitContext *ctx) const { llvm::VectorType *llvmExprType; if (exprValue == NULL || (exprType = expr->GetType()) == NULL || - (llvmExprType = + (llvmExprType = llvm::dyn_cast(exprValue->getType())) == NULL) { Assert(m->errorCount > 0); return; @@ -2179,13 +2179,13 @@ ForeachUniqueStmt::EmitCode(FunctionEmitContext *ctx) const { // Onward to find the first set of lanes to run the loop for ctx->BranchInst(bbFindNext); - + ctx->SetCurrentBasicBlock(bbFindNext); { // Load the bitmask of the lanes left to be processed llvm::Value *remainingBits = ctx->LoadInst(maskBitsPtr, "remaining_bits"); // Find the index of the first set bit in the mask - llvm::Function *ctlzFunc = + llvm::Function *ctlzFunc = m->module->getFunction("__count_trailing_zeros_i64"); Assert(ctlzFunc != NULL); llvm::Value *firstSet = ctx->CallInst(ctlzFunc, NULL, remainingBits, @@ -2193,7 +2193,7 @@ ForeachUniqueStmt::EmitCode(FunctionEmitContext *ctx) const { // And load the corresponding element value from the temporary // memory storing the value of the varying expr. 
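foreach_unique generalizes that bit-scan loop: each pass takes the value held by the first unprocessed lane, then runs the body once for every lane holding that same value. A sketch of the loop being assembled here, with laneValues and body as stand-ins for the spilled varying expression and the emitted body:

#include <cstdint>

template <typename T>
static void
foreachUnique(uint64_t mask, const T *laneValues, int vectorWidth,
              void (*body)(T uniqueValue, uint64_t matchingLanes)) {
    uint64_t remaining = mask;
    while (remaining != 0) {
        int first = __builtin_ctzll(remaining);
        T unique = laneValues[first];                  // "unique_value"
        uint64_t matching = 0;                         // smear + compare-equal
        for (int i = 0; i < vectorWidth; ++i)
            if ((mask & (uint64_t(1) << i)) && laneValues[i] == unique)
                matching |= uint64_t(1) << i;
        remaining &= ~matching;                        // remainingBits &= ~movmsk
        body(unique, matching);
    }
}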
- llvm::Value *uniqueValuePtr = + llvm::Value *uniqueValuePtr = ctx->GetElementPtrInst(exprMem, LLVMInt64(0), firstSet, exprPtrType, "unique_index_ptr"); llvm::Value *uniqueValue = ctx->LoadInst(uniqueValuePtr, "unique_value"); @@ -2215,16 +2215,16 @@ ForeachUniqueStmt::EmitCode(FunctionEmitContext *ctx) const { llvm::Value *uniqueSmear = ctx->SmearUniform(uniqueValue, "unique_smear"); llvm::Value *matchingLanes = NULL; if (uniqueValue->getType()->isFloatingPointTy()) - matchingLanes = + matchingLanes = ctx->CmpInst(llvm::Instruction::FCmp, llvm::CmpInst::FCMP_OEQ, uniqueSmear, exprValue, "matching_lanes"); else - matchingLanes = + matchingLanes = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_EQ, uniqueSmear, exprValue, "matching_lanes"); matchingLanes = ctx->I1VecToBoolVec(matchingLanes); - llvm::Value *loopMask = + llvm::Value *loopMask = ctx->BinaryOperator(llvm::Instruction::And, oldMask, matchingLanes, "foreach_unique_loop_mask"); ctx->SetInternalMask(loopMask); @@ -2234,8 +2234,8 @@ ForeachUniqueStmt::EmitCode(FunctionEmitContext *ctx) const { // remainingBits &= ~movmsk(current mask) llvm::Value *loopMaskMM = ctx->LaneMask(loopMask); llvm::Value *notLoopMaskMM = ctx->NotOperator(loopMaskMM); - llvm::Value *newRemaining = - ctx->BinaryOperator(llvm::Instruction::And, remainingBits, + llvm::Value *newRemaining = + ctx->BinaryOperator(llvm::Instruction::And, remainingBits, notLoopMaskMM, "new_remaining"); ctx->StoreInst(newRemaining, maskBitsPtr); @@ -2260,7 +2260,7 @@ ForeachUniqueStmt::EmitCode(FunctionEmitContext *ctx) const { // to be processed. ctx->RestoreContinuedLanes(); llvm::Value *remainingBits = ctx->LoadInst(maskBitsPtr, "remaining_bits"); - llvm::Value *nonZero = + llvm::Value *nonZero = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_NE, remainingBits, LLVMInt64(0), "remaining_ne_zero"); ctx->BranchInst(bbFindNext, bbDone, nonZero); @@ -2359,7 +2359,7 @@ lCheckMask(Stmt *stmts) { } -CaseStmt::CaseStmt(int v, Stmt *s, SourcePos pos) +CaseStmt::CaseStmt(int v, Stmt *s, SourcePos pos) : Stmt(pos), value(v) { stmts = s; } @@ -2397,7 +2397,7 @@ CaseStmt::EstimateCost() const { /////////////////////////////////////////////////////////////////////////// // DefaultStmt -DefaultStmt::DefaultStmt(Stmt *s, SourcePos pos) +DefaultStmt::DefaultStmt(Stmt *s, SourcePos pos) : Stmt(pos) { stmts = s; } @@ -2435,7 +2435,7 @@ DefaultStmt::EstimateCost() const { /////////////////////////////////////////////////////////////////////////// // SwitchStmt -SwitchStmt::SwitchStmt(Expr *e, Stmt *s, SourcePos pos) +SwitchStmt::SwitchStmt(Expr *e, Stmt *s, SourcePos pos) : Stmt(pos) { expr = e; stmts = s; @@ -2447,9 +2447,9 @@ SwitchStmt::SwitchStmt(Expr *e, Stmt *s, SourcePos pos) structure to record all of the 'case' and 'default' statements after the "switch". 
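SwitchVisitInfo's caseBlocks member is a flat list of (case value, basic block) pairs; lSwitchASTPreVisit() below scans it linearly to reject duplicate labels. That check in isolation (Block is a placeholder for llvm::BasicBlock; recordCase is hypothetical):

#include <utility>
#include <vector>

struct Block;   // placeholder for llvm::BasicBlock

// Returns false if 'value' was already recorded: the case where ispc
// issues its "Duplicate case value" error.
static bool
recordCase(std::vector<std::pair<int, Block *>> &caseBlocks, int value,
           Block *block) {
    for (const auto &cb : caseBlocks)
        if (cb.first == value)
            return false;
    caseBlocks.push_back(std::make_pair(value, block));
    return true;
}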
*/ struct SwitchVisitInfo { - SwitchVisitInfo(FunctionEmitContext *c) { + SwitchVisitInfo(FunctionEmitContext *c) { ctx = c; - defaultBlock = NULL; + defaultBlock = NULL; lastBlock = NULL; } @@ -2491,11 +2491,11 @@ lSwitchASTPreVisit(ASTNode *node, void *d) { // already for (int i = 0; i < (int)svi->caseBlocks.size(); ++i) { if (svi->caseBlocks[i].first == cs->value) { - Error(cs->pos, "Duplicate case value \"%d\".", cs->value); + Error(cs->pos, "Duplicate case value \"%d\".", cs->value); return true; } } - + // Otherwise create a new basic block for the code following this // 'case' statement and record the mapping between the case label // value and the basic block @@ -2689,14 +2689,14 @@ UnmaskedStmt::EstimateCost() const { /////////////////////////////////////////////////////////////////////////// // ReturnStmt -ReturnStmt::ReturnStmt(Expr *e, SourcePos p) +ReturnStmt::ReturnStmt(Expr *e, SourcePos p) : Stmt(p), expr(e) { } void ReturnStmt::EmitCode(FunctionEmitContext *ctx) const { - if (!ctx->GetCurrentBasicBlock()) + if (!ctx->GetCurrentBasicBlock()) return; if (ctx->InForeachLoop()) { @@ -2756,7 +2756,7 @@ ReturnStmt::Print(int indent) const { /////////////////////////////////////////////////////////////////////////// // GotoStmt -GotoStmt::GotoStmt(const char *l, SourcePos gotoPos, SourcePos ip) +GotoStmt::GotoStmt(const char *l, SourcePos gotoPos, SourcePos ip) : Stmt(gotoPos) { label = l; identifierPos = ip; @@ -2765,7 +2765,7 @@ GotoStmt::GotoStmt(const char *l, SourcePos gotoPos, SourcePos ip) void GotoStmt::EmitCode(FunctionEmitContext *ctx) const { - if (!ctx->GetCurrentBasicBlock()) + if (!ctx->GetCurrentBasicBlock()) return; if (ctx->VaryingCFDepth() > 0) { @@ -2793,7 +2793,7 @@ GotoStmt::EmitCode(FunctionEmitContext *ctx) const { } /* Label wasn't found. Emit an error */ - Error(identifierPos, + Error(identifierPos, "No label named \"%s\" found in current function.%s", label.c_str(), match_output.c_str()); @@ -2832,7 +2832,7 @@ GotoStmt::EstimateCost() const { /////////////////////////////////////////////////////////////////////////// // LabeledStmt -LabeledStmt::LabeledStmt(const char *n, Stmt *s, SourcePos p) +LabeledStmt::LabeledStmt(const char *n, Stmt *s, SourcePos p) : Stmt(p) { name = n; stmt = s; @@ -2934,7 +2934,7 @@ StmtList::Print(int indent) const { /////////////////////////////////////////////////////////////////////////// // PrintStmt -PrintStmt::PrintStmt(const std::string &f, Expr *v, SourcePos p) +PrintStmt::PrintStmt(const std::string &f, Expr *v, SourcePos p) : Stmt(p), format(f), values(v) { } @@ -2995,11 +2995,11 @@ lProcessPrintArg(Expr *expr, FunctionEmitContext *ctx, std::string &argTypes) { Type::Equal(baseType, AtomicType::UniformInt16) || Type::Equal(baseType, AtomicType::UniformUInt16)) { expr = new TypeCastExpr(type->IsUniformType() ? AtomicType::UniformInt32 : - AtomicType::VaryingInt32, + AtomicType::VaryingInt32, expr, expr->pos); type = expr->GetType(); } - + char t = lEncodeType(type->GetAsNonConstType()); if (t == '\0') { Error(expr->pos, "Only atomic types are allowed in print statements; " @@ -3030,7 +3030,7 @@ lProcessPrintArg(Expr *expr, FunctionEmitContext *ctx, std::string &argTypes) { */ void PrintStmt::EmitCode(FunctionEmitContext *ctx) const { - if (!ctx->GetCurrentBasicBlock()) + if (!ctx->GetCurrentBasicBlock()) return; ctx->SetDebugPos(pos); @@ -3039,7 +3039,7 @@ PrintStmt::EmitCode(FunctionEmitContext *ctx) const { // in the code emitted below // // 1. the format string - // 2.
a string encoding the types of the values being printed, + // 2. a string encoding the types of the values being printed, // one character per value // 3. the number of running program instances (i.e. the target's // vector width) @@ -3049,7 +3049,7 @@ PrintStmt::EmitCode(FunctionEmitContext *ctx) const { std::string argTypes; if (values == NULL) { - llvm::Type *ptrPtrType = + llvm::Type *ptrPtrType = llvm::PointerType::get(LLVMTypes::VoidPointerType, 0); args[4] = llvm::Constant::getNullValue(ptrPtrType); } @@ -3060,14 +3060,14 @@ PrintStmt::EmitCode(FunctionEmitContext *ctx) const { ExprList *elist = dynamic_cast(values); int nArgs = elist ? elist->exprs.size() : 1; - // Allocate space for the array of pointers to values to be printed - llvm::Type *argPtrArrayType = + // Allocate space for the array of pointers to values to be printed + llvm::Type *argPtrArrayType = llvm::ArrayType::get(LLVMTypes::VoidPointerType, nArgs); llvm::Value *argPtrArray = ctx->AllocaInst(argPtrArrayType, "print_arg_ptrs"); // Store the array pointer as a void **, which is what __do_print() // expects - args[4] = ctx->BitCastInst(argPtrArray, + args[4] = ctx->BitCastInst(argPtrArray, llvm::PointerType::get(LLVMTypes::VoidPointerType, 0)); // Now, for each of the arguments, emit code to evaluate its value @@ -3131,14 +3131,14 @@ PrintStmt::EstimateCost() const { /////////////////////////////////////////////////////////////////////////// // AssertStmt -AssertStmt::AssertStmt(const std::string &msg, Expr *e, SourcePos p) +AssertStmt::AssertStmt(const std::string &msg, Expr *e, SourcePos p) : Stmt(p), message(msg), expr(e) { } void AssertStmt::EmitCode(FunctionEmitContext *ctx) const { - if (!ctx->GetCurrentBasicBlock()) + if (!ctx->GetCurrentBasicBlock()) return; const Type *type; @@ -3151,14 +3151,14 @@ AssertStmt::EmitCode(FunctionEmitContext *ctx) const { // The actual functionality to do the check and then handle failure is // done via a builtin written in bitcode in builtins/util.m4. - llvm::Function *assertFunc = + llvm::Function *assertFunc = isUniform ? m->module->getFunction("__do_assert_uniform") : m->module->getFunction("__do_assert_varying"); AssertPos(pos, assertFunc != NULL); char *errorString; - if (asprintf(&errorString, "%s:%d:%d: Assertion failed: %s", - pos.name, pos.first_line, pos.first_column, + if (asprintf(&errorString, "%s:%d:%d: Assertion failed: %s", + pos.name, pos.first_line, pos.first_column, message.c_str()) == -1) { Error(pos, "Fatal error when generating assert string: asprintf() " "unable to allocate memory!"); @@ -3191,8 +3191,8 @@ AssertStmt::TypeCheck() { const Type *type; if (expr && (type = expr->GetType()) != NULL) { bool isUniform = type->IsUniformType(); - expr = TypeConvertExpr(expr, isUniform ? AtomicType::UniformBool : - AtomicType::VaryingBool, + expr = TypeConvertExpr(expr, isUniform ? 
AtomicType::UniformBool : + AtomicType::VaryingBool, "\"assert\" statement"); if (expr == NULL) return NULL; @@ -3218,7 +3218,7 @@ DeleteStmt::DeleteStmt(Expr *e, SourcePos p) void DeleteStmt::EmitCode(FunctionEmitContext *ctx) const { - if (!ctx->GetCurrentBasicBlock()) + if (!ctx->GetCurrentBasicBlock()) return; const Type *exprType; diff --git a/stmt.h b/stmt.h index 380bc8d9..7ed1f0ef 100644 --- a/stmt.h +++ b/stmt.h @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file stmt.h @@ -84,8 +84,8 @@ public: struct VariableDeclaration { - VariableDeclaration(Symbol *s = NULL, Expr *i = NULL) { - sym = s; init = i; + VariableDeclaration(Symbol *s = NULL, Expr *i = NULL) { + sym = s; init = i; } Symbol *sym; Expr *init; @@ -137,12 +137,12 @@ private: 'false' blocks. */ const bool doAllCheck; - void emitMaskedTrueAndFalse(FunctionEmitContext *ctx, llvm::Value *oldMask, + void emitMaskedTrueAndFalse(FunctionEmitContext *ctx, llvm::Value *oldMask, llvm::Value *test) const; void emitVaryingIf(FunctionEmitContext *ctx, llvm::Value *test) const; void emitMaskAllOn(FunctionEmitContext *ctx, llvm::Value *test, llvm::BasicBlock *bDone) const; - void emitMaskMixed(FunctionEmitContext *ctx, llvm::Value *oldMask, + void emitMaskMixed(FunctionEmitContext *ctx, llvm::Value *oldMask, llvm::Value *test, llvm::BasicBlock *bDone) const; }; @@ -152,7 +152,7 @@ private: */ class DoStmt : public Stmt { public: - DoStmt(Expr *testExpr, Stmt *bodyStmts, bool doCoherentCheck, + DoStmt(Expr *testExpr, Stmt *bodyStmts, bool doCoherentCheck, SourcePos pos); void EmitCode(FunctionEmitContext *ctx) const; @@ -227,9 +227,9 @@ public: */ class ForeachStmt : public Stmt { public: - ForeachStmt(const std::vector &loopVars, - const std::vector &startExprs, - const std::vector &endExprs, + ForeachStmt(const std::vector &loopVars, + const std::vector &startExprs, + const std::vector &endExprs, Stmt *bodyStatements, bool tiled, SourcePos pos); void EmitCode(FunctionEmitContext *ctx) const; @@ -283,7 +283,7 @@ public: }; -/** +/** */ class UnmaskedStmt : public Stmt { public: @@ -493,7 +493,7 @@ public: Expr *expr; }; -extern Stmt *CreateForeachActiveStmt(Symbol *iterSym, Stmt *stmts, +extern Stmt *CreateForeachActiveStmt(Symbol *iterSym, Stmt *stmts, SourcePos pos); #endif // ISPC_STMT_H diff --git a/sym.cpp b/sym.cpp index 42d1f66f..f16f5e11 100644 --- a/sym.cpp +++ b/sym.cpp @@ -28,11 +28,11 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file sym.cpp - @brief file with definitions for symbol and symbol table classes. + @brief file with definitions for symbol and symbol table classes. 
*/ #include "sym.h" @@ -43,8 +43,8 @@ /////////////////////////////////////////////////////////////////////////// // Symbol -Symbol::Symbol(const std::string &n, SourcePos p, const Type *t, - StorageClass sc) +Symbol::Symbol(const std::string &n, SourcePos p, const Type *t, + StorageClass sc) : pos(p), name(n) { storagePtr = NULL; function = exportedFunction = NULL; @@ -72,7 +72,7 @@ SymbolTable::~SymbolTable() { void -SymbolTable::PushScope() { +SymbolTable::PushScope() { SymbolMapType *sm; if (freeSymbolMaps.size() > 0) { sm = freeSymbolMaps.back(); @@ -87,7 +87,7 @@ SymbolTable::PushScope() { void -SymbolTable::PopScope() { +SymbolTable::PopScope() { Assert(variables.size() > 1); freeSymbolMaps.push_back(variables.back()); variables.pop_back(); @@ -105,14 +105,14 @@ SymbolTable::AddVariable(Symbol *symbol) { if (i == (int)variables.size()-1) { // If a symbol of the same name was declared in the // same scope, it's an error. - Error(symbol->pos, "Ignoring redeclaration of symbol \"%s\".", + Error(symbol->pos, "Ignoring redeclaration of symbol \"%s\".", symbol->name.c_str()); return false; } else { // Otherwise it's just shadowing something else, which // is legal but dangerous.. - Warning(symbol->pos, + Warning(symbol->pos, "Symbol \"%s\" shadows symbol declared in outer scope.", symbol->name.c_str()); (*variables.back())[symbol->name] = symbol; @@ -308,7 +308,7 @@ SymbolTable::Print() { for (iter = sm.begin(); iter != sm.end(); ++iter) { fprintf(stderr, "%*c", depth, ' '); Symbol *sym = iter->second; - fprintf(stderr, "%s [%s]", sym->name.c_str(), + fprintf(stderr, "%s [%s]", sym->name.c_str(), sym->type->GetString().c_str()); } fprintf(stderr, "\n"); diff --git a/sym.h b/sym.h index 07bbe187..efb532a3 100644 --- a/sym.h +++ b/sym.h @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file sym.h @@ -75,7 +75,7 @@ public: location in memory.) */ llvm::Function *function; /*!< For symbols that represent functions, this stores the LLVM Function value for - the symbol once it has been created. */ + the symbol once it has been created. */ llvm::Function *exportedFunction; /*!< For symbols that represent functions with 'export' qualifiers, this points to the LLVM @@ -91,18 +91,18 @@ public: struct types. For cases like these, ConstExpr is NULL, though for all const symbols, the value pointed to by the storagePtr member will be its constant value. (This - messiness is due to needing an ispc ConstExpr for the early + messiness is due to needing an ispc ConstExpr for the early constant folding optimizations). */ StorageClass storageClass;/*!< Records the storage class (if any) provided with the symbol's declaration. */ - int varyingCFDepth; /*!< This member records the number of levels of nested 'varying' + int varyingCFDepth; /*!< This member records the number of levels of nested 'varying' control flow within which the symbol was declared. Having this value available makes it possible to avoid performing masked stores when modifying the symbol's value when the - store is done at the same 'varying' control flow depth as + store is done at the same 'varying' control flow depth as the one where the symbol was originally declared. 
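The scope handling above (PushScope(), PopScope(), AddVariable()) amounts to a stack of maps: redeclaration in the innermost scope is an error, while shadowing an outer scope is allowed with a warning. A toy model with std::map<std::string, int> in place of SymbolMapType and Symbol (illustrative only, not ispc API):

#include <map>
#include <string>
#include <vector>

struct MiniSymbolTable {
    MiniSymbolTable() { scopes.emplace_back(); }  // the outermost scope

    void pushScope() { scopes.emplace_back(); }
    void popScope() { scopes.pop_back(); }        // ispc asserts size() > 1

    bool add(const std::string &name, int value) {
        if (scopes.back().count(name))
            return false;                         // same-scope redeclaration
        scopes.back()[name] = value;              // may shadow an outer scope
        return true;
    }

    const int *lookup(const std::string &name) const {
        for (size_t i = scopes.size(); i-- != 0;) {  // innermost scope first
            auto it = scopes[i].find(name);
            if (it != scopes[i].end())
                return &it->second;
        }
        return nullptr;
    }

    std::vector<std::map<std::string, int>> scopes;
};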
*/ - const Function *parentFunction; - /*!< For symbols that are parameters to functions or are + const Function *parentFunction; + /*!< For symbols that are parameters to functions or are variables declared inside functions, this gives the function they're in. */ }; @@ -130,7 +130,7 @@ public: /** For each scope started by a call to SymbolTable::PushScope(), there must be a matching call to SymbolTable::PopScope() at the end of - that scope. */ + that scope. */ void PopScope(); /** Adds the given variable symbol to the symbol table. @@ -147,7 +147,7 @@ public: returning the first match found. @param name The name of the variable to be searched for. - @return A pointer to the Symbol, if a match is found. NULL if no + @return A pointer to the Symbol, if a match is found. NULL if no Symbol with the given name is in the symbol table. */ Symbol *LookupVariable(const char *name); @@ -165,7 +165,7 @@ public: be returned in the provided vector and it's up to the caller to resolve which one (if any) to use. Returns true if any matches were found. */ - bool LookupFunction(const char *name, + bool LookupFunction(const char *name, std::vector *matches = NULL); /** Looks for a function with the given name and type @@ -174,28 +174,28 @@ public: @return pointer to matching Symbol; NULL if none is found. */ Symbol *LookupFunction(const char *name, const FunctionType *type); - /** Returns all of the functions in the symbol table that match the given + /** Returns all of the functions in the symbol table that match the given predicate. - @param pred A unary predicate that returns true or false, given a Symbol - pointer, based on whether the symbol should be included in the returned - set of matches. It can either be a function, with signature - bool pred(const Symbol *s), or a unary predicate object with + @param pred A unary predicate that returns true or false, given a Symbol + pointer, based on whether the symbol should be included in the returned + set of matches. It can either be a function, with signature + bool pred(const Symbol *s), or a unary predicate object with a bool operator()(const Symbol *) method. @param matches Pointer to a vector in which to return the matching - symbols. + symbols. */ - template - void GetMatchingFunctions(Predicate pred, + template + void GetMatchingFunctions(Predicate pred, std::vector *matches) const; /** Returns all of the variable symbols in the symbol table that match the given predicate. The predicate is defined as in the GetMatchingFunctions() method. */ - template - void GetMatchingVariables(Predicate pred, + template + void GetMatchingVariables(Predicate pred, std::vector *matches) const; /** Adds the named type to the symbol table. This is used for both @@ -210,7 +210,7 @@ public: @param pos Position in source file where the type was named @return true if the named type was successfully added. False if a type with the same name has already been defined. - + */ bool AddType(const char *name, const Type *type, SourcePos pos); @@ -248,7 +248,7 @@ public: const Type *RandomType(); private: - std::vector closestTypeMatch(const char *str, + std::vector closestTypeMatch(const char *str, bool structsVsEnums) const; /** This member variable holds one SymbolMap for each of the current @@ -278,7 +278,7 @@ private: template void -SymbolTable::GetMatchingFunctions(Predicate pred, +SymbolTable::GetMatchingFunctions(Predicate pred, std::vector *matches) const { // Iterate through all function symbols and apply the given predicate.
// If it returns true, add the Symbol * to the provided vector. @@ -294,7 +294,7 @@ SymbolTable::GetMatchingFunctions(Predicate pred, template void -SymbolTable::GetMatchingVariables(Predicate pred, +SymbolTable::GetMatchingVariables(Predicate pred, std::vector *matches) const { for (unsigned int i = 0; i < variables.size(); ++i) { SymbolMapType &sm = *(variables[i]); diff --git a/test_static.cpp b/test_static.cpp index ec91960e..8985fdb3 100644 --- a/test_static.cpp +++ b/test_static.cpp @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #if defined(_WIN32) || defined(_WIN64) @@ -61,7 +61,7 @@ extern "C" { extern void f_duf(float *result, double *a, float b); extern void f_di(float *result, double *a, int *b); extern void result(float *val); - + void ISPCLaunch(void **handlePtr, void *f, void *d, int); void ISPCSync(void *handle); void *ISPCAlloc(void **handlePtr, int64_t size, int32_t alignment); @@ -141,7 +141,7 @@ int main(int argc, char *argv[]) { f_di(returned_result, vdouble, vint2); #else #error "Unknown or unset TEST_SIG value" -#endif +#endif float expected_result[64]; memset(expected_result, 0, 64*sizeof(float)); @@ -155,7 +155,7 @@ int main(int argc, char *argv[]) { return 1; #else printf("%s: value %d disagrees: returned %f [%a], expected %f [%a]\n", - argv[0], i, returned_result[i], returned_result[i], + argv[0], i, returned_result[i], returned_result[i], expected_result[i], expected_result[i]); ++errors; #endif // EXPECT_FAILURE diff --git a/type.cpp b/type.cpp index d35baf05..45b8e4d6 100644 --- a/type.cpp +++ b/type.cpp @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file type.cpp @@ -79,7 +79,7 @@ lShouldPrintName(const std::string &name) { /** Utility routine to create a llvm DIArray type of the given number of the given element type. 
*/ -static llvm::DIType +static llvm::DIType lCreateDIArray(llvm::DIType eltType, int count) { int lowerBound = 0, upperBound = count-1; @@ -125,9 +125,9 @@ Variability::GetString() const { std::string Variability::MangleString() const { switch (type) { - case Uniform: + case Uniform: return "un"; - case Varying: + case Varying: return "vy"; case SOA: { char buf[32]; @@ -189,11 +189,11 @@ const AtomicType *AtomicType::UniformDouble = new AtomicType(AtomicType::TYPE_DOUBLE, Variability::Uniform, false); const AtomicType *AtomicType::VaryingDouble = new AtomicType(AtomicType::TYPE_DOUBLE, Variability::Varying, false); -const AtomicType *AtomicType::Void = +const AtomicType *AtomicType::Void = new AtomicType(TYPE_VOID, Variability::Uniform, false); -AtomicType::AtomicType(BasicType bt, Variability v, bool ic) +AtomicType::AtomicType(BasicType bt, Variability v, bool ic) : Type(ATOMIC_TYPE), basicType(bt), variability(v), isConst(ic) { asOtherConstType = NULL; asUniformType = asVaryingType = NULL; @@ -255,14 +255,14 @@ AtomicType::IsBoolType() const { bool -AtomicType::IsConstType() const { - return isConst; +AtomicType::IsConstType() const { + return isConst; } const AtomicType * AtomicType::GetAsUnsignedType() const { - if (IsUnsignedType() == true) + if (IsUnsignedType() == true) return this; if (IsIntType() == false) @@ -286,9 +286,9 @@ AtomicType::GetAsUnsignedType() const { const AtomicType * AtomicType::GetAsConstType() const { - if (basicType == TYPE_VOID || isConst == true) + if (basicType == TYPE_VOID || isConst == true) return this; - + if (asOtherConstType == NULL) { asOtherConstType = new AtomicType(basicType, variability, true); asOtherConstType->asOtherConstType = this; @@ -299,7 +299,7 @@ AtomicType::GetAsConstType() const { const AtomicType * AtomicType::GetAsNonConstType() const { - if (basicType == TYPE_VOID || isConst == false) + if (basicType == TYPE_VOID || isConst == false) return this; if (asOtherConstType == NULL) { @@ -429,7 +429,7 @@ AtomicType::Mangle() const { std::string AtomicType::GetCDeclaration(const std::string &name) const { std::string ret; - if (variability != Variability::Uniform && + if (variability != Variability::Uniform && variability != Variability::SOA) { Assert(m->errorCount > 0); return ret; @@ -583,7 +583,7 @@ AtomicType::GetDIType(llvm::DIDescriptor scope) const { /////////////////////////////////////////////////////////////////////////// // EnumType -EnumType::EnumType(SourcePos p) +EnumType::EnumType(SourcePos p) : Type(ENUM_TYPE), pos(p) { // name = "/* (anonymous) */"; isConst = false; @@ -591,7 +591,7 @@ EnumType::EnumType(SourcePos p) } -EnumType::EnumType(const char *n, SourcePos p) +EnumType::EnumType(const char *n, SourcePos p) : Type(ENUM_TYPE), pos(p), name(n) { isConst = false; variability = Variability(Variability::Unbound); @@ -604,31 +604,31 @@ EnumType::GetVariability() const { } -bool +bool EnumType::IsBoolType() const { return false; } -bool +bool EnumType::IsFloatType() const { return false; } -bool +bool EnumType::IsIntType() const { return true; } -bool +bool EnumType::IsUnsignedType() const { return true; } -bool +bool EnumType::IsConstType() const { return isConst; } @@ -724,7 +724,7 @@ EnumType::GetAsNonConstType() const { } -std::string +std::string EnumType::GetString() const { std::string ret; if (isConst) ret += "const "; @@ -737,7 +737,7 @@ EnumType::GetString() const { } -std::string +std::string EnumType::Mangle() const { Assert(variability != Variability::Unbound); @@ -749,7 +749,7 @@ EnumType::Mangle() const { } 
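For reference, Variability::MangleString() above maps uniform to "un", varying to "vy", and SOA to an "soa" form; the hunk does not show the exact SOA format string, so the "soa%d" below is an assumption. A standalone restatement (MiniVariability is illustrative, not the ispc enum):

#include <cstdio>
#include <string>

enum MiniVariability { kUniform, kVarying, kSOA };

static std::string
mangleVariability(MiniVariability v, int soaWidth) {
    switch (v) {
    case kUniform: return "un";
    case kVarying: return "vy";
    case kSOA: {
        char buf[32];
        snprintf(buf, sizeof(buf), "soa%d", soaWidth);  // assumed format
        return buf;
    }
    }
    return "";
}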
-std::string +std::string EnumType::GetCDeclaration(const std::string &varName) const { if (variability != Variability::Uniform && variability != Variability::SOA) { @@ -798,7 +798,7 @@ EnumType::LLVMType(llvm::LLVMContext *ctx) const { } -llvm::DIType +llvm::DIType EnumType::GetDIType(llvm::DIDescriptor scope) const { std::vector enumeratorDescriptors; for (unsigned int i = 0; i < enumerators.size(); ++i) { @@ -807,11 +807,11 @@ EnumType::GetDIType(llvm::DIDescriptor scope) const { int count = enumerators[i]->constValue->AsUInt32(&enumeratorValue); Assert(count == 1); - llvm::Value *descriptor = + llvm::Value *descriptor = m->diBuilder->createEnumerator(enumerators[i]->name, enumeratorValue); enumeratorDescriptors.push_back(descriptor); } - llvm::DIArray elementArray = + llvm::DIArray elementArray = m->diBuilder->getOrCreateArray(enumeratorDescriptors); llvm::DIFile diFile = pos.GetDIFile(); @@ -867,11 +867,11 @@ EnumType::GetEnumerator(int i) const { /////////////////////////////////////////////////////////////////////////// // PointerType -PointerType *PointerType::Void = +PointerType *PointerType::Void = new PointerType(AtomicType::Void, Variability(Variability::Uniform), false); -PointerType::PointerType(const Type *t, Variability v, bool ic, bool is, +PointerType::PointerType(const Type *t, Variability v, bool ic, bool is, bool fr) : Type(POINTER_TYPE), variability(v), isConst(ic), isSlice(is), isFrozen(fr) { baseType = t; @@ -1013,7 +1013,7 @@ PointerType::ResolveUnboundVariability(Variability v) const { Assert(v != Variability::Unbound); Variability ptrVariability = (variability == Variability::Unbound) ? v : variability; - const Type *resolvedBaseType = + const Type *resolvedBaseType = baseType->ResolveUnboundVariability(Variability::Uniform); return new PointerType(resolvedBaseType, ptrVariability, isConst, isSlice, isFrozen); @@ -1038,7 +1038,7 @@ PointerType::GetAsNonConstType() const { } -std::string +std::string PointerType::GetString() const { if (baseType == NULL) { Assert(m->errorCount > 0); @@ -1114,7 +1114,7 @@ PointerType::LLVMType(llvm::LLVMContext *ctx) const { if (isSlice) { llvm::Type *types[2]; types[0] = GetAsNonSlice()->LLVMType(ctx); - + switch (variability.type) { case Variability::Uniform: types[1] = LLVMTypes::Int32Type; @@ -1140,7 +1140,7 @@ PointerType::LLVMType(llvm::LLVMContext *ctx) const { case Variability::Uniform: { llvm::Type *ptype = NULL; const FunctionType *ftype = CastType(baseType); - if (ftype != NULL) + if (ftype != NULL) ptype = llvm::PointerType::get(ftype->LLVMFunctionType(ctx), 0); else { if (baseType == AtomicType::Void) @@ -1177,11 +1177,11 @@ PointerType::GetDIType(llvm::DIDescriptor scope) const { int ptrAlignBits = bitsSize; switch (variability.type) { case Variability::Uniform: - return m->diBuilder->createPointerType(diTargetType, bitsSize, + return m->diBuilder->createPointerType(diTargetType, bitsSize, ptrAlignBits); case Variability::Varying: { // emit them as an array of pointers - llvm::DIType eltType = m->diBuilder->createPointerType(diTargetType, + llvm::DIType eltType = m->diBuilder->createPointerType(diTargetType, bitsSize, ptrAlignBits); return lCreateDIArray(eltType, g->target.vectorWidth); } @@ -1207,7 +1207,7 @@ const Type *SequentialType::GetElementType(int index) const { /////////////////////////////////////////////////////////////////////////// // ArrayType -ArrayType::ArrayType(const Type *c, int a) +ArrayType::ArrayType(const Type *c, int a) : SequentialType(ARRAY_TYPE), child(c), numElements(a) { // 0 -> unsized 
array. Assert(numElements >= 0); @@ -1239,25 +1239,25 @@ ArrayType::GetVariability() const { bool ArrayType::IsFloatType() const { - return false; + return false; } bool ArrayType::IsIntType() const { - return false; + return false; } bool ArrayType::IsUnsignedType() const { - return false; + return false; } bool ArrayType::IsBoolType() const { - return false; + return false; } @@ -1519,7 +1519,7 @@ ArrayType::SizeUnsizedArrays(const Type *type, Expr *initExpr) { ExprList *el = dynamic_cast(exprList->exprs[i]); if (el == NULL || el->exprs.size() != nextSize) { - Error(Union(exprList->exprs[0]->pos, exprList->exprs[i]->pos), + Error(Union(exprList->exprs[0]->pos, exprList->exprs[i]->pos), "Inconsistent initializer expression list lengths " "make it impossible to size unsized array dimensions."); return NULL; @@ -1537,7 +1537,7 @@ ArrayType::SizeUnsizedArrays(const Type *type, Expr *initExpr) { /////////////////////////////////////////////////////////////////////////// // VectorType -VectorType::VectorType(const AtomicType *b, int a) +VectorType::VectorType(const AtomicType *b, int a) : SequentialType(VECTOR_TYPE), base(b), numElements(a) { Assert(numElements > 0); Assert(base != NULL); @@ -1546,37 +1546,37 @@ VectorType::VectorType(const AtomicType *b, int a) Variability VectorType::GetVariability() const { - return base->GetVariability(); + return base->GetVariability(); } bool VectorType::IsFloatType() const { - return base->IsFloatType(); + return base->IsFloatType(); } bool VectorType::IsIntType() const { - return base->IsIntType(); + return base->IsIntType(); } bool VectorType::IsUnsignedType() const { - return base->IsUnsignedType(); + return base->IsUnsignedType(); } bool VectorType::IsBoolType() const { - return base->IsBoolType(); + return base->IsBoolType(); } bool VectorType::IsConstType() const { - return base->IsConstType(); + return base->IsConstType(); } @@ -1796,22 +1796,22 @@ lMangleStructName(const std::string &name, Variability variability) { default: FATAL("Unexpected variability in lMangleStructName()"); } - + // And stuff the name at the end.... 
n += name; return n; } - -StructType::StructType(const std::string &n, const llvm::SmallVector &elts, + +StructType::StructType(const std::string &n, const llvm::SmallVector &elts, const llvm::SmallVector &en, const llvm::SmallVector &ep, - bool ic, Variability v, SourcePos p) - : CollectionType(STRUCT_TYPE), name(n), elementTypes(elts), elementNames(en), + bool ic, Variability v, SourcePos p) + : CollectionType(STRUCT_TYPE), name(n), elementTypes(elts), elementNames(en), elementPositions(ep), variability(v), isConst(ic), pos(p) { oppositeConstStructType = NULL; finalElementTypes.resize(elts.size(), NULL); - + if (variability != Variability::Unbound) { // For structs with non-unbound variability, we'll create the // corresponding LLVM struct type now, if one hasn't been made @@ -1875,37 +1875,37 @@ StructType::StructType(const std::string &n, const llvm::SmallVectoroppositeConstStructType = this; @@ -1997,7 +1997,7 @@ StructType::GetAsNonConstType() const { else if (oppositeConstStructType != NULL) return oppositeConstStructType; else { - oppositeConstStructType = + oppositeConstStructType = new StructType(name, elementTypes, elementNames, elementPositions, false, variability, pos); oppositeConstStructType->oppositeConstStructType = this; @@ -2115,9 +2115,9 @@ StructType::GetDIType(llvm::DIDescriptor scope) const { llvm::DIFile diFile = elementPositions[i].GetDIFile(); int line = elementPositions[i].first_line; - llvm::DIType fieldType = - m->diBuilder->createMemberType(scope, elementNames[i], diFile, - line, eltSize, eltAlign, + llvm::DIType fieldType = + m->diBuilder->createMemberType(scope, elementNames[i], diFile, + line, eltSize, eltAlign, currentSize, 0, eltType); elementLLVMTypes.push_back(fieldType); @@ -2201,7 +2201,7 @@ StructType::checkIfCanBeSOA(const StructType *st) { Error(st->elementPositions[i], "Unable to apply SOA conversion to " "struct due to \"%s\" member \"%s\" with bound \"%s\" " "variability.", eltType->GetString().c_str(), - st->elementNames[i].c_str(), + st->elementNames[i].c_str(), eltType->IsUniformType() ?
"uniform" : "varying"); ok = false; } @@ -2219,9 +2219,9 @@ StructType::checkIfCanBeSOA(const StructType *st) { /////////////////////////////////////////////////////////////////////////// // UndefinedStructType -UndefinedStructType::UndefinedStructType(const std::string &n, +UndefinedStructType::UndefinedStructType(const std::string &n, const Variability var, bool ic, - SourcePos p) + SourcePos p) : Type(UNDEFINED_STRUCT_TYPE), name(n), variability(var), isConst(ic), pos(p) { Assert(name != ""); if (variability != Variability::Unbound) { @@ -2392,7 +2392,7 @@ UndefinedStructType::GetDIType(llvm::DIDescriptor scope) const { /////////////////////////////////////////////////////////////////////////// // ReferenceType -ReferenceType::ReferenceType(const Type *t) +ReferenceType::ReferenceType(const Type *t) : Type(REFERENCE_TYPE), targetType(t) { asOtherConstType = NULL; } @@ -2404,7 +2404,7 @@ ReferenceType::GetVariability() const { Assert(m->errorCount > 0); return Variability(Variability::Unbound); } - return targetType->GetVariability(); + return targetType->GetVariability(); } @@ -2414,7 +2414,7 @@ ReferenceType::IsBoolType() const { Assert(m->errorCount > 0); return false; } - return targetType->IsBoolType(); + return targetType->IsBoolType(); } @@ -2424,7 +2424,7 @@ ReferenceType::IsFloatType() const { Assert(m->errorCount > 0); return false; } - return targetType->IsFloatType(); + return targetType->IsFloatType(); } @@ -2434,7 +2434,7 @@ ReferenceType::IsIntType() const { Assert(m->errorCount > 0); return false; } - return targetType->IsIntType(); + return targetType->IsIntType(); } @@ -2444,7 +2444,7 @@ ReferenceType::IsUnsignedType() const { Assert(m->errorCount > 0); return false; } - return targetType->IsUnsignedType(); + return targetType->IsUnsignedType(); } @@ -2480,7 +2480,7 @@ ReferenceType::GetAsVaryingType() const { Assert(m->errorCount > 0); return NULL; } - if (IsVaryingType()) + if (IsVaryingType()) return this; return new ReferenceType(targetType->GetAsVaryingType()); } @@ -2492,7 +2492,7 @@ ReferenceType::GetAsUniformType() const { Assert(m->errorCount > 0); return NULL; } - if (IsUniformType()) + if (IsUniformType()) return this; return new ReferenceType(targetType->GetAsUniformType()); } @@ -2504,7 +2504,7 @@ ReferenceType::GetAsUnboundVariabilityType() const { Assert(m->errorCount > 0); return NULL; } - if (HasUnboundVariability()) + if (HasUnboundVariability()) return this; return new ReferenceType(targetType->GetAsUnboundVariabilityType()); } @@ -2525,7 +2525,7 @@ ReferenceType::ResolveUnboundVariability(Variability v) const { } return new ReferenceType(targetType->ResolveUnboundVariability(v)); } - + const ReferenceType * ReferenceType::GetAsConstType() const { @@ -2599,7 +2599,7 @@ ReferenceType::GetCDeclaration(const std::string &name) const { if (at->GetElementCount() == 0) { // emit unsized arrays as pointers to the base type.. 
std::string ret; - ret += at->GetElementType()->GetAsNonConstType()->GetCDeclaration("") + + ret += at->GetElementType()->GetAsNonConstType()->GetCDeclaration("") + std::string(" *"); if (lShouldPrintName(name)) ret += name; @@ -2657,11 +2657,11 @@ ReferenceType::GetDIType(llvm::DIDescriptor scope) const { /////////////////////////////////////////////////////////////////////////// // FunctionType -FunctionType::FunctionType(const Type *r, - const llvm::SmallVector &a, +FunctionType::FunctionType(const Type *r, + const llvm::SmallVector &a, SourcePos p) - : Type(FUNCTION_TYPE), isTask(false), isExported(false), isExternC(false), - isUnmasked(false), returnType(r), paramTypes(a), + : Type(FUNCTION_TYPE), isTask(false), isExported(false), isExternC(false), + isUnmasked(false), returnType(r), paramTypes(a), paramNames(llvm::SmallVector(a.size(), "")), paramDefaults(llvm::SmallVector(a.size(), NULL)), paramPositions(llvm::SmallVector(a.size(), p)) { @@ -2672,15 +2672,15 @@ FunctionType::FunctionType(const Type *r, FunctionType::FunctionType(const Type *r, - const llvm::SmallVector &a, - const llvm::SmallVector &an, + const llvm::SmallVector &a, + const llvm::SmallVector &an, const llvm::SmallVector &ad, const llvm::SmallVector &ap, bool it, bool is, bool ec, bool ium) - : Type(FUNCTION_TYPE), isTask(it), isExported(is), isExternC(ec), - isUnmasked(ium), returnType(r), paramTypes(a), paramNames(an), + : Type(FUNCTION_TYPE), isTask(it), isExported(is), isExternC(ec), + isUnmasked(ium), returnType(r), paramTypes(a), paramNames(an), paramDefaults(ad), paramPositions(ap) { - Assert(paramTypes.size() == paramNames.size() && + Assert(paramTypes.size() == paramNames.size() && paramNames.size() == paramDefaults.size() && paramDefaults.size() == paramPositions.size()); Assert(returnType != NULL); @@ -2847,7 +2847,7 @@ FunctionType::GetCDeclaration(const std::string &fname) const { // to print out for multidimensional arrays (i.e. "float foo[][4] " // versus "float (foo *)[4]"). 
const PointerType *pt = CastType(type); - if (pt != NULL && + if (pt != NULL && CastType(pt->GetBaseType()) != NULL) { type = new ArrayType(pt->GetBaseType(), 0); } @@ -2883,10 +2883,10 @@ FunctionType::GetDIType(llvm::DIDescriptor scope) const { retArgTypes.push_back(t->GetDIType(scope)); } - llvm::DIArray retArgTypesArray = + llvm::DIArray retArgTypesArray = m->diBuilder->getOrCreateArray(llvm::ArrayRef(retArgTypes)); - llvm::DIType diType = - // FIXME: DIFile + llvm::DIType diType = + // FIXME: DIFile m->diBuilder->createSubroutineType(llvm::DIFile(), retArgTypesArray); return diType; } @@ -2906,7 +2906,7 @@ FunctionType::GetReturnTypeString() const { ret += "extern \"C\" "; if (isUnmasked) ret += "unmasked "; - if (isSafe) + if (isSafe) ret += "/*safe*/ "; if (costOverride > 0) { char buf[32]; @@ -2920,7 +2920,7 @@ FunctionType::GetReturnTypeString() const { llvm::FunctionType * FunctionType::LLVMFunctionType(llvm::LLVMContext *ctx, bool removeMask) const { - if (isTask == true) + if (isTask == true) Assert(removeMask == false); // Get the LLVM Type *s for the function arguments @@ -2959,7 +2959,7 @@ FunctionType::LLVMFunctionType(llvm::LLVMContext *ctx, bool removeMask) const { callTypes.push_back(LLVMTypes::Int32Type); // taskCount } else - // Otherwise we already have the types of the arguments + // Otherwise we already have the types of the arguments callTypes = llvmArgTypes; if (returnType == NULL) { @@ -2976,30 +2976,30 @@ FunctionType::LLVMFunctionType(llvm::LLVMContext *ctx, bool removeMask) const { const Type * -FunctionType::GetParameterType(int i) const { +FunctionType::GetParameterType(int i) const { Assert(i < (int)paramTypes.size()); return paramTypes[i]; } Expr * -FunctionType::GetParameterDefault(int i) const { +FunctionType::GetParameterDefault(int i) const { Assert(i < (int)paramDefaults.size()); - return paramDefaults[i]; + return paramDefaults[i]; } const SourcePos & -FunctionType::GetParameterSourcePos(int i) const { +FunctionType::GetParameterSourcePos(int i) const { Assert(i < (int)paramPositions.size()); return paramPositions[i]; } const std::string & -FunctionType::GetParameterName(int i) const { +FunctionType::GetParameterName(int i) const { Assert(i < (int)paramNames.size()); - return paramNames[i]; + return paramNames[i]; } @@ -3030,7 +3030,7 @@ lVectorConvert(const Type *type, SourcePos pos, const char *reason, int vecSize) if (vt) { if (vt->GetElementCount() != vecSize) { Error(pos, "Implicit conversion from vector type " - "\"%s\" to vector type of length %d for %s is not possible.", + "\"%s\" to vector type of length %d for %s is not possible.", type->GetString().c_str(), vecSize, reason); return NULL; } @@ -3049,7 +3049,7 @@ lVectorConvert(const Type *type, SourcePos pos, const char *reason, int vecSize) const Type * -Type::MoreGeneralType(const Type *t0, const Type *t1, SourcePos pos, const char *reason, +Type::MoreGeneralType(const Type *t0, const Type *t1, SourcePos pos, const char *reason, bool forceVarying, int vecSize) { Assert(reason != NULL); @@ -3080,9 +3080,9 @@ Type::MoreGeneralType(const Type *t0, const Type *t1, SourcePos pos, const char } // Are they both the same type? If so, we're done, QED. - if (Type::Equal(t0, t1)) + if (Type::Equal(t0, t1)) return t0; - + // If they're function types, it's hopeless if they didn't match in the // Type::Equal() call above. Fail here so that we don't get into // trouble calling GetAsConstType()...
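Reviewer note: the Type::MoreGeneralType() hunks above and below implement the usual "find a common type for a binary operator" logic — identical types return immediately, mismatched vector lengths are an error, and a scalar operand is promoted to match the other operand's vector length. A standalone toy model of those promotion rules (hypothetical ranks and names, not ispc's implementation):

#include <cstdio>

// Kinds ordered so a "more general" type has a higher rank,
// mirroring the arithmetic-conversion flavor of MoreGeneralType().
enum Kind { KIND_INT, KIND_FLOAT, KIND_DOUBLE };

struct Ty {
    Kind kind;
    int vecLen; // 0 => scalar, N => vector of N elements
};

// Returns false when the two vector lengths are incompatible
// (cf. the Error() path above); otherwise computes the common type.
static bool MoreGeneral(const Ty &a, const Ty &b, Ty *out) {
    if (a.vecLen && b.vecLen && a.vecLen != b.vecLen)
        return false;                       // no common vector length
    out->kind = (a.kind > b.kind) ? a.kind : b.kind;
    out->vecLen = a.vecLen ? a.vecLen : b.vecLen; // scalar promotes up
    return true;
}

int main() {
    Ty a = { KIND_INT, 4 }, b = { KIND_FLOAT, 0 }, r;
    if (MoreGeneral(a, b, &r))
        printf("common type: kind=%d len=%d\n", r.kind, r.vecLen);
    return 0;
}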
@@ -3125,7 +3125,7 @@ Type::MoreGeneralType(const Type *t0, const Type *t1, SourcePos pos, const char } const Type *t = MoreGeneralType(vt0->GetElementType(), vt1->GetElementType(), pos, reason, forceVarying); - if (!t) + if (!t) return NULL; // The 'more general' version of the two vector element types must @@ -3140,9 +3140,9 @@ Type::MoreGeneralType(const Type *t0, const Type *t1, SourcePos pos, const char // promote the other one to a vector type. This will fail and // return NULL if t1 is e.g. an array type and it's illegal to have // a vector of it.. - const Type *t = MoreGeneralType(vt0->GetElementType(), t1, pos, + const Type *t = MoreGeneralType(vt0->GetElementType(), t1, pos, reason, forceVarying); - if (!t) + if (!t) return NULL; const AtomicType *at = CastType(t); @@ -3152,9 +3152,9 @@ Type::MoreGeneralType(const Type *t0, const Type *t1, SourcePos pos, const char else if (vt1) { // As in the above case, see if we can promote t0 to make a vector // that matches vt1. - const Type *t = MoreGeneralType(t0, vt1->GetElementType(), pos, + const Type *t = MoreGeneralType(t0, vt1->GetElementType(), pos, reason, forceVarying); - if (!t) + if (!t) return NULL; const AtomicType *at = CastType(t); @@ -3234,7 +3234,7 @@ lCheckTypeEquality(const Type *a, const Type *b, bool ignoreConst) { const AtomicType *ata = CastType(a); const AtomicType *atb = CastType(b); if (ata != NULL && atb != NULL) { - return ((ata->basicType == atb->basicType) && + return ((ata->basicType == atb->basicType) && (ata->GetVariability() == atb->GetVariability())); } @@ -3251,14 +3251,14 @@ lCheckTypeEquality(const Type *a, const Type *b, bool ignoreConst) { const ArrayType *arta = CastType(a); const ArrayType *artb = CastType(b); if (arta != NULL && artb != NULL) - return (arta->GetElementCount() == artb->GetElementCount() && - lCheckTypeEquality(arta->GetElementType(), artb->GetElementType(), + return (arta->GetElementCount() == artb->GetElementCount() && + lCheckTypeEquality(arta->GetElementType(), artb->GetElementType(), ignoreConst)); const VectorType *vta = CastType(a); const VectorType *vtb = CastType(b); if (vta != NULL && vtb != NULL) - return (vta->GetElementCount() == vtb->GetElementCount() && + return (vta->GetElementCount() == vtb->GetElementCount() && lCheckTypeEquality(vta->GetElementType(), vtb->GetElementType(), ignoreConst)); @@ -3272,7 +3272,7 @@ lCheckTypeEquality(const Type *a, const Type *b, bool ignoreConst) { if (a->GetVariability() != b->GetVariability()) return false; - const std::string &namea = sta ? sta->GetStructName() : + const std::string &namea = sta ? sta->GetStructName() : usta->GetStructName(); const std::string &nameb = stb ? 
stb->GetStructName() : ustb->GetStructName(); @@ -3285,7 +3285,7 @@ lCheckTypeEquality(const Type *a, const Type *b, bool ignoreConst) { return (pta->IsUniformType() == ptb->IsUniformType() && pta->IsSlice() == ptb->IsSlice() && pta->IsFrozenSlice() == ptb->IsFrozenSlice() && - lCheckTypeEquality(pta->GetBaseType(), ptb->GetBaseType(), + lCheckTypeEquality(pta->GetBaseType(), ptb->GetBaseType(), ignoreConst)); const ReferenceType *rta = CastType(a); @@ -3299,7 +3299,7 @@ lCheckTypeEquality(const Type *a, const Type *b, bool ignoreConst) { if (fta != NULL && ftb != NULL) { // Both the return types and all of the argument types must match // for function types to match - if (!lCheckTypeEquality(fta->GetReturnType(), ftb->GetReturnType(), + if (!lCheckTypeEquality(fta->GetReturnType(), ftb->GetReturnType(), ignoreConst)) return false; diff --git a/type.h b/type.h index 70bdeb58..880f8574 100644 --- a/type.h +++ b/type.h @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file type.h @@ -53,17 +53,17 @@ class ConstExpr; class StructType; /** Types may have uniform, varying, SOA, or unbound variability; this - struct is used by Type implementations to record their variability. + struct is used by Type implementations to record their variability. */ struct Variability { enum VarType { Unbound, Uniform, Varying, SOA }; Variability(VarType t = Unbound, int w = 0) : type(t), soaWidth(w) { } - bool operator==(const Variability &v) const { - return v.type == type && v.soaWidth == soaWidth; + bool operator==(const Variability &v) const { + return v.type == type && v.soaWidth == soaWidth; } - bool operator!=(const Variability &v) const { + bool operator!=(const Variability &v) const { return v.type != type || v.soaWidth != soaWidth; } @@ -72,7 +72,7 @@ struct Variability { std::string GetString() const; std::string MangleString() const; - + VarType type; int soaWidth; }; @@ -122,19 +122,19 @@ public: /** Returns true if the underlying type is a pointer type */ bool IsPointerType() const; - + /** Returns true if the underlying type is an array type */ bool IsArrayType() const; - + /** Returns true if the underlying type is a reference type */ bool IsReferenceType() const; - + /** Returns true if the underlying type is void */ bool IsVoidType() const; /** Returns true if this type is 'const'-qualified. */ virtual bool IsConstType() const = 0; - + /** Returns true if the underlying type is a float or integer type. */ bool IsNumericType() const { return IsFloatType() || IsIntType(); } @@ -142,13 +142,13 @@ public: virtual Variability GetVariability() const = 0; /** Returns true if the underlying type is uniform */ - bool IsUniformType() const { - return GetVariability() == Variability::Uniform; + bool IsUniformType() const { + return GetVariability() == Variability::Uniform; } /** Returns true if the underlying type is varying */ - bool IsVaryingType() const { - return GetVariability() == Variability::Varying; + bool IsVaryingType() const { + return GetVariability() == Variability::Varying; } /** Returns true if the type is laid out in "structure of arrays" @@ -161,8 +161,8 @@ public: /** Returns true if the underlying type's uniform/varying-ness is unbound.
*/ - bool HasUnboundVariability() const { - return GetVariability() == Variability::Unbound; + bool HasUnboundVariability() const { + return GetVariability() == Variability::Unbound; } /* Returns a type wherein any elements of the original type and @@ -196,7 +196,7 @@ public: For all other types, just returns its own type. */ virtual const Type *GetReferenceTarget() const; - /** Get a const version of this type. If it's already const, then the old + /** Get a const version of this type. If it's already const, then the old Type pointer is returned. */ virtual const Type *GetAsConstType() const = 0; @@ -244,7 +244,7 @@ public: needed. @param reason String describing the context of why the general type is needed (e.g. "+ operator"). - @param forceVarying If \c true, then make sure that the returned + @param forceVarying If \c true, then make sure that the returned type is "varying". @param vecSize The vector size of the returned type. If non-zero, the returned type will be a VectorType of the @@ -254,7 +254,7 @@ public: @todo the vecSize and forceVarying parts of this should probably be factored out and done separately in the cases when needed. - + */ static const Type *MoreGeneralType(const Type *type0, const Type *type1, SourcePos pos, const char *reason, @@ -275,7 +275,7 @@ protected: }; -/** @brief AtomicType represents basic types like floats, ints, etc. +/** @brief AtomicType represents basic types like floats, ints, etc. AtomicTypes can be either uniform or varying. Unique instances of all of the possible AtomicTypes are available in the static members @@ -313,7 +313,7 @@ public: llvm::Type *LLVMType(llvm::LLVMContext *ctx) const; llvm::DIType GetDIType(llvm::DIDescriptor scope) const; - /** This enumerator records the basic types that AtomicTypes can be + /** This enumerator records the basic types that AtomicTypes can be built from. */ enum BasicType { TYPE_VOID, @@ -431,7 +431,7 @@ private: */ class PointerType : public Type { public: - PointerType(const Type *t, Variability v, bool isConst, + PointerType(const Type *t, Variability v, bool isConst, bool isSlice = false, bool frozen = false); /** Helper method to return a uniform pointer to the given type. */ @@ -488,7 +488,7 @@ private: This is a common base class that StructTypes, ArrayTypes, and VectorTypes all inherit from. -*/ +*/ class CollectionType : public Type { public: /** Returns the total number of elements in the collection. */ @@ -532,7 +532,7 @@ protected: ArrayType represents a one-dimensional array of instances of some other type. (Multi-dimensional arrays are represented by ArrayTypes that in - turn hold ArrayTypes as their child types.) + turn hold ArrayTypes as their child types.) */ class ArrayType : public SequentialType { public: @@ -592,7 +592,7 @@ public: any array dimensions that are unsized according to the number of elements in the corresponding section of the initializer expression.
- */ + */ static const Type *SizeUnsizedArrays(const Type *type, Expr *initExpr); private: @@ -663,9 +663,9 @@ private: */ class StructType : public CollectionType { public: - StructType(const std::string &name, const llvm::SmallVector &elts, - const llvm::SmallVector &eltNames, - const llvm::SmallVector &eltPositions, bool isConst, + StructType(const std::string &name, const llvm::SmallVector &elts, + const llvm::SmallVector &eltNames, + const llvm::SmallVector &eltPositions, bool isConst, Variability variability, SourcePos pos); Variability GetVariability() const; @@ -707,7 +707,7 @@ public: /** Returns the name of the i'th element of the structure. */ const std::string &GetElementName(int i) const { return elementNames[i]; } - + /** Returns the total number of elements in the structure. */ int GetElementCount() const { return int(elementTypes.size()); } @@ -842,9 +842,9 @@ private: */ class FunctionType : public Type { public: - FunctionType(const Type *returnType, + FunctionType(const Type *returnType, const llvm::SmallVector &argTypes, SourcePos pos); - FunctionType(const Type *returnType, + FunctionType(const Type *returnType, const llvm::SmallVector &argTypes, const llvm::SmallVector &argNames, const llvm::SmallVector &argDefaults, @@ -884,7 +884,7 @@ public: function type. The \c disableMask parameter indicates whether the llvm::FunctionType should have the trailing mask parameter, if present, removed from the return function signature. */ - llvm::FunctionType *LLVMFunctionType(llvm::LLVMContext *ctx, + llvm::FunctionType *LLVMFunctionType(llvm::LLVMContext *ctx, bool disableMask = false) const; int GetNumParameters() const { return (int)paramTypes.size(); } @@ -915,7 +915,7 @@ public: bool isSafe; /** If non-negative, this provides a user-supplied override to the cost - function estimate for the function. */ + function estimate for the function. */ int costOverride; private: @@ -993,7 +993,7 @@ template <> inline const SequentialType * CastType(const Type *type) { // Note that this function must be updated if other sequential type // implementations are added. - if (type != NULL && + if (type != NULL && (type->typeId == ARRAY_TYPE || type->typeId == VECTOR_TYPE)) return (const SequentialType *)type; else @@ -1004,7 +1004,7 @@ template <> inline const CollectionType * CastType(const Type *type) { // Similarly a new collection type implementation requires updating // this function. - if (type != NULL && + if (type != NULL && (type->typeId == ARRAY_TYPE || type->typeId == VECTOR_TYPE || type->typeId == STRUCT_TYPE)) return (const CollectionType *)type; diff --git a/util.cpp b/util.cpp index cbf6ef17..ee5b8d55 100644 --- a/util.cpp +++ b/util.cpp @@ -28,7 +28,7 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file util.cpp @@ -139,7 +139,7 @@ lResetColor() { } /** Given a pointer into a string, find the end of the current word and - return a pointer to its last character. + return a pointer to its last character. */ static const char * lFindWordEnd(const char *buf) { @@ -166,7 +166,7 @@ lPrintFileLineContext(SourcePos p) { while ((c = fgetc(f)) != EOF) { // Don't print more than three lines of context. (More than that, // and we're probably doing the wrong thing...) 
- if (curLine >= std::max(p.first_line, p.last_line-2) && + if (curLine >= std::max(p.first_line, p.last_line-2) && curLine <= p.last_line) fputc(c, stderr); if (c == '\n') @@ -322,7 +322,7 @@ asprintf(char **sptr, const char *fmt, ...) @param args Arguments with values for format string % entries */ static void -lPrint(const char *type, bool isError, SourcePos p, const char *fmt, +lPrint(const char *type, bool isError, SourcePos p, const char *fmt, va_list args) { char *errorBuf, *formattedBuf; if (vasprintf(&errorBuf, fmt, args) == -1) { @@ -335,7 +335,7 @@ lPrint(const char *type, bool isError, SourcePos p, const char *fmt, // We don't have a valid SourcePos, so create a message without it if (asprintf(&formattedBuf, "%s%s%s%s%s: %s%s", lStartBold(), isError ? lStartRed() : lStartBlue(), type, - lResetColor(), lStartBold(), errorBuf, + lResetColor(), lStartBold(), errorBuf, lResetColor()) == -1) { fprintf(stderr, "asprintf() unable to allocate memory!\n"); exit(1); @@ -344,10 +344,10 @@ lPrint(const char *type, bool isError, SourcePos p, const char *fmt, } else { // Create an error message that includes the file and line number - if (asprintf(&formattedBuf, "%s%s:%d:%d: %s%s%s%s: %s%s", - lStartBold(), p.name, p.first_line, p.first_column, - isError ? lStartRed() : lStartBlue(), type, - lResetColor(), lStartBold(), errorBuf, + if (asprintf(&formattedBuf, "%s%s:%d:%d: %s%s%s%s: %s%s", + lStartBold(), p.name, p.first_line, p.first_column, + isError ? lStartRed() : lStartBlue(), type, + lResetColor(), lStartBold(), errorBuf, lResetColor()) == -1) { fprintf(stderr, "asprintf() unable to allocate memory!\n"); exit(1); @@ -507,7 +507,7 @@ StringEditDistance(const std::string &str1, const std::string &str2, int maxDist } -std::vector +std::vector MatchStrings(const std::string &str, const std::vector &options) { if (str.size() == 0 || (str.size() == 1 && !isalpha(str[0]))) // don't even try... @@ -536,7 +536,7 @@ MatchStrings(const std::string &str, const std::vector &options) { void -GetDirectoryAndFileName(const std::string ¤tDirectory, +GetDirectoryAndFileName(const std::string ¤tDirectory, const std::string &relativeName, std::string *directory, std::string *filename) { #ifdef ISPC_IS_WINDOWS @@ -550,7 +550,7 @@ GetDirectoryAndFileName(const std::string ¤tDirectory, #else // We need a fully qualified path. First, see if the current file name // is fully qualified itself--in that case, the current working - // directory isn't needed. + // directory isn't needed. // @todo This probably needs to be smarter for Windows... std::string fullPath; if (relativeName[0] == '/') diff --git a/util.h b/util.h index e7575379..4880182a 100644 --- a/util.h +++ b/util.h @@ -28,12 +28,12 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file util.h - @brief + @brief */ #ifndef ISPC_UTIL_H @@ -50,9 +50,9 @@ struct SourcePos; of two already. */ inline uint32_t RoundUpPow2(uint32_t v) { v--; - v |= v >> 1; + v |= v >> 1; v |= v >> 2; - v |= v >> 4; + v |= v >> 4; v |= v >> 8; v |= v >> 16; return v+1; @@ -75,7 +75,7 @@ int asprintf(char **sptr, const char *fmt, ...); g->debugPrint is \c true. 
In addition to a program source code position to associate with the message, a printf()-style format string is passed along with any values needed for items in the format - string. + string. */ void Debug(SourcePos p, const char *format, ...) PRINTF_FUNC; @@ -93,7 +93,7 @@ void Warning(SourcePos p, const char *format, ...) PRINTF_FUNC; able to issue any subsequent error messages. In addition to a program source code position to associate with the message, a printf()-style format string is passed along with any values needed for items in the - format string. + format string. */ void Error(SourcePos p, const char *format, ...) PRINTF_FUNC; @@ -102,7 +102,7 @@ void Error(SourcePos p, const char *format, ...) PRINTF_FUNC; completion of compilation. In addition to a program source code position to associate with the message, a printf()-style format string is passed along with any values needed for items in the format - string. + string. */ void PerformanceWarning(SourcePos p, const char *format, ...) PRINTF_FUNC; @@ -143,7 +143,7 @@ std::vector MatchStrings(const std::string &str, /** Given the current working directory and a filename relative to that directory, this function returns the final directory that the resulting file is in and the base name of the file itself. */ -void GetDirectoryAndFileName(const std::string ¤tDir, +void GetDirectoryAndFileName(const std::string ¤tDir, const std::string &relativeName, std::string *directory, std::string *filename);