Improve naming of LLVM instructions created.

We now try harder to derive the names of new instructions from the
names of the variables they are computed from. This makes both the
LLVM IR and the generated C++ code easier to correlate back to the
original ispc source code.

Issue #244.
commit 32815e628d
parent 71bdc67a45
Author: Matt Pharr
Date: 2012-04-19 16:36:46 -07:00
4 changed files with 279 additions and 184 deletions
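
The hunks below all go through a small LLVMGetName() helper whose definition is not part of the hunks shown here. A minimal sketch of the two overloads the call sites assume — one appends a suffix to a value's existing name, the other joins an operation name with the names of two operands; the real definitions may differ in detail:

    // Sketch only; the actual definitions live elsewhere in the tree.
    #include <string>
    #include <cstring>
    #include <llvm/Value.h>   // LLVM 3.x-era path

    // "x" + "_load" -> "x_load"
    static const char *
    LLVMGetName(llvm::Value *v, const char *suffix) {
        if (v == NULL)
            return suffix;
        std::string name = v->getName().str();
        name += suffix;
        return strdup(name.c_str());  // deliberately leaked; lives for the compile
    }

    // ("add", %a, %b) -> "add_a_b"
    static const char *
    LLVMGetName(const char *op, llvm::Value *v1, llvm::Value *v2) {
        std::string name = std::string(op) + "_" +
            (v1 ? v1->getName().str() : std::string("")) + "_" +
            (v2 ? v2->getName().str() : std::string(""));
        return strdup(name.c_str());
    }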


@@ -4402,7 +4402,8 @@ SmearCleanupPass::runOnBasicBlock(llvm::BasicBlock &bb) {
Value *args[1] = { toMatch };
ArrayRef<llvm::Value *> argArray(&args[0], &args[1]);
Instruction *smearCall =
CallInst::Create(smearFunc, argArray, "smear", (Instruction *)NULL);
CallInst::Create(smearFunc, argArray, LLVMGetName(toMatch, "_smear"),
(Instruction *)NULL);
ReplaceInstWithInst(iter, smearCall);
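
With hypothetical names, the effect on the emitted IR: if the value being matched is %x, the smear call that used to appear as

    %smear = call <4 x i32> @__smear_i32(i32 %x)

now appears as

    %x_smear = call <4 x i32> @__smear_i32(i32 %x)

(function name and vector width invented for illustration).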

ctx.cpp (174 changed lines)

@@ -1236,7 +1236,7 @@ llvm::Value *
FunctionEmitContext::Any(llvm::Value *mask) {
llvm::Value *mmval = LaneMask(mask);
return CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_NE, mmval,
LLVMInt32(0), "any_mm_cmp");
LLVMInt32(0), LLVMGetName(mask, "_any"));
}
@@ -1244,7 +1244,8 @@ llvm::Value *
FunctionEmitContext::All(llvm::Value *mask) {
llvm::Value *mmval = LaneMask(mask);
return CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_EQ, mmval,
LLVMInt32((1<<g->target.vectorWidth)-1), "all_mm_cmp");
LLVMInt32((1<<g->target.vectorWidth)-1),
LLVMGetName(mask, "_all"));
}
@@ -1252,7 +1253,7 @@ llvm::Value *
FunctionEmitContext::None(llvm::Value *mask) {
llvm::Value *mmval = LaneMask(mask);
return CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_EQ, mmval,
LLVMInt32(0), "none_mm_cmp");
LLVMInt32(0), LLVMGetName(mask, "_none"));
}
@@ -1270,7 +1271,7 @@ FunctionEmitContext::LaneMask(llvm::Value *v) {
// We can actually call either one, since both are i32s as far as
// LLVM's type system is concerned...
llvm::Function *fmm = mm[0]->function;
return CallInst(fmm, NULL, v, "val_movmsk");
return CallInst(fmm, NULL, v, LLVMGetName(v, "_movmsk"));
}
@@ -1288,7 +1289,7 @@ FunctionEmitContext::MasksAllEqual(llvm::Value *v1, llvm::Value *v2) {
llvm::Value *mm1 = LaneMask(v1);
llvm::Value *mm2 = LaneMask(v2);
return CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_EQ, mm1, mm2,
"v1==v2");
LLVMGetName("equal", v1, v2));
#endif
}
@@ -1337,7 +1338,7 @@ FunctionEmitContext::I1VecToBoolVec(llvm::Value *b) {
for (unsigned int i = 0; i < at->getNumElements(); ++i) {
llvm::Value *elt = ExtractInst(b, i);
llvm::Value *sext = SExtInst(elt, LLVMTypes::BoolVectorType,
"val_to_boolvec32");
LLVMGetName(elt, "_to_boolvec32"));
ret = InsertInst(ret, sext, i);
}
return ret;
@@ -1664,16 +1665,17 @@ FunctionEmitContext::SmearUniform(llvm::Value *value, const char *name) {
llvm::Value *
FunctionEmitContext::BitCastInst(llvm::Value *value,
llvm::Type *type,
FunctionEmitContext::BitCastInst(llvm::Value *value, llvm::Type *type,
const char *name) {
if (value == NULL) {
Assert(m->errorCount > 0);
return NULL;
}
llvm::Instruction *inst =
new llvm::BitCastInst(value, type, name ? name : "bitcast", bblock);
if (name == NULL)
name = LLVMGetName(value, "_bitcast");
llvm::Instruction *inst = new llvm::BitCastInst(value, type, name, bblock);
AddDebugPos(inst);
return inst;
}
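
This is the pattern repeated through the rest of the file: a caller-supplied name still wins, and only a NULL name falls back to one derived from the operand. An illustrative call, assuming a value named "buf":

    llvm::Value *p = ctx->BitCastInst(val, ptrType);
    // previously emitted as %bitcast; now as %buf_bitcast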
@@ -1690,23 +1692,26 @@ FunctionEmitContext::PtrToIntInst(llvm::Value *value, const char *name) {
// no-op for varying pointers; they're already vectors of ints
return value;
if (name == NULL)
name = LLVMGetName(value, "_ptr2int");
llvm::Type *type = LLVMTypes::PointerIntType;
llvm::Instruction *inst =
new llvm::PtrToIntInst(value, type, name ? name : "ptr2int", bblock);
llvm::Instruction *inst = new llvm::PtrToIntInst(value, type, name, bblock);
AddDebugPos(inst);
return inst;
}
llvm::Value *
FunctionEmitContext::PtrToIntInst(llvm::Value *value,
llvm::Type *toType,
FunctionEmitContext::PtrToIntInst(llvm::Value *value, llvm::Type *toType,
const char *name) {
if (value == NULL) {
Assert(m->errorCount > 0);
return NULL;
}
if (name == NULL)
name = LLVMGetName(value, "_ptr2int");
llvm::Type *fromType = value->getType();
if (llvm::isa<llvm::VectorType>(fromType)) {
// varying pointer
@@ -1714,30 +1719,31 @@ FunctionEmitContext::PtrToIntInst(llvm::Value *value,
// already the right type--done
return value;
else if (fromType->getScalarSizeInBits() > toType->getScalarSizeInBits())
return TruncInst(value, toType, "ptr_to_int");
return TruncInst(value, toType, name);
else {
Assert(fromType->getScalarSizeInBits() <
toType->getScalarSizeInBits());
return ZExtInst(value, toType, "ptr_to_int");
return ZExtInst(value, toType, name);
}
}
llvm::Instruction *inst =
new llvm::PtrToIntInst(value, toType, name ? name : "ptr2int", bblock);
llvm::Instruction *inst = new llvm::PtrToIntInst(value, toType, name, bblock);
AddDebugPos(inst);
return inst;
}
llvm::Value *
FunctionEmitContext::IntToPtrInst(llvm::Value *value,
llvm::Type *toType,
FunctionEmitContext::IntToPtrInst(llvm::Value *value, llvm::Type *toType,
const char *name) {
if (value == NULL) {
Assert(m->errorCount > 0);
return NULL;
}
if (name == NULL)
name = LLVMGetName(value, "_int2ptr");
llvm::Type *fromType = value->getType();
if (llvm::isa<llvm::VectorType>(fromType)) {
// varying pointer
@@ -1745,16 +1751,16 @@ FunctionEmitContext::IntToPtrInst(llvm::Value *value,
// done
return value;
else if (fromType->getScalarSizeInBits() > toType->getScalarSizeInBits())
return TruncInst(value, toType, "int_to_ptr");
return TruncInst(value, toType, name);
else {
Assert(fromType->getScalarSizeInBits() <
toType->getScalarSizeInBits());
return ZExtInst(value, toType, "int_to_ptr");
return ZExtInst(value, toType, name);
}
}
llvm::Instruction *inst =
new llvm::IntToPtrInst(value, toType, name ? name : "int2ptr", bblock);
llvm::Instruction *inst = new llvm::IntToPtrInst(value, toType, name,
bblock);
AddDebugPos(inst);
return inst;
}
@@ -1768,10 +1774,12 @@ FunctionEmitContext::TruncInst(llvm::Value *value, llvm::Type *type,
return NULL;
}
if (name == NULL)
name = LLVMGetName(value, "_trunc");
// TODO: we should probably handle the array case as in
// e.g. BitCastInst(), but we don't currently need that functionality
llvm::Instruction *inst =
new llvm::TruncInst(value, type, name ? name : "trunc", bblock);
llvm::Instruction *inst = new llvm::TruncInst(value, type, name, bblock);
AddDebugPos(inst);
return inst;
}
@@ -1785,10 +1793,13 @@ FunctionEmitContext::CastInst(llvm::Instruction::CastOps op, llvm::Value *value,
return NULL;
}
if (name == NULL)
name = LLVMGetName(value, "_cast");
// TODO: we should probably handle the array case as in
// e.g. BitCastInst(), but we don't currently need that functionality
llvm::Instruction *inst =
llvm::CastInst::Create(op, value, type, name ? name : "cast", bblock);
llvm::Instruction *inst = llvm::CastInst::Create(op, value, type, name,
bblock);
AddDebugPos(inst);
return inst;
}
@@ -1802,10 +1813,12 @@ FunctionEmitContext::FPCastInst(llvm::Value *value, llvm::Type *type,
return NULL;
}
if (name == NULL)
name = LLVMGetName(value, "_cast");
// TODO: we should probably handle the array case as in
// e.g. BitCastInst(), but we don't currently need that functionality
llvm::Instruction *inst =
llvm::CastInst::CreateFPCast(value, type, name ? name : "fpcast", bblock);
llvm::Instruction *inst = llvm::CastInst::CreateFPCast(value, type, name, bblock);
AddDebugPos(inst);
return inst;
}
@@ -1819,10 +1832,12 @@ FunctionEmitContext::SExtInst(llvm::Value *value, llvm::Type *type,
return NULL;
}
if (name == NULL)
name = LLVMGetName(value, "_sext");
// TODO: we should probably handle the array case as in
// e.g. BitCastInst(), but we don't currently need that functionality
llvm::Instruction *inst =
new llvm::SExtInst(value, type, name ? name : "sext", bblock);
llvm::Instruction *inst = new llvm::SExtInst(value, type, name, bblock);
AddDebugPos(inst);
return inst;
}
@@ -1836,10 +1851,12 @@ FunctionEmitContext::ZExtInst(llvm::Value *value, llvm::Type *type,
return NULL;
}
if (name == NULL)
name = LLVMGetName(value, "_zext");
// TODO: we should probably handle the array case as in
// e.g. BitCastInst(), but we don't currently need that functionality
llvm::Instruction *inst =
new llvm::ZExtInst(value, type, name ? name : "zext", bblock);
llvm::Instruction *inst = new llvm::ZExtInst(value, type, name, bblock);
AddDebugPos(inst);
return inst;
}
@@ -1867,50 +1884,52 @@ FunctionEmitContext::applyVaryingGEP(llvm::Value *basePtr, llvm::Value *index,
// 64-bit type.
if ((g->target.is32Bit || g->opt.force32BitAddressing) &&
index->getType() == LLVMTypes::Int64Type)
index = TruncInst(index, LLVMTypes::Int32Type, "trunc_index");
index = TruncInst(index, LLVMTypes::Int32Type);
else if ((!g->target.is32Bit && !g->opt.force32BitAddressing) &&
index->getType() == LLVMTypes::Int32Type)
index = SExtInst(index, LLVMTypes::Int64Type, "sext_index");
index = SExtInst(index, LLVMTypes::Int64Type);
// do a scalar multiply to get the offset as index * scale and then
// smear the result out to be a vector; this is more efficient than
// first promoting both the scale and the index to vectors and then
// multiplying.
offset = BinaryOperator(llvm::Instruction::Mul, scale, index);
offset = SmearUniform(offset, "offset_smear");
offset = SmearUniform(offset);
}
else {
// Similarly, truncate or sign extend the index to be a 32 or 64
// bit vector type
if ((g->target.is32Bit || g->opt.force32BitAddressing) &&
index->getType() == LLVMTypes::Int64VectorType)
index = TruncInst(index, LLVMTypes::Int32VectorType, "trunc_index");
index = TruncInst(index, LLVMTypes::Int32VectorType);
else if ((!g->target.is32Bit && !g->opt.force32BitAddressing) &&
index->getType() == LLVMTypes::Int32VectorType)
index = SExtInst(index, LLVMTypes::Int64VectorType, "sext_index");
index = SExtInst(index, LLVMTypes::Int64VectorType);
scale = SmearUniform(scale, "scale_smear");
scale = SmearUniform(scale);
// offset = index * scale
offset = BinaryOperator(llvm::Instruction::Mul, scale, index, "offset");
offset = BinaryOperator(llvm::Instruction::Mul, scale, index,
LLVMGetName("mul", scale, index));
}
// For 64-bit targets, if we've been doing our offset calculations in
// 32 bits, we still have to convert to a 64-bit value before we
// actually add the offset to the pointer.
if (g->target.is32Bit == false && g->opt.force32BitAddressing == true)
offset = SExtInst(offset, LLVMTypes::Int64VectorType, "offset_to_64");
offset = SExtInst(offset, LLVMTypes::Int64VectorType,
LLVMGetName(offset, "_to_64"));
// Smear out the pointer to be varying; either the base pointer or the
// index must be varying for this method to be called.
bool baseIsUniform =
(llvm::isa<llvm::PointerType>(basePtr->getType()));
Assert(baseIsUniform == false || indexIsVarying == true);
llvm::Value *varyingPtr = baseIsUniform ?
SmearUniform(basePtr, "ptr_smear") : basePtr;
llvm::Value *varyingPtr = baseIsUniform ? SmearUniform(basePtr) : basePtr;
// newPtr = ptr + offset
return BinaryOperator(llvm::Instruction::Add, varyingPtr, offset, "new_ptr");
return BinaryOperator(llvm::Instruction::Add, varyingPtr, offset,
LLVMGetName(basePtr, "_offset"));
}
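
For the uniform-index path above, the emitted sequence is roughly the following pseudo-IR (hypothetical names, 4-wide target) — one scalar multiply followed by broadcasts, rather than broadcasting both operands and doing a full vector multiply:

    %offset = mul i64 %scale, %index        ; scalar multiply
    %offset_smear = <broadcast %offset>     ; smear result out to <4 x i64>
    %ptr_smear = <broadcast %ptr>
    %ptr_offset = add <4 x i64> %ptr_smear, %offset_smear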
@@ -1999,8 +2018,8 @@ FunctionEmitContext::MakeSlicePointer(llvm::Value *ptr, llvm::Value *offset) {
llvm::StructType::get(*g->ctx, eltTypes);
llvm::Value *ret = llvm::UndefValue::get(st);
ret = InsertInst(ret, ptr, 0);
ret = InsertInst(ret, offset, 1);
ret = InsertInst(ret, ptr, 0, LLVMGetName(ret, "_slice_ptr"));
ret = InsertInst(ret, offset, 1, LLVMGetName(ret, "_slice_offset"));
return ret;
}
@@ -2267,6 +2286,9 @@ FunctionEmitContext::LoadInst(llvm::Value *ptr, const char *name) {
llvm::dyn_cast<llvm::PointerType>(ptr->getType());
Assert(pt != NULL);
if (name == NULL)
name = LLVMGetName(ptr, "_load");
// FIXME: it's not clear to me that we generate unaligned vector loads
// of varying stuff out of the front-end any more. (Only by the
// optimization passes that lower gathers to vector loads, I think..)
@@ -2274,7 +2296,7 @@ FunctionEmitContext::LoadInst(llvm::Value *ptr, const char *name) {
int align = 0;
if (llvm::isa<llvm::VectorType>(pt->getElementType()))
align = 1;
llvm::Instruction *inst = new llvm::LoadInst(ptr, name ? name : "load",
llvm::Instruction *inst = new llvm::LoadInst(ptr, name,
false /* not volatile */,
align, bblock);
AddDebugPos(inst);
@@ -2291,8 +2313,8 @@ lFinalSliceOffset(FunctionEmitContext *ctx, llvm::Value *ptr,
const PointerType **ptrType) {
Assert(dynamic_cast<const PointerType *>(*ptrType) != NULL);
llvm::Value *slicePtr = ctx->ExtractInst(ptr, 0, "slice_ptr");
llvm::Value *sliceOffset = ctx->ExtractInst(ptr, 1, "slice_offset");
llvm::Value *slicePtr = ctx->ExtractInst(ptr, 0, LLVMGetName(ptr, "_ptr"));
llvm::Value *sliceOffset = ctx->ExtractInst(ptr, 1, LLVMGetName(ptr, "_offset"));
// slicePtr should be a pointer to an soa-width wide array of the
// final atomic/enum/pointer type
@@ -2313,7 +2335,7 @@ lFinalSliceOffset(FunctionEmitContext *ctx, llvm::Value *ptr,
// And finally index based on the slice offset
return ctx->GetElementPtrInst(slicePtr, sliceOffset, *ptrType,
"final_slice_gep");
LLVMGetName(slicePtr, "_final_gep"));
}
@@ -2365,6 +2387,9 @@ FunctionEmitContext::LoadInst(llvm::Value *ptr, llvm::Value *mask,
Assert(ptrRefType != NULL && mask != NULL);
if (name == NULL)
name = LLVMGetName(ptr, "_load");
const PointerType *ptrType;
if (dynamic_cast<const ReferenceType *>(ptrRefType) != NULL)
ptrType = PointerType::GetUniform(ptrRefType->GetReferenceTarget());
@@ -2394,7 +2419,7 @@ FunctionEmitContext::LoadInst(llvm::Value *ptr, llvm::Value *mask,
// it's totally unaligned. (This shouldn't make any difference
// vs the proper alignment in practice.)
align = 1;
llvm::Instruction *inst = new llvm::LoadInst(ptr, name ? name : "load",
llvm::Instruction *inst = new llvm::LoadInst(ptr, name,
false /* not volatile */,
align, bblock);
AddDebugPos(inst);
@@ -2487,7 +2512,7 @@ FunctionEmitContext::gather(llvm::Value *ptr, const PointerType *ptrType,
if (disableGSWarningCount == 0)
addGSMetadata(call, currentPos);
return BitCastInst(call, llvmReturnType, "gather_bitcast");
return BitCastInst(call, llvmReturnType, LLVMGetName(call, "_gather_bitcast"));
}
@@ -2652,9 +2677,9 @@ FunctionEmitContext::maskedStore(llvm::Value *value, llvm::Value *ptr,
Type::Equal(valueType, AtomicType::VaryingUInt64)) {
maskedStoreFunc = m->module->getFunction("__pseudo_masked_store_64");
ptr = BitCastInst(ptr, LLVMTypes::Int64VectorPointerType,
"ptr_to_int64vecptr");
LLVMGetName(ptr, "_to_int64vecptr"));
value = BitCastInst(value, LLVMTypes::Int64VectorType,
"value_to_int64");
LLVMGetName(value, "_to_int64"));
}
else if (Type::Equal(valueType, AtomicType::VaryingFloat) ||
Type::Equal(valueType, AtomicType::VaryingBool) ||
@@ -2663,22 +2688,22 @@ FunctionEmitContext::maskedStore(llvm::Value *value, llvm::Value *ptr,
dynamic_cast<const EnumType *>(valueType) != NULL) {
maskedStoreFunc = m->module->getFunction("__pseudo_masked_store_32");
ptr = BitCastInst(ptr, LLVMTypes::Int32VectorPointerType,
"ptr_to_int32vecptr");
LLVMGetName(ptr, "_to_int32vecptr"));
if (Type::Equal(valueType, AtomicType::VaryingFloat))
value = BitCastInst(value, LLVMTypes::Int32VectorType,
"value_to_int32");
LLVMGetName(value, "_to_int32"));
}
else if (Type::Equal(valueType, AtomicType::VaryingInt16) ||
Type::Equal(valueType, AtomicType::VaryingUInt16)) {
maskedStoreFunc = m->module->getFunction("__pseudo_masked_store_16");
ptr = BitCastInst(ptr, LLVMTypes::Int16VectorPointerType,
"ptr_to_int16vecptr");
LLVMGetName(ptr, "_to_int16vecptr"));
}
else if (Type::Equal(valueType, AtomicType::VaryingInt8) ||
Type::Equal(valueType, AtomicType::VaryingUInt8)) {
maskedStoreFunc = m->module->getFunction("__pseudo_masked_store_8");
ptr = BitCastInst(ptr, LLVMTypes::Int8VectorPointerType,
"ptr_to_int8vecptr");
LLVMGetName(ptr, "_to_int8vecptr"));
}
Assert(maskedStoreFunc != NULL);
@@ -2964,13 +2989,17 @@ FunctionEmitContext::ExtractInst(llvm::Value *v, int elt, const char *name) {
return NULL;
}
if (name == NULL) {
char buf[32];
sprintf(buf, "_extract_%d", elt);
name = LLVMGetName(v, buf);
}
llvm::Instruction *ei = NULL;
if (llvm::isa<llvm::VectorType>(v->getType()))
ei = llvm::ExtractElementInst::Create(v, LLVMInt32(elt),
name ? name : "extract", bblock);
ei = llvm::ExtractElementInst::Create(v, LLVMInt32(elt), name, bblock);
else
ei = llvm::ExtractValueInst::Create(v, elt, name ? name : "extract",
bblock);
ei = llvm::ExtractValueInst::Create(v, elt, name, bblock);
AddDebugPos(ei);
return ei;
}
@@ -2984,13 +3013,18 @@ FunctionEmitContext::InsertInst(llvm::Value *v, llvm::Value *eltVal, int elt,
return NULL;
}
if (name == NULL) {
char buf[32];
sprintf(buf, "_insert_%d", elt);
name = LLVMGetName(v, buf);
}
llvm::Instruction *ii = NULL;
if (llvm::isa<llvm::VectorType>(v->getType()))
ii = llvm::InsertElementInst::Create(v, eltVal, LLVMInt32(elt),
name ? name : "insert", bblock);
name, bblock);
else
ii = llvm::InsertValueInst::Create(v, eltVal, elt,
name ? name : "insert", bblock);
ii = llvm::InsertValueInst::Create(v, eltVal, elt, name, bblock);
AddDebugPos(ii);
return ii;
}
@@ -3014,9 +3048,11 @@ FunctionEmitContext::SelectInst(llvm::Value *test, llvm::Value *val0,
return NULL;
}
llvm::Instruction *inst =
llvm::SelectInst::Create(test, val0, val1, name ? name : "select",
bblock);
if (name == NULL)
name = LLVMGetName(test, "_select");
llvm::Instruction *inst = llvm::SelectInst::Create(test, val0, val1, name,
bblock);
AddDebugPos(inst);
return inst;
}

expr.cpp (231 changed lines)

@@ -68,6 +68,7 @@
#include <llvm/ExecutionEngine/GenericValue.h>
#include <llvm/Support/InstIterator.h>
/////////////////////////////////////////////////////////////////////////////////////
// Expr
@@ -1029,20 +1030,26 @@ lEmitPrePostIncDec(UnaryExpr::Op op, Expr *expr, SourcePos pos,
llvm::Value *binop = NULL;
int delta = (op == UnaryExpr::PreInc || op == UnaryExpr::PostInc) ? 1 : -1;
std::string opName = rvalue->getName().str();
if (op == UnaryExpr::PreInc || op == UnaryExpr::PostInc)
opName += "_plus1";
else
opName += "_minus1";
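// e.g. an rvalue named "i" yields "i_plus1" for ++ and "i_minus1" for --.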
if (dynamic_cast<const PointerType *>(type) != NULL) {
const Type *incType = type->IsUniformType() ? AtomicType::UniformInt32 :
AtomicType::VaryingInt32;
llvm::Constant *dval = lLLVMConstantValue(incType, g->ctx, delta);
binop = ctx->GetElementPtrInst(rvalue, dval, type, "ptr_inc_or_dec");
binop = ctx->GetElementPtrInst(rvalue, dval, type, opName.c_str());
}
else {
llvm::Constant *dval = lLLVMConstantValue(type, g->ctx, delta);
if (type->IsFloatType())
binop = ctx->BinaryOperator(llvm::Instruction::FAdd, rvalue,
dval, "val_inc_or_dec");
dval, opName.c_str());
else
binop = ctx->BinaryOperator(llvm::Instruction::Add, rvalue,
dval, "val_inc_or_dec");
dval, opName.c_str());
}
// And store the result out to the lvalue
@@ -1071,11 +1078,11 @@ lEmitNegate(Expr *arg, SourcePos pos, FunctionEmitContext *ctx) {
ctx->SetDebugPos(pos);
if (type->IsFloatType())
return ctx->BinaryOperator(llvm::Instruction::FSub, zero, argVal,
"fnegate");
LLVMGetName(argVal, "_negate"));
else {
Assert(type->IsIntType());
return ctx->BinaryOperator(llvm::Instruction::Sub, zero, argVal,
"inegate");
LLVMGetName(argVal, "_negate"));
}
}
@@ -1103,11 +1110,11 @@ UnaryExpr::GetValue(FunctionEmitContext *ctx) const {
return lEmitNegate(expr, pos, ctx);
case LogicalNot: {
llvm::Value *argVal = expr->GetValue(ctx);
return ctx->NotOperator(argVal, "logicalnot");
return ctx->NotOperator(argVal, LLVMGetName(argVal, "_logicalnot"));
}
case BitNot: {
llvm::Value *argVal = expr->GetValue(ctx);
return ctx->NotOperator(argVal, "bitnot");
return ctx->NotOperator(argVal, LLVMGetName(argVal, "_bitnot"));
}
default:
FATAL("logic error");
@@ -1506,17 +1513,22 @@ lEmitBinaryArith(BinaryExpr::Op op, llvm::Value *value0, llvm::Value *value1,
bool isFloatOp = type0->IsFloatType();
bool isUnsignedOp = type0->IsUnsignedType();
const char *opName = NULL;
switch (op) {
case BinaryExpr::Add:
opName = "add";
inst = isFloatOp ? llvm::Instruction::FAdd : llvm::Instruction::Add;
break;
case BinaryExpr::Sub:
opName = "sub";
inst = isFloatOp ? llvm::Instruction::FSub : llvm::Instruction::Sub;
break;
case BinaryExpr::Mul:
opName = "mul";
inst = isFloatOp ? llvm::Instruction::FMul : llvm::Instruction::Mul;
break;
case BinaryExpr::Div:
opName = "div";
if (type0->IsVaryingType() && !isFloatOp)
PerformanceWarning(pos, "Division with varying integer types is "
"very inefficient.");
@@ -1524,6 +1536,7 @@ lEmitBinaryArith(BinaryExpr::Op op, llvm::Value *value0, llvm::Value *value1,
(isUnsignedOp ? llvm::Instruction::UDiv : llvm::Instruction::SDiv);
break;
case BinaryExpr::Mod:
opName = "mod";
if (type0->IsVaryingType() && !isFloatOp)
PerformanceWarning(pos, "Modulus operator with varying types is "
"very inefficient.");
@@ -1535,7 +1548,7 @@ lEmitBinaryArith(BinaryExpr::Op op, llvm::Value *value0, llvm::Value *value1,
return NULL;
}
return ctx->BinaryOperator(inst, value0, value1, "binop");
return ctx->BinaryOperator(inst, value0, value1, LLVMGetName(opName, value0, value1));
}
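
With hypothetical operand names, an expression a + b now produces

    %add_a_b = add <4 x i32> %a, %b

instead of the old catch-all %binop.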
}
@@ -1550,27 +1563,34 @@ lEmitBinaryCmp(BinaryExpr::Op op, llvm::Value *e0Val, llvm::Value *e1Val,
bool isUnsignedOp = type->IsUnsignedType();
llvm::CmpInst::Predicate pred;
const char *opName = NULL;
switch (op) {
case BinaryExpr::Lt:
opName = "less";
pred = isFloatOp ? llvm::CmpInst::FCMP_OLT :
(isUnsignedOp ? llvm::CmpInst::ICMP_ULT : llvm::CmpInst::ICMP_SLT);
break;
case BinaryExpr::Gt:
opName = "greater";
pred = isFloatOp ? llvm::CmpInst::FCMP_OGT :
(isUnsignedOp ? llvm::CmpInst::ICMP_UGT : llvm::CmpInst::ICMP_SGT);
break;
case BinaryExpr::Le:
opName = "lessequal";
pred = isFloatOp ? llvm::CmpInst::FCMP_OLE :
(isUnsignedOp ? llvm::CmpInst::ICMP_ULE : llvm::CmpInst::ICMP_SLE);
break;
case BinaryExpr::Ge:
opName = "greaterequal";
pred = isFloatOp ? llvm::CmpInst::FCMP_OGE :
(isUnsignedOp ? llvm::CmpInst::ICMP_UGE : llvm::CmpInst::ICMP_SGE);
break;
case BinaryExpr::Equal:
opName = "equal";
pred = isFloatOp ? llvm::CmpInst::FCMP_OEQ : llvm::CmpInst::ICMP_EQ;
break;
case BinaryExpr::NotEqual:
opName = "notequal";
pred = isFloatOp ? llvm::CmpInst::FCMP_ONE : llvm::CmpInst::ICMP_NE;
break;
default:
@@ -1580,7 +1600,8 @@ lEmitBinaryCmp(BinaryExpr::Op op, llvm::Value *e0Val, llvm::Value *e1Val,
llvm::Value *cmp = ctx->CmpInst(isFloatOp ? llvm::Instruction::FCmp :
llvm::Instruction::ICmp,
pred, e0Val, e1Val, "bincmp");
pred, e0Val, e1Val,
LLVMGetName(opName, e0Val, e1Val));
// This is a little ugly: CmpInst returns i1 values, but we use vectors
// of i32s for varying bool values; type convert the result here if
// needed.
@@ -2618,7 +2639,7 @@ lEmitOpAssign(AssignExpr::Op op, Expr *arg0, Expr *arg1, const Type *type,
llvm::Value *rvalue = arg1->GetValue(ctx);
ctx->SetDebugPos(pos);
llvm::Value *mask = lMaskForSymbol(baseSym, ctx);
llvm::Value *oldLHS = ctx->LoadInst(lv, mask, lvalueType, "opassign_load");
llvm::Value *oldLHS = ctx->LoadInst(lv, mask, lvalueType);
// Map the operator to the corresponding BinaryExpr::Op operator
BinaryExpr::Op basicop;
@@ -3955,7 +3976,7 @@ IndexExpr::GetValue(FunctionEmitContext *ctx) const {
}
ctx->SetDebugPos(pos);
return ctx->LoadInst(ptr, mask, lvalueType, "index");
return ctx->LoadInst(ptr, mask, lvalueType);
}
@@ -4026,7 +4047,7 @@ lConvertToSlicePointer(FunctionEmitContext *ctx, llvm::Value *ptr,
// offsets
llvm::Value *result = llvm::Constant::getNullValue(sliceStructType);
// And replace the pointer in the struct with the given pointer
return ctx->InsertInst(result, ptr, 0);
return ctx->InsertInst(result, ptr, 0, LLVMGetName(ptr, "_slice"));
}
@@ -4117,7 +4138,8 @@ IndexExpr::GetLValue(FunctionEmitContext *ctx) const {
&baseExprType);
llvm::Value *ptr = ctx->GetElementPtrInst(basePtrValue, indexValue,
baseExprType, "ptr_offset");
baseExprType,
LLVMGetName(basePtrValue, "_offset"));
return lAddVaryingOffsetsIfNeeded(ctx, ptr, GetLValueType());
}
@@ -4153,7 +4175,7 @@ IndexExpr::GetLValue(FunctionEmitContext *ctx) const {
// And do the actual indexing calculation..
llvm::Value *ptr =
ctx->GetElementPtrInst(basePtr, LLVMInt32(0), indexValue,
basePtrType);
basePtrType, LLVMGetName(basePtr, "_offset"));
return lAddVaryingOffsetsIfNeeded(ctx, ptr, GetLValueType());
}
@@ -4643,7 +4665,7 @@ VectorMemberExpr::GetValue(FunctionEmitContext *ctx) const {
// Allocate temporary memory to store the result
llvm::Value *resultPtr = ctx->AllocaInst(memberType->LLVMType(g->ctx),
"vector_tmp");
// FIXME: we should be able to use the internal mask here according
// to the same logic where it's used elsewhere
@@ -4655,17 +4677,19 @@ VectorMemberExpr::GetValue(FunctionEmitContext *ctx) const {
ctx->SetDebugPos(pos);
for (size_t i = 0; i < identifier.size(); ++i) {
char idStr[2] = { identifier[i], '\0' };
llvm::Value *elementPtr = ctx->AddElementOffset(basePtr, indices[i],
basePtrType);
basePtrType,
LLVMGetName(basePtr, idStr));
llvm::Value *elementValue =
ctx->LoadInst(elementPtr, elementMask, elementPtrType,
"vec_element");
ctx->LoadInst(elementPtr, elementMask, elementPtrType);
llvm::Value *ptmp = ctx->AddElementOffset(resultPtr, i, NULL);
const char *resultName = LLVMGetName(resultPtr, idStr);
llvm::Value *ptmp = ctx->AddElementOffset(resultPtr, i, NULL, resultName);
ctx->StoreInst(elementValue, ptmp);
}
return ctx->LoadInst(resultPtr, "swizzle_vec");
return ctx->LoadInst(resultPtr, LLVMGetName(basePtr, "_swizzle"));
}
}
@@ -4799,7 +4823,9 @@ MemberExpr::GetValue(FunctionEmitContext *ctx) const {
}
ctx->SetDebugPos(pos);
return ctx->LoadInst(lvalue, mask, lvalueType, "structelement");
std::string suffix = std::string("_") + identifier;
return ctx->LoadInst(lvalue, mask, lvalueType,
LLVMGetName(lvalue, suffix.c_str()));
}
@@ -4841,7 +4867,8 @@ MemberExpr::GetLValue(FunctionEmitContext *ctx) const {
expr->GetLValueType();
ctx->SetDebugPos(pos);
llvm::Value *ptr = ctx->AddElementOffset(basePtr, elementNumber,
exprLValueType);
exprLValueType,
basePtr->getName().str().c_str());
ptr = lAddVaryingOffsetsIfNeeded(ctx, ptr, GetLValueType());
@@ -5814,6 +5841,23 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
SourcePos pos) {
llvm::Value *cast = NULL;
std::string opName = exprVal->getName().str();
switch (toType->basicType) {
case AtomicType::TYPE_BOOL: opName += "_to_bool"; break;
case AtomicType::TYPE_INT8: opName += "_to_int8"; break;
case AtomicType::TYPE_UINT8: opName += "_to_uint8"; break;
case AtomicType::TYPE_INT16: opName += "_to_int16"; break;
case AtomicType::TYPE_UINT16: opName += "_to_uint16"; break;
case AtomicType::TYPE_INT32: opName += "_to_int32"; break;
case AtomicType::TYPE_UINT32: opName += "_to_uint32"; break;
case AtomicType::TYPE_INT64: opName += "_to_int64"; break;
case AtomicType::TYPE_UINT64: opName += "_to_uint64"; break;
case AtomicType::TYPE_FLOAT: opName += "_to_float"; break;
case AtomicType::TYPE_DOUBLE: opName += "_to_double"; break;
default: FATAL("Unimplemented");
}
const char *cOpName = opName.c_str();
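// Illustration (hypothetical names): an exprVal named "x" converted to
// float gives opName == "x_to_float", so the cast below is emitted as
// "%x_to_float = sitofp ..." rather than the old generic "int2float".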
switch (toType->basicType) {
case AtomicType::TYPE_FLOAT: {
llvm::Type *targetType =
@@ -5825,17 +5869,17 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType)
// If we have a bool vector of i32 elements, first truncate
// down to a single bit
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, "bool_to_i1");
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName);
// And then do an unsigned int->float cast
cast = ctx->CastInst(llvm::Instruction::UIToFP, // unsigned int
exprVal, targetType, "bool2float");
exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT8:
case AtomicType::TYPE_INT16:
case AtomicType::TYPE_INT32:
case AtomicType::TYPE_INT64:
cast = ctx->CastInst(llvm::Instruction::SIToFP, // signed int to float
exprVal, targetType, "int2float");
exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_UINT8:
case AtomicType::TYPE_UINT16:
@@ -5845,14 +5889,14 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
PerformanceWarning(pos, "Conversion from unsigned int to float is slow. "
"Use \"int\" if possible");
cast = ctx->CastInst(llvm::Instruction::UIToFP, // unsigned int to float
exprVal, targetType, "uint2float");
exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_FLOAT:
// No-op cast.
cast = exprVal;
break;
case AtomicType::TYPE_DOUBLE:
cast = ctx->FPCastInst(exprVal, targetType, "double2float");
cast = ctx->FPCastInst(exprVal, targetType, cOpName);
break;
default:
FATAL("unimplemented");
@@ -5868,26 +5912,26 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
if (fromType->IsVaryingType() &&
LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType)
// truncate i32 bool vector values to i1s
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, "bool_to_i1");
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName);
cast = ctx->CastInst(llvm::Instruction::UIToFP, // unsigned int to double
exprVal, targetType, "bool2double");
exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT8:
case AtomicType::TYPE_INT16:
case AtomicType::TYPE_INT32:
case AtomicType::TYPE_INT64:
cast = ctx->CastInst(llvm::Instruction::SIToFP, // signed int
exprVal, targetType, "int2double");
exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_UINT8:
case AtomicType::TYPE_UINT16:
case AtomicType::TYPE_UINT32:
case AtomicType::TYPE_UINT64:
cast = ctx->CastInst(llvm::Instruction::UIToFP, // unsigned int
exprVal, targetType, "uint2double");
exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_FLOAT:
cast = ctx->FPCastInst(exprVal, targetType, "float2double");
cast = ctx->FPCastInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_DOUBLE:
cast = exprVal;
@@ -5905,8 +5949,8 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
case AtomicType::TYPE_BOOL:
if (fromType->IsVaryingType() &&
LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType)
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, "bool_to_i1");
cast = ctx->ZExtInst(exprVal, targetType, "bool2int");
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName);
cast = ctx->ZExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT8:
case AtomicType::TYPE_UINT8:
@@ -5918,15 +5962,15 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
case AtomicType::TYPE_UINT32:
case AtomicType::TYPE_INT64:
case AtomicType::TYPE_UINT64:
cast = ctx->TruncInst(exprVal, targetType, "int64_to_int8");
cast = ctx->TruncInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_FLOAT:
cast = ctx->CastInst(llvm::Instruction::FPToSI, // signed int
exprVal, targetType, "float2int");
exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_DOUBLE:
cast = ctx->CastInst(llvm::Instruction::FPToSI, // signed int
exprVal, targetType, "double2int");
exprVal, targetType, cOpName);
break;
default:
FATAL("unimplemented");
@@ -5941,8 +5985,8 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
case AtomicType::TYPE_BOOL:
if (fromType->IsVaryingType() &&
LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType)
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, "bool_to_i1");
cast = ctx->ZExtInst(exprVal, targetType, "bool2uint");
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName);
cast = ctx->ZExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT8:
case AtomicType::TYPE_UINT8:
@@ -5954,21 +5998,21 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
case AtomicType::TYPE_UINT32:
case AtomicType::TYPE_INT64:
case AtomicType::TYPE_UINT64:
cast = ctx->TruncInst(exprVal, targetType, "int64_to_uint8");
cast = ctx->TruncInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_FLOAT:
if (fromType->IsVaryingType())
PerformanceWarning(pos, "Conversion from float to unsigned int is slow. "
"Use \"int\" if possible");
cast = ctx->CastInst(llvm::Instruction::FPToUI, // unsigned int
exprVal, targetType, "float2uint");
exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_DOUBLE:
if (fromType->IsVaryingType())
PerformanceWarning(pos, "Conversion from double to unsigned int is slow. "
"Use \"int\" if possible");
cast = ctx->CastInst(llvm::Instruction::FPToUI, // unsigned int
exprVal, targetType, "double2uint");
exprVal, targetType, cOpName);
break;
default:
FATAL("unimplemented");
@@ -5983,14 +6027,14 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
case AtomicType::TYPE_BOOL:
if (fromType->IsVaryingType() &&
LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType)
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, "bool_to_i1");
cast = ctx->ZExtInst(exprVal, targetType, "bool2int");
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName);
cast = ctx->ZExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT8:
cast = ctx->SExtInst(exprVal, targetType, "int2int16");
cast = ctx->SExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_UINT8:
cast = ctx->ZExtInst(exprVal, targetType, "uint2uint16");
cast = ctx->ZExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT16:
case AtomicType::TYPE_UINT16:
@@ -5998,17 +6042,17 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
break;
case AtomicType::TYPE_FLOAT:
cast = ctx->CastInst(llvm::Instruction::FPToSI, // signed int
exprVal, targetType, "float2int");
exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT32:
case AtomicType::TYPE_UINT32:
case AtomicType::TYPE_INT64:
case AtomicType::TYPE_UINT64:
cast = ctx->TruncInst(exprVal, targetType, "int64_to_int16");
cast = ctx->TruncInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_DOUBLE:
cast = ctx->CastInst(llvm::Instruction::FPToSI, // signed int
exprVal, targetType, "double2int");
exprVal, targetType, cOpName);
break;
default:
FATAL("unimplemented");
@@ -6023,14 +6067,14 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
case AtomicType::TYPE_BOOL:
if (fromType->IsVaryingType() &&
LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType)
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, "bool_to_i1");
cast = ctx->ZExtInst(exprVal, targetType, "bool2uint16");
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName);
cast = ctx->ZExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT8:
cast = ctx->SExtInst(exprVal, targetType, "uint2uint16");
cast = ctx->SExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_UINT8:
cast = ctx->ZExtInst(exprVal, targetType, "uint2uint16");
cast = ctx->ZExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT16:
case AtomicType::TYPE_UINT16:
@@ -6041,20 +6085,20 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
PerformanceWarning(pos, "Conversion from float to unsigned int is slow. "
"Use \"int\" if possible");
cast = ctx->CastInst(llvm::Instruction::FPToUI, // unsigned int
exprVal, targetType, "float2uint");
exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT32:
case AtomicType::TYPE_UINT32:
case AtomicType::TYPE_INT64:
case AtomicType::TYPE_UINT64:
cast = ctx->TruncInst(exprVal, targetType, "int64_to_uint16");
cast = ctx->TruncInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_DOUBLE:
if (fromType->IsVaryingType())
PerformanceWarning(pos, "Conversion from double to unsigned int is slow. "
"Use \"int\" if possible");
cast = ctx->CastInst(llvm::Instruction::FPToUI, // unsigned int
exprVal, targetType, "double2uint");
exprVal, targetType, cOpName);
break;
default:
FATAL("unimplemented");
@@ -6069,16 +6113,16 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
case AtomicType::TYPE_BOOL:
if (fromType->IsVaryingType() &&
LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType)
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, "bool_to_i1");
cast = ctx->ZExtInst(exprVal, targetType, "bool2int");
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName);
cast = ctx->ZExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT8:
case AtomicType::TYPE_INT16:
cast = ctx->SExtInst(exprVal, targetType, "int2int32");
cast = ctx->SExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_UINT8:
case AtomicType::TYPE_UINT16:
cast = ctx->ZExtInst(exprVal, targetType, "uint2uint32");
cast = ctx->ZExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT32:
case AtomicType::TYPE_UINT32:
@@ -6086,15 +6130,15 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
break;
case AtomicType::TYPE_FLOAT:
cast = ctx->CastInst(llvm::Instruction::FPToSI, // signed int
exprVal, targetType, "float2int");
exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT64:
case AtomicType::TYPE_UINT64:
cast = ctx->TruncInst(exprVal, targetType, "int64_to_int32");
cast = ctx->TruncInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_DOUBLE:
cast = ctx->CastInst(llvm::Instruction::FPToSI, // signed int
exprVal, targetType, "double2int");
exprVal, targetType, cOpName);
break;
default:
FATAL("unimplemented");
@@ -6109,16 +6153,16 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
case AtomicType::TYPE_BOOL:
if (fromType->IsVaryingType() &&
LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType)
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, "bool_to_i1");
cast = ctx->ZExtInst(exprVal, targetType, "bool2uint");
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName);
cast = ctx->ZExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT8:
case AtomicType::TYPE_INT16:
cast = ctx->SExtInst(exprVal, targetType, "uint2uint");
cast = ctx->SExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_UINT8:
case AtomicType::TYPE_UINT16:
cast = ctx->ZExtInst(exprVal, targetType, "uint2uint");
cast = ctx->ZExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT32:
case AtomicType::TYPE_UINT32:
@@ -6129,18 +6173,18 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
PerformanceWarning(pos, "Conversion from float to unsigned int is slow. "
"Use \"int\" if possible");
cast = ctx->CastInst(llvm::Instruction::FPToUI, // unsigned int
exprVal, targetType, "float2uint");
exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT64:
case AtomicType::TYPE_UINT64:
cast = ctx->TruncInst(exprVal, targetType, "int64_to_uint32");
cast = ctx->TruncInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_DOUBLE:
if (fromType->IsVaryingType())
PerformanceWarning(pos, "Conversion from double to unsigned int is slow. "
"Use \"int\" if possible");
cast = ctx->CastInst(llvm::Instruction::FPToUI, // unsigned int
exprVal, targetType, "double2uint");
exprVal, targetType, cOpName);
break;
default:
FATAL("unimplemented");
@@ -6155,22 +6199,22 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
case AtomicType::TYPE_BOOL:
if (fromType->IsVaryingType() &&
LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType)
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, "bool_to_i1");
cast = ctx->ZExtInst(exprVal, targetType, "bool2int64");
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName);
cast = ctx->ZExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT8:
case AtomicType::TYPE_INT16:
case AtomicType::TYPE_INT32:
cast = ctx->SExtInst(exprVal, targetType, "int_to_int64");
cast = ctx->SExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_UINT8:
case AtomicType::TYPE_UINT16:
case AtomicType::TYPE_UINT32:
cast = ctx->ZExtInst(exprVal, targetType, "uint_to_int64");
cast = ctx->ZExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_FLOAT:
cast = ctx->CastInst(llvm::Instruction::FPToSI, // signed int
exprVal, targetType, "float2int64");
exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT64:
case AtomicType::TYPE_UINT64:
@@ -6178,7 +6222,7 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
break;
case AtomicType::TYPE_DOUBLE:
cast = ctx->CastInst(llvm::Instruction::FPToSI, // signed int
exprVal, targetType, "double2int64");
exprVal, targetType, cOpName);
break;
default:
FATAL("unimplemented");
@@ -6193,25 +6237,25 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
case AtomicType::TYPE_BOOL:
if (fromType->IsVaryingType() &&
LLVMTypes::BoolVectorType == LLVMTypes::Int32VectorType)
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, "bool_to_i1");
cast = ctx->ZExtInst(exprVal, targetType, "bool2uint");
exprVal = ctx->TruncInst(exprVal, LLVMTypes::Int1VectorType, cOpName);
cast = ctx->ZExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT8:
case AtomicType::TYPE_INT16:
case AtomicType::TYPE_INT32:
cast = ctx->SExtInst(exprVal, targetType, "int_to_uint64");
cast = ctx->SExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_UINT8:
case AtomicType::TYPE_UINT16:
case AtomicType::TYPE_UINT32:
cast = ctx->ZExtInst(exprVal, targetType, "uint_to_uint64");
cast = ctx->ZExtInst(exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_FLOAT:
if (fromType->IsVaryingType())
PerformanceWarning(pos, "Conversion from float to unsigned int64 is slow. "
"Use \"int64\" if possible");
cast = ctx->CastInst(llvm::Instruction::FPToUI, // signed int
exprVal, targetType, "float2uint");
exprVal, targetType, cOpName);
break;
case AtomicType::TYPE_INT64:
case AtomicType::TYPE_UINT64:
@@ -6222,7 +6266,7 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
PerformanceWarning(pos, "Conversion from double to unsigned int64 is slow. "
"Use \"int64\" if possible");
cast = ctx->CastInst(llvm::Instruction::FPToUI, // signed int
exprVal, targetType, "double2uint");
exprVal, targetType, cOpName);
break;
default:
FATAL("unimplemented");
@@ -6239,7 +6283,7 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMInt8(0) :
(llvm::Value *)LLVMInt8Vector((int8_t)0);
cast = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_NE,
exprVal, zero, "cmpi0");
exprVal, zero, cOpName);
break;
}
case AtomicType::TYPE_INT16:
@@ -6247,7 +6291,7 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMInt16(0) :
(llvm::Value *)LLVMInt16Vector((int16_t)0);
cast = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_NE,
exprVal, zero, "cmpi0");
exprVal, zero, cOpName);
break;
}
case AtomicType::TYPE_INT32:
@@ -6255,14 +6299,14 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMInt32(0) :
(llvm::Value *)LLVMInt32Vector(0);
cast = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_NE,
exprVal, zero, "cmpi0");
exprVal, zero, cOpName);
break;
}
case AtomicType::TYPE_FLOAT: {
llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMFloat(0.f) :
(llvm::Value *)LLVMFloatVector(0.f);
cast = ctx->CmpInst(llvm::Instruction::FCmp, llvm::CmpInst::FCMP_ONE,
exprVal, zero, "cmpf0");
exprVal, zero, cOpName);
break;
}
case AtomicType::TYPE_INT64:
@@ -6270,14 +6314,14 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMInt64(0) :
(llvm::Value *)LLVMInt64Vector((int64_t)0);
cast = ctx->CmpInst(llvm::Instruction::ICmp, llvm::CmpInst::ICMP_NE,
exprVal, zero, "cmpi0");
exprVal, zero, cOpName);
break;
}
case AtomicType::TYPE_DOUBLE: {
llvm::Value *zero = fromType->IsUniformType() ? (llvm::Value *)LLVMDouble(0.) :
(llvm::Value *)LLVMDoubleVector(0.);
cast = ctx->CmpInst(llvm::Instruction::FCmp, llvm::CmpInst::FCMP_ONE,
exprVal, zero, "cmpd0");
exprVal, zero, cOpName);
break;
}
default:
@@ -6291,7 +6335,7 @@ lTypeConvAtomic(FunctionEmitContext *ctx, llvm::Value *exprVal,
// turn into a vector below, the way it does for everyone
// else...
cast = ctx->SExtInst(cast, LLVMTypes::BoolVectorType->getElementType(),
"i1bool_to_i32bool");
LLVMGetName(cast, "to_i32bool"));
}
}
else
@@ -7022,7 +7066,7 @@ DerefExpr::GetValue(FunctionEmitContext *ctx) const {
ctx->GetFullMask();
ctx->SetDebugPos(pos);
return ctx->LoadInst(ptr, mask, type, "deref_load");
return ctx->LoadInst(ptr, mask, type);
}
@@ -7395,7 +7439,9 @@ SymbolExpr::GetValue(FunctionEmitContext *ctx) const {
if (!symbol || !symbol->storagePtr)
return NULL;
ctx->SetDebugPos(pos);
return ctx->LoadInst(symbol->storagePtr, symbol->name.c_str());
std::string loadName = symbol->name + std::string("_load");
return ctx->LoadInst(symbol->storagePtr, loadName.c_str());
}
@@ -8112,7 +8158,8 @@ NewExpr::GetValue(FunctionEmitContext *ctx) const {
// pointer of the return type and to run the code for initializers,
// if present.
llvm::Type *ptrType = retType->LLVMType(g->ctx);
ptrValue = ctx->BitCastInst(ptrValue, ptrType, "cast_new_ptr");
ptrValue = ctx->BitCastInst(ptrValue, ptrType,
LLVMGetName(ptrValue, "_cast_ptr"));
if (initExpr != NULL)
InitSymbol(ptrValue, allocType, initExpr, ctx, pos);

opt.cpp (53 changed lines)

@@ -829,14 +829,16 @@ IntrinsicsOpt::runOnBasicBlock(llvm::BasicBlock &bb) {
llvm::Type *returnType = callInst->getType();
Assert(llvm::isa<llvm::VectorType>(returnType));
// cast the i8 * to the appropriate type
const char *name = LLVMGetName(callInst->getArgOperand(0), "_cast");
llvm::Value *castPtr =
new llvm::BitCastInst(callInst->getArgOperand(0),
llvm::PointerType::get(returnType, 0),
"ptr2vec", callInst);
name, callInst);
lCopyMetadata(castPtr, callInst);
int align = callInst->getCalledFunction() == avxMaskedLoad32 ? 4 : 8;
name = LLVMGetName(callInst->getArgOperand(0), "_load");
llvm::Instruction *loadInst =
new llvm::LoadInst(castPtr, "load", false /* not volatile */,
new llvm::LoadInst(castPtr, name, false /* not volatile */,
align, (llvm::Instruction *)NULL);
lCopyMetadata(loadInst, callInst);
llvm::ReplaceInstWithInst(callInst, loadInst);
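
With an all-on mask, the masked-load intrinsic call is replaced by a plain load; with hypothetical names the rewritten sequence is roughly

    %p_cast = bitcast i8* %p to <8 x float>*
    %p_load = load <8 x float>* %p_cast, align 4

(LLVM 3.x load syntax; element type, width, and alignment depend on which AVX masked-load intrinsic was matched).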
@@ -859,10 +861,12 @@ IntrinsicsOpt::runOnBasicBlock(llvm::BasicBlock &bb) {
// all lanes storing, so replace with a regular store
llvm::Value *rvalue = callInst->getArgOperand(2);
llvm::Type *storeType = rvalue->getType();
const char *name = LLVMGetName(callInst->getArgOperand(0),
"_ptrcast");
llvm::Value *castPtr =
new llvm::BitCastInst(callInst->getArgOperand(0),
llvm::PointerType::get(storeType, 0),
"ptr2vec", callInst);
name, callInst);
lCopyMetadata(castPtr, callInst);
llvm::StoreInst *storeInst =
@@ -1291,12 +1295,13 @@ lExtractConstantOffset(llvm::Value *vec, llvm::Value **constOffset,
*constOffset = NULL;
else
*constOffset = new llvm::SExtInst(co, sext->getType(),
"const_offset_sext", insertBefore);
LLVMGetName(co, "_sext"),
insertBefore);
if (vo == NULL)
*variableOffset = NULL;
else
*variableOffset = new llvm::SExtInst(vo, sext->getType(),
"variable_offset_sext",
LLVMGetName(vo, "_sext"),
insertBefore);
return;
}
@@ -1320,7 +1325,8 @@ lExtractConstantOffset(llvm::Value *vec, llvm::Value **constOffset,
else
*constOffset =
llvm::BinaryOperator::Create(llvm::Instruction::Add, c0, c1,
"const_op", insertBefore);
LLVMGetName("add", c0, c1),
insertBefore);
if (v0 == NULL || llvm::isa<llvm::ConstantAggregateZero>(v0))
*variableOffset = v1;
@@ -1329,7 +1335,8 @@ lExtractConstantOffset(llvm::Value *vec, llvm::Value **constOffset,
else
*variableOffset =
llvm::BinaryOperator::Create(llvm::Instruction::Add, v0, v1,
"variable_op", insertBefore);
LLVMGetName("add", v0, v1),
insertBefore);
return;
}
else if (bop->getOpcode() == llvm::Instruction::Mul) {
@@ -1343,26 +1350,27 @@ lExtractConstantOffset(llvm::Value *vec, llvm::Value **constOffset,
if (c0 != NULL && c1 != NULL)
*constOffset =
llvm::BinaryOperator::Create(llvm::Instruction::Mul, c0, c1,
"const_mul", insertBefore);
LLVMGetName("mul", c0, c1),
insertBefore);
else
*constOffset = NULL;
llvm::Value *va = NULL, *vb = NULL, *vc = NULL;
if (v0 != NULL && c1 != NULL)
va = llvm::BinaryOperator::Create(llvm::Instruction::Mul, v0, c1,
"va_mul", insertBefore);
LLVMGetName("mul", v0, c1), insertBefore);
if (c0 != NULL && v1 != NULL)
vb = llvm::BinaryOperator::Create(llvm::Instruction::Mul, c0, v1,
"vb_mul", insertBefore);
LLVMGetName("mul", c0, v1), insertBefore);
if (v0 != NULL && v1 != NULL)
vc = llvm::BinaryOperator::Create(llvm::Instruction::Mul, v0, v1,
"vc_mul", insertBefore);
LLVMGetName("mul", v0, v1), insertBefore);
llvm::Value *vab = NULL;
if (va != NULL && vb != NULL)
vab = llvm::BinaryOperator::Create(llvm::Instruction::Add, va, vb,
"vab_add", insertBefore);
LLVMGetName("add", va, vb), insertBefore);
else if (va != NULL)
vab = va;
else
@@ -1371,7 +1379,7 @@ lExtractConstantOffset(llvm::Value *vec, llvm::Value **constOffset,
if (vab != NULL && vc != NULL)
*variableOffset =
llvm::BinaryOperator::Create(llvm::Instruction::Add, vab, vc,
"vabc_add", insertBefore);
LLVMGetName("add", vab, vc), insertBefore);
else if (vab != NULL)
*variableOffset = vab;
else
@@ -1443,7 +1451,7 @@ lExtract248Scale(llvm::Value *splatOperand, int splatValue,
*result =
llvm::BinaryOperator::Create(llvm::Instruction::Mul,
splatDiv, otherOperand,
"add", insertBefore);
"mul", insertBefore);
return LLVMInt32(scale);
}
}
@@ -1673,7 +1681,8 @@ lOffsets32BitSafe(llvm::Value **variableOffsetPtr,
// do the more general check with lVectorIs32BitInts().
variableOffset =
new llvm::TruncInst(variableOffset, LLVMTypes::Int32VectorType,
"trunc_variable_offset", insertBefore);
LLVMGetName(variableOffset, "_trunc"),
insertBefore);
else
return false;
}
@@ -1683,7 +1692,7 @@ lOffsets32BitSafe(llvm::Value **variableOffsetPtr,
// Truncate them so we have a 32-bit vector type for them.
constOffset =
new llvm::TruncInst(constOffset, LLVMTypes::Int32VectorType,
"trunc_const_offset", insertBefore);
LLVMGetName(constOffset, "_trunc"), insertBefore);
}
else {
// FIXME: otherwise we just assume that all constant offsets
@@ -1696,7 +1705,7 @@ lOffsets32BitSafe(llvm::Value **variableOffsetPtr,
// enough for us in some cases if we call it from here.
constOffset =
new llvm::TruncInst(constOffset, LLVMTypes::Int32VectorType,
"trunc_const_offset", insertBefore);
LLVMGetName(constOffset, "_trunc"), insertBefore);
}
}
@@ -1819,7 +1828,7 @@ DetectGSBaseOffsetsPass::runOnBasicBlock(llvm::BasicBlock &bb) {
// Cast the base pointer to a void *, since that's what the
// __pseudo_*_base_offsets_* functions want.
basePtr = new llvm::IntToPtrInst(basePtr, LLVMTypes::VoidPointerType,
"base2void", callInst);
LLVMGetName(basePtr, "_2void"), callInst);
lCopyMetadata(basePtr, callInst);
llvm::Function *gatherScatterFunc = info->baseOffsetsFunc;
@@ -1842,7 +1851,8 @@ DetectGSBaseOffsetsPass::runOnBasicBlock(llvm::BasicBlock &bb) {
// way we can then call ReplaceInstWithInst().
llvm::Instruction *newCall =
lCallInst(gatherScatterFunc, basePtr, variableOffset, offsetScale,
constOffset, mask, "newgather", NULL);
constOffset, mask, callInst->getName().str().c_str(),
NULL);
lCopyMetadata(newCall, callInst);
llvm::ReplaceInstWithInst(callInst, newCall);
}
@@ -2443,7 +2453,7 @@ GSToLoadStorePass::runOnBasicBlock(llvm::BasicBlock &bb) {
Debug(pos, "Transformed gather to scalar load and broadcast!");
llvm::Instruction *newCall =
lCallInst(gatherInfo->loadBroadcastFunc, ptr, mask,
"load_braodcast");
LLVMGetName(callInst, "_broadcast"));
lCopyMetadata(newCall, callInst);
llvm::ReplaceInstWithInst(callInst, newCall);
@@ -2481,7 +2491,8 @@ GSToLoadStorePass::runOnBasicBlock(llvm::BasicBlock &bb) {
if (gatherInfo != NULL) {
Debug(pos, "Transformed gather to unaligned vector load!");
llvm::Instruction *newCall =
lCallInst(gatherInfo->loadMaskedFunc, ptr, mask, "masked_load");
lCallInst(gatherInfo->loadMaskedFunc, ptr, mask,
LLVMGetName(ptr, "_masked_load"));
lCopyMetadata(newCall, callInst);
llvm::ReplaceInstWithInst(callInst, newCall);
}