Fixed a number of issues related to memory alignment: several places
assumed vector-width-aligned pointers, but in general there is no guarantee that pointers are aligned that way. Removed the aligned memory allocation routines from some of the examples; they are no longer needed. No performance difference on Core 2 / Core i5 CPUs; older CPUs may see some regressions. Still need to update the documentation for this change and finish reviewing alignment issues in Load/Store instructions generated by .cpp files.
This commit is contained in:
1 changed file: ctx.cpp (8 changes)
@@ -1644,7 +1644,8 @@ FunctionEmitContext::StoreInst(llvm::Value *rvalue, llvm::Value *lvalue,
         return;
     }
 
-    llvm::Instruction *inst = new llvm::StoreInst(rvalue, lvalue, name, bblock);
+    llvm::Instruction *inst = new llvm::StoreInst(rvalue, lvalue, false /* not volatile */,
+                                                  4, bblock);
     AddDebugPos(inst);
 }
 
@@ -1662,7 +1663,8 @@ FunctionEmitContext::StoreInst(llvm::Value *rvalue, llvm::Value *lvalue,
     // Figure out what kind of store we're doing here
     if (rvalueType->IsUniformType()) {
         // The easy case; a regular store
-        llvm::Instruction *si = new llvm::StoreInst(rvalue, lvalue, name, bblock);
+        llvm::Instruction *si = new llvm::StoreInst(rvalue, lvalue, false /* not volatile */,
+                                                    4, bblock);
         AddDebugPos(si);
     }
     else if (llvm::isa<const llvm::ArrayType>(lvalue->getType()))
@@ -1673,7 +1675,7 @@ FunctionEmitContext::StoreInst(llvm::Value *rvalue, llvm::Value *lvalue,
         // Otherwise it is a masked store unless we can determine that the
         // mask is all on...
         llvm::Instruction *si =
-            new llvm::StoreInst(rvalue, lvalue, name, bblock);
+            new llvm::StoreInst(rvalue, lvalue, false /*not volatile*/, 4, bblock);
         AddDebugPos(si);
     }
     else
Reference in New Issue
Block a user