Fixed a number of issues related to memory alignment; a number of places
were expecting vector-width-aligned pointers, though in general there's no
guarantee that pointers are actually aligned that way.
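
For reference, a minimal sketch of what the store changes amount to at the
LLVM API level (illustrative only, not code from this commit; the function
name and parameter names below are made up, and it assumes the LLVM C++ API
of this era, where headers live directly under llvm/):

    #include "llvm/BasicBlock.h"
    #include "llvm/Instructions.h"
    #include "llvm/Value.h"

    // Emit a vector store through a pointer that is only known to be 4-byte
    // aligned.  The explicit alignment of 4 produces "store ..., align 4" in
    // the IR, so the x86 backend emits unaligned vector moves (movups) rather
    // than aligned ones (movaps), which would fault on pointers that aren't
    // vector-width aligned.
    llvm::StoreInst *EmitUnalignedStore(llvm::Value *rvalue,
                                        llvm::Value *lvalue,
                                        llvm::BasicBlock *bblock) {
        return new llvm::StoreInst(rvalue, lvalue, false /* not volatile */,
                                   4 /* alignment in bytes */, bblock);
    }

This is presumably also why the performance note below plays out the way it
does: recent CPUs execute unaligned vector moves on addresses that happen to
be aligned at little or no extra cost, while some older ones pay a penalty.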

Removed the aligned memory allocation routines from some of the examples;
they're no longer needed.

No performance difference on Core 2/Core i5 CPUs; older CPUs may see some
regressions.

Still need to update the documentation for this change and finish reviewing
alignment issues in Load/Store instructions generated by .cpp files.
Matt Pharr
2011-06-23 18:18:33 -07:00
parent d340dcbfcc
commit b84167dddd
11 changed files with 45 additions and 112 deletions

@@ -1644,7 +1644,8 @@ FunctionEmitContext::StoreInst(llvm::Value *rvalue, llvm::Value *lvalue,
         return;
     }
-    llvm::Instruction *inst = new llvm::StoreInst(rvalue, lvalue, name, bblock);
+    llvm::Instruction *inst = new llvm::StoreInst(rvalue, lvalue, false /* not volatile */,
+                                                  4, bblock);
     AddDebugPos(inst);
 }
@@ -1662,7 +1663,8 @@ FunctionEmitContext::StoreInst(llvm::Value *rvalue, llvm::Value *lvalue,
     // Figure out what kind of store we're doing here
     if (rvalueType->IsUniformType()) {
         // The easy case; a regular store
-        llvm::Instruction *si = new llvm::StoreInst(rvalue, lvalue, name, bblock);
+        llvm::Instruction *si = new llvm::StoreInst(rvalue, lvalue, false /* not volatile */,
+                                                    4, bblock);
         AddDebugPos(si);
     }
     else if (llvm::isa<const llvm::ArrayType>(lvalue->getType()))
@@ -1673,7 +1675,7 @@ FunctionEmitContext::StoreInst(llvm::Value *rvalue, llvm::Value *lvalue,
         // Otherwise it is a masked store unless we can determine that the
         // mask is all on...
         llvm::Instruction *si =
-            new llvm::StoreInst(rvalue, lvalue, name, bblock);
+            new llvm::StoreInst(rvalue, lvalue, false /*not volatile*/, 4, bblock);
         AddDebugPos(si);
     }
     else