forked from OSchip/llvm-project
[BasicAA] Use index size instead of pointer size
When accumulating the GEP offset in BasicAA, we should use the pointer index size rather than the pointer size. Differential Revision: https://reviews.llvm.org/D112370
This commit is contained in:
parent
aee86f9b6c
commit
a8c318b50e
|
@ -377,8 +377,8 @@ public:
|
||||||
/// the backends/clients are updated.
|
/// the backends/clients are updated.
|
||||||
unsigned getPointerSize(unsigned AS = 0) const;
|
unsigned getPointerSize(unsigned AS = 0) const;
|
||||||
|
|
||||||
/// Returns the maximum pointer size over all address spaces.
|
/// Returns the maximum index size over all address spaces.
|
||||||
unsigned getMaxPointerSize() const;
|
unsigned getMaxIndexSize() const;
|
||||||
|
|
||||||
// Index size used for address calculation.
|
// Index size used for address calculation.
|
||||||
unsigned getIndexSize(unsigned AS) const;
|
unsigned getIndexSize(unsigned AS) const;
|
||||||
|
@ -410,9 +410,9 @@ public:
|
||||||
return getPointerSize(AS) * 8;
|
return getPointerSize(AS) * 8;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the maximum pointer size over all address spaces.
|
/// Returns the maximum index size over all address spaces.
|
||||||
unsigned getMaxPointerSizeInBits() const {
|
unsigned getMaxIndexSizeInBits() const {
|
||||||
return getMaxPointerSize() * 8;
|
return getMaxIndexSize() * 8;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Size in bits of index used for address calculation in getelementptr.
|
/// Size in bits of index used for address calculation in getelementptr.
|
||||||
|
|
|
@ -465,14 +465,14 @@ static LinearExpression GetLinearExpression(
|
||||||
return Val;
|
return Val;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// To ensure a pointer offset fits in an integer of size PointerSize
|
/// To ensure a pointer offset fits in an integer of size IndexSize
|
||||||
/// (in bits) when that size is smaller than the maximum pointer size. This is
|
/// (in bits) when that size is smaller than the maximum index size. This is
|
||||||
/// an issue, for example, in particular for 32b pointers with negative indices
|
/// an issue, for example, in particular for 32b pointers with negative indices
|
||||||
/// that rely on two's complement wrap-arounds for precise alias information
|
/// that rely on two's complement wrap-arounds for precise alias information
|
||||||
/// where the maximum pointer size is 64b.
|
/// where the maximum index size is 64b.
|
||||||
static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
|
static APInt adjustToIndexSize(const APInt &Offset, unsigned IndexSize) {
|
||||||
assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
|
assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
|
||||||
unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
|
unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
|
||||||
return (Offset << ShiftBits).ashr(ShiftBits);
|
return (Offset << ShiftBits).ashr(ShiftBits);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -549,9 +549,9 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
|
||||||
SearchTimes++;
|
SearchTimes++;
|
||||||
const Instruction *CxtI = dyn_cast<Instruction>(V);
|
const Instruction *CxtI = dyn_cast<Instruction>(V);
|
||||||
|
|
||||||
unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
|
unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
|
||||||
DecomposedGEP Decomposed;
|
DecomposedGEP Decomposed;
|
||||||
Decomposed.Offset = APInt(MaxPointerSize, 0);
|
Decomposed.Offset = APInt(MaxIndexSize, 0);
|
||||||
do {
|
do {
|
||||||
// See if this is a bitcast or GEP.
|
// See if this is a bitcast or GEP.
|
||||||
const Operator *Op = dyn_cast<Operator>(V);
|
const Operator *Op = dyn_cast<Operator>(V);
|
||||||
|
@ -620,7 +620,7 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
|
||||||
unsigned AS = GEPOp->getPointerAddressSpace();
|
unsigned AS = GEPOp->getPointerAddressSpace();
|
||||||
// Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
|
// Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
|
||||||
gep_type_iterator GTI = gep_type_begin(GEPOp);
|
gep_type_iterator GTI = gep_type_begin(GEPOp);
|
||||||
unsigned PointerSize = DL.getPointerSizeInBits(AS);
|
unsigned IndexSize = DL.getIndexSizeInBits(AS);
|
||||||
// Assume all GEP operands are constants until proven otherwise.
|
// Assume all GEP operands are constants until proven otherwise.
|
||||||
bool GepHasConstantOffset = true;
|
bool GepHasConstantOffset = true;
|
||||||
for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
|
for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
|
||||||
|
@ -643,26 +643,26 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
|
||||||
continue;
|
continue;
|
||||||
Decomposed.Offset +=
|
Decomposed.Offset +=
|
||||||
DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
|
DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
|
||||||
CIdx->getValue().sextOrTrunc(MaxPointerSize);
|
CIdx->getValue().sextOrTrunc(MaxIndexSize);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
GepHasConstantOffset = false;
|
GepHasConstantOffset = false;
|
||||||
|
|
||||||
// If the integer type is smaller than the pointer size, it is implicitly
|
// If the integer type is smaller than the index size, it is implicitly
|
||||||
// sign extended to pointer size.
|
// sign extended or truncated to index size.
|
||||||
unsigned Width = Index->getType()->getIntegerBitWidth();
|
unsigned Width = Index->getType()->getIntegerBitWidth();
|
||||||
unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
|
unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
|
||||||
unsigned TruncBits = PointerSize < Width ? Width - PointerSize : 0;
|
unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
|
||||||
LinearExpression LE = GetLinearExpression(
|
LinearExpression LE = GetLinearExpression(
|
||||||
CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);
|
CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);
|
||||||
|
|
||||||
// Scale by the type size.
|
// Scale by the type size.
|
||||||
unsigned TypeSize =
|
unsigned TypeSize =
|
||||||
DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize();
|
DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize();
|
||||||
LE = LE.mul(APInt(PointerSize, TypeSize), GEPOp->isInBounds());
|
LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
|
||||||
Decomposed.Offset += LE.Offset.sextOrSelf(MaxPointerSize);
|
Decomposed.Offset += LE.Offset.sextOrSelf(MaxIndexSize);
|
||||||
APInt Scale = LE.Scale.sextOrSelf(MaxPointerSize);
|
APInt Scale = LE.Scale.sextOrSelf(MaxIndexSize);
|
||||||
|
|
||||||
// If we already had an occurrence of this index variable, merge this
|
// If we already had an occurrence of this index variable, merge this
|
||||||
// scale into it. For example, we want to handle:
|
// scale into it. For example, we want to handle:
|
||||||
|
@ -678,8 +678,8 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make sure that we have a scale that makes sense for this target's
|
// Make sure that we have a scale that makes sense for this target's
|
||||||
// pointer size.
|
// index size.
|
||||||
Scale = adjustToPointerSize(Scale, PointerSize);
|
Scale = adjustToIndexSize(Scale, IndexSize);
|
||||||
|
|
||||||
if (!!Scale) {
|
if (!!Scale) {
|
||||||
VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW};
|
VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW};
|
||||||
|
@ -689,7 +689,7 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
|
||||||
|
|
||||||
// Take care of wrap-arounds
|
// Take care of wrap-arounds
|
||||||
if (GepHasConstantOffset)
|
if (GepHasConstantOffset)
|
||||||
Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);
|
Decomposed.Offset = adjustToIndexSize(Decomposed.Offset, IndexSize);
|
||||||
|
|
||||||
// Analyze the base pointer next.
|
// Analyze the base pointer next.
|
||||||
V = GEPOp->getOperand(0);
|
V = GEPOp->getOperand(0);
|
||||||
|
@ -1258,7 +1258,7 @@ AliasResult BasicAAResult::aliasGEP(
|
||||||
CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());
|
CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());
|
||||||
|
|
||||||
assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
|
assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
|
||||||
"Bit widths are normalized to MaxPointerSize");
|
"Bit widths are normalized to MaxIndexSize");
|
||||||
if (Index.IsNSW)
|
if (Index.IsNSW)
|
||||||
OffsetRange = OffsetRange.add(CR.smul_sat(ConstantRange(Scale)));
|
OffsetRange = OffsetRange.add(CR.smul_sat(ConstantRange(Scale)));
|
||||||
else
|
else
|
||||||
|
|
|
@ -707,12 +707,12 @@ unsigned DataLayout::getPointerSize(unsigned AS) const {
|
||||||
return getPointerAlignElem(AS).TypeByteWidth;
|
return getPointerAlignElem(AS).TypeByteWidth;
|
||||||
}
|
}
|
||||||
|
|
||||||
unsigned DataLayout::getMaxPointerSize() const {
|
unsigned DataLayout::getMaxIndexSize() const {
|
||||||
unsigned MaxPointerSize = 0;
|
unsigned MaxIndexSize = 0;
|
||||||
for (auto &P : Pointers)
|
for (auto &P : Pointers)
|
||||||
MaxPointerSize = std::max(MaxPointerSize, P.TypeByteWidth);
|
MaxIndexSize = std::max(MaxIndexSize, P.IndexWidth);
|
||||||
|
|
||||||
return MaxPointerSize;
|
return MaxIndexSize;
|
||||||
}
|
}
|
||||||
|
|
||||||
unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const {
|
unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const {
|
||||||
|
|
|
@ -506,7 +506,7 @@ void WebAssemblyAsmPrinter::EmitTargetFeatures(Module &M) {
|
||||||
// This is an "architecture", not a "feature", but we emit it as such for
|
// This is an "architecture", not a "feature", but we emit it as such for
|
||||||
// the benefit of tools like Binaryen and consistency with other producers.
|
// the benefit of tools like Binaryen and consistency with other producers.
|
||||||
// FIXME: Subtarget is null here, so can't Subtarget->hasAddr64() ?
|
// FIXME: Subtarget is null here, so can't Subtarget->hasAddr64() ?
|
||||||
if (M.getDataLayout().getMaxPointerSize() == 8) {
|
if (M.getDataLayout().getPointerSize() == 8) {
|
||||||
// Can't use EmitFeature since "wasm-feature-memory64" is not a module
|
// Can't use EmitFeature since "wasm-feature-memory64" is not a module
|
||||||
// flag.
|
// flag.
|
||||||
EmittedFeatures.push_back({wasm::WASM_FEATURE_PREFIX_USED, "memory64"});
|
EmittedFeatures.push_back({wasm::WASM_FEATURE_PREFIX_USED, "memory64"});
|
||||||
|
|
|
@ -0,0 +1,18 @@
|
||||||
|
; RUN: opt -basic-aa -aa-eval -print-all-alias-modref-info -disable-output %s 2>&1 | FileCheck %s

; Pointers are 64 bits wide, but the index size is only 32 bits.
target datalayout = "p:64:64:64:32"

; gep.1 and gep.2 must alias, because GEP offsets are truncated to the index
; size (32-bit), not the pointer size (64-bit): the i64 offset 4294967296 is
; exactly 2^32 and wraps to 0 at the index width.
define void @mustalias_due_to_index_size(i8* %ptr) {
; CHECK-LABEL: Function: mustalias_due_to_index_size
; CHECK-NEXT:  MustAlias: i8* %gep.1, i8* %ptr
; CHECK-NEXT:  MustAlias: i8* %gep.2, i8* %ptr
; CHECK-NEXT:  MustAlias: i8* %gep.1, i8* %gep.2
;
  %gep.1 = getelementptr i8, i8* %ptr, i64 4294967296
  store i8 0, i8* %gep.1
  %gep.2 = getelementptr i8, i8* %ptr, i64 0
  store i8 1, i8* %gep.2
  ret void
}
|
Loading…
Reference in New Issue