mirror of https://github.com/microsoft/clang.git
Add the alloc_size attribute to clang, attempt 2.
This is a recommit of r290149, which was reverted in r290169 due to msan
failures. msan was failing because we were calling
`isMostDerivedAnUnsizedArray` on an invalid designator, which caused us to
read uninitialized memory. To fix this, the logic of the caller of said
function was simplified, and we now have a `!Invalid` assert in
`isMostDerivedAnUnsizedArray`, so we can catch this particular bug more easily
in the future.

Fingers crossed that this patch sticks this time. :)

Original commit message:

This patch does three things:
- Gives us the alloc_size attribute in clang, which lets us infer the number
  of bytes handed back to us by malloc/realloc/calloc/any user functions that
  act in a similar manner.
- Teaches our constexpr evaluator that evaluating some `const` variables is OK
  sometimes. This is why we have a change in
  test/SemaCXX/constant-expression-cxx11.cpp and other seemingly unrelated
  tests. Richard Smith okay'ed this idea some time ago in person.
- Uniques some Blocks in CodeGen, which was reviewed separately at D26410.
  Lack of uniquing only really shows up as a problem when combined with our
  new eagerness in the face of const.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@290297 91177308-0d34-0410-b5e6-96231b3b80d8
parent a8bebbeb2b
commit aa365cb2fe
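As a quick illustration of the user-facing behavior this commit enables (a
minimal sketch, assuming a hypothetical allocator declaration like the ones in
the attribute documentation and tests below):

  // alloc_size(1) tells clang that parameter 1 is the number of bytes
  // available at the returned pointer.
  void *my_malloc(unsigned long size) __attribute__((alloc_size(1)));

  void example(void) {
    // The pointer must be const for clang's constexpr evaluator to trace it.
    void *const p = my_malloc(64);
    int n = __builtin_object_size(p, 0); // folds to 64 at compile time

    void *q = my_malloc(64);
    // Not const: the frontend gives up, and the call lowers to
    // @llvm.objectsize, which LLVM may still answer via its allocsize
    // attribute.
    int m = __builtin_object_size(q, 0);
  }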
@@ -780,6 +780,15 @@ def EmptyBases : InheritableAttr, TargetSpecificAttr<TargetMicrosoftCXXABI> {
   let Documentation = [EmptyBasesDocs];
 }
 
+def AllocSize : InheritableAttr {
+  let Spellings = [GCC<"alloc_size">];
+  let Subjects = SubjectList<[HasFunctionProto], WarnDiag,
+                             "ExpectedFunctionWithProtoType">;
+  let Args = [IntArgument<"ElemSizeParam">, IntArgument<"NumElemsParam", 1>];
+  let TemplateDependent = 1;
+  let Documentation = [AllocSizeDocs];
+}
+
 def EnableIf : InheritableAttr {
   let Spellings = [GNU<"enable_if">];
   let Subjects = SubjectList<[Function]>;
@@ -206,6 +206,44 @@ to enforce the provided alignment assumption.
 }];
 }
 
+def AllocSizeDocs : Documentation {
+  let Category = DocCatFunction;
+  let Content = [{
+The ``alloc_size`` attribute can be placed on functions that return pointers in
+order to hint to the compiler how many bytes of memory will be available at the
+returned pointer. ``alloc_size`` takes one or two arguments.
+
+- ``alloc_size(N)`` implies that argument number N equals the number of
+  available bytes at the returned pointer.
+- ``alloc_size(N, M)`` implies that the product of argument number N and
+  argument number M equals the number of available bytes at the returned
+  pointer.
+
+Argument numbers are 1-based.
+
+An example of how to use ``alloc_size``:
+
+.. code-block:: c
+
+  void *my_malloc(int a) __attribute__((alloc_size(1)));
+  void *my_calloc(int a, int b) __attribute__((alloc_size(1, 2)));
+
+  int main() {
+    void *const p = my_malloc(100);
+    assert(__builtin_object_size(p, 0) == 100);
+    void *const a = my_calloc(20, 5);
+    assert(__builtin_object_size(a, 0) == 100);
+  }
+
+.. Note:: This attribute works differently in clang than it does in GCC.
+  Specifically, clang will only trace ``const`` pointers (as above); we give up
+  on pointers that are not marked as ``const``. In the vast majority of cases,
+  this is unimportant, because LLVM has support for the ``alloc_size``
+  attribute. However, this may cause mildly unintuitive behavior when used with
+  other attributes, such as ``enable_if``.
+}];
+}
+
 def EnableIfDocs : Documentation {
   let Category = DocCatFunction;
   let Content = [{
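To make the two-argument form concrete, a small worked example (the
declaration is hypothetical, following the semantics documented above):

  // alloc_size(2, 1): the product of argument 2 and argument 1 is the number
  // of bytes available at the returned pointer.
  void *my_calloc(unsigned long nmemb, unsigned long size)
      __attribute__((alloc_size(2, 1)));

  void demo(void) {
    // 20 elements of 5 bytes each: 5 * 20 = 100 bytes available.
    void *const a = my_calloc(20, 5);
    int n = __builtin_object_size(a, 0); // folds to 100
  }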
@@ -2299,6 +2299,9 @@ def warn_attribute_pointers_only : Warning<
   "%0 attribute only applies to%select{| constant}1 pointer arguments">,
   InGroup<IgnoredAttributes>;
 def err_attribute_pointers_only : Error<warn_attribute_pointers_only.Text>;
+def err_attribute_integers_only : Error<
+  "%0 attribute argument may only refer to a function parameter of integer "
+  "type">;
 def warn_attribute_return_pointers_only : Warning<
   "%0 attribute only applies to return values that are pointers">,
   InGroup<IgnoredAttributes>;
@@ -109,19 +109,57 @@ namespace {
     return getAsBaseOrMember(E).getInt();
   }
 
+  /// Given a CallExpr, try to get the alloc_size attribute. May return null.
+  static const AllocSizeAttr *getAllocSizeAttr(const CallExpr *CE) {
+    const FunctionDecl *Callee = CE->getDirectCallee();
+    return Callee ? Callee->getAttr<AllocSizeAttr>() : nullptr;
+  }
+
+  /// Attempts to unwrap a CallExpr (with an alloc_size attribute) from an Expr.
+  /// This will look through a single cast.
+  ///
+  /// Returns null if we couldn't unwrap a function with alloc_size.
+  static const CallExpr *tryUnwrapAllocSizeCall(const Expr *E) {
+    if (!E->getType()->isPointerType())
+      return nullptr;
+
+    E = E->IgnoreParens();
+    // If we're doing a variable assignment from e.g. malloc(N), there will
+    // probably be a cast of some kind. Ignore it.
+    if (const auto *Cast = dyn_cast<CastExpr>(E))
+      E = Cast->getSubExpr()->IgnoreParens();
+
+    if (const auto *CE = dyn_cast<CallExpr>(E))
+      return getAllocSizeAttr(CE) ? CE : nullptr;
+    return nullptr;
+  }
+
+  /// Determines whether or not the given Base contains a call to a function
+  /// with the alloc_size attribute.
+  static bool isBaseAnAllocSizeCall(APValue::LValueBase Base) {
+    const auto *E = Base.dyn_cast<const Expr *>();
+    return E && E->getType()->isPointerType() && tryUnwrapAllocSizeCall(E);
+  }
+
+  /// Determines if an LValue with the given LValueBase will have an unsized
+  /// array in its designator.
   /// Find the path length and type of the most-derived subobject in the given
   /// path, and find the size of the containing array, if any.
-  static
-  unsigned findMostDerivedSubobject(ASTContext &Ctx, QualType Base,
-                                    ArrayRef<APValue::LValuePathEntry> Path,
-                                    uint64_t &ArraySize, QualType &Type,
-                                    bool &IsArray) {
+  static unsigned
+  findMostDerivedSubobject(ASTContext &Ctx, APValue::LValueBase Base,
+                           ArrayRef<APValue::LValuePathEntry> Path,
+                           uint64_t &ArraySize, QualType &Type, bool &IsArray) {
+    // This only accepts LValueBases from APValues, and APValues don't support
+    // arrays that lack size info.
+    assert(!isBaseAnAllocSizeCall(Base) &&
+           "Unsized arrays shouldn't appear here");
     unsigned MostDerivedLength = 0;
-    Type = Base;
+    Type = getType(Base);
 
     for (unsigned I = 0, N = Path.size(); I != N; ++I) {
       if (Type->isArrayType()) {
         const ConstantArrayType *CAT =
-          cast<ConstantArrayType>(Ctx.getAsArrayType(Type));
+            cast<ConstantArrayType>(Ctx.getAsArrayType(Type));
         Type = CAT->getElementType();
         ArraySize = CAT->getSize().getZExtValue();
         MostDerivedLength = I + 1;
@@ -162,17 +200,23 @@ namespace {
     /// Is this a pointer one past the end of an object?
     unsigned IsOnePastTheEnd : 1;
 
+    /// Indicator of whether the first entry is an unsized array.
+    unsigned FirstEntryIsAnUnsizedArray : 1;
+
     /// Indicator of whether the most-derived object is an array element.
     unsigned MostDerivedIsArrayElement : 1;
 
    /// The length of the path to the most-derived object of which this is a
    /// subobject.
-    unsigned MostDerivedPathLength : 29;
+    unsigned MostDerivedPathLength : 28;
 
    /// The size of the array of which the most-derived object is an element.
    /// This will always be 0 if the most-derived object is not an array
    /// element. 0 is not an indicator of whether or not the most-derived object
    /// is an array, however, because 0-length arrays are allowed.
+    ///
+    /// If the current array is an unsized array, the value of this is
+    /// undefined.
     uint64_t MostDerivedArraySize;
 
     /// The type of the most derived object referred to by this address.
@@ -187,23 +231,24 @@ namespace {
 
     explicit SubobjectDesignator(QualType T)
       : Invalid(false), IsOnePastTheEnd(false),
-        MostDerivedIsArrayElement(false), MostDerivedPathLength(0),
-        MostDerivedArraySize(0), MostDerivedType(T) {}
+        FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
+        MostDerivedPathLength(0), MostDerivedArraySize(0),
+        MostDerivedType(T) {}
 
     SubobjectDesignator(ASTContext &Ctx, const APValue &V)
       : Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
-        MostDerivedIsArrayElement(false), MostDerivedPathLength(0),
-        MostDerivedArraySize(0) {
+        FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
+        MostDerivedPathLength(0), MostDerivedArraySize(0) {
+      assert(V.isLValue() && "Non-LValue used to make an LValue designator?");
       if (!Invalid) {
         IsOnePastTheEnd = V.isLValueOnePastTheEnd();
         ArrayRef<PathEntry> VEntries = V.getLValuePath();
         Entries.insert(Entries.end(), VEntries.begin(), VEntries.end());
         if (V.getLValueBase()) {
           bool IsArray = false;
-          MostDerivedPathLength =
-            findMostDerivedSubobject(Ctx, getType(V.getLValueBase()),
-                                     V.getLValuePath(), MostDerivedArraySize,
-                                     MostDerivedType, IsArray);
+          MostDerivedPathLength = findMostDerivedSubobject(
+              Ctx, V.getLValueBase(), V.getLValuePath(), MostDerivedArraySize,
+              MostDerivedType, IsArray);
           MostDerivedIsArrayElement = IsArray;
         }
       }
@@ -214,12 +259,26 @@ namespace {
       Entries.clear();
     }
 
+    /// Determine whether the most derived subobject is an array without a
+    /// known bound.
+    bool isMostDerivedAnUnsizedArray() const {
+      assert(!Invalid && "Calling this makes no sense on invalid designators");
+      return Entries.size() == 1 && FirstEntryIsAnUnsizedArray;
+    }
+
+    /// Determine what the most derived array's size is. Results in an assertion
+    /// failure if the most derived array lacks a size.
+    uint64_t getMostDerivedArraySize() const {
+      assert(!isMostDerivedAnUnsizedArray() && "Unsized array has no size");
+      return MostDerivedArraySize;
+    }
+
     /// Determine whether this is a one-past-the-end pointer.
     bool isOnePastTheEnd() const {
       assert(!Invalid);
       if (IsOnePastTheEnd)
         return true;
-      if (MostDerivedIsArrayElement &&
+      if (!isMostDerivedAnUnsizedArray() && MostDerivedIsArrayElement &&
           Entries[MostDerivedPathLength - 1].ArrayIndex == MostDerivedArraySize)
         return true;
       return false;
@@ -247,6 +306,21 @@ namespace {
       MostDerivedArraySize = CAT->getSize().getZExtValue();
       MostDerivedPathLength = Entries.size();
     }
+    /// Update this designator to refer to the first element within the array of
+    /// elements of type T. This is an array of unknown size.
+    void addUnsizedArrayUnchecked(QualType ElemTy) {
+      PathEntry Entry;
+      Entry.ArrayIndex = 0;
+      Entries.push_back(Entry);
+
+      MostDerivedType = ElemTy;
+      MostDerivedIsArrayElement = true;
+      // The value in MostDerivedArraySize is undefined in this case. So, set it
+      // to an arbitrary value that's likely to loudly break things if it's
+      // used.
+      MostDerivedArraySize = std::numeric_limits<uint64_t>::max() / 2;
+      MostDerivedPathLength = Entries.size();
+    }
     /// Update this designator to refer to the given base or member of this
     /// object.
     void addDeclUnchecked(const Decl *D, bool Virtual = false) {
@@ -280,10 +354,16 @@ namespace {
     /// Add N to the address of this subobject.
     void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
       if (Invalid) return;
+      if (isMostDerivedAnUnsizedArray()) {
+        // Can't verify -- trust that the user is doing the right thing (or if
+        // not, trust that the caller will catch the bad behavior).
+        Entries.back().ArrayIndex += N;
+        return;
+      }
       if (MostDerivedPathLength == Entries.size() &&
           MostDerivedIsArrayElement) {
         Entries.back().ArrayIndex += N;
-        if (Entries.back().ArrayIndex > MostDerivedArraySize) {
+        if (Entries.back().ArrayIndex > getMostDerivedArraySize()) {
           diagnosePointerArithmetic(Info, E, Entries.back().ArrayIndex);
           setInvalid();
         }
@@ -524,9 +604,15 @@ namespace {
     /// gets a chance to look at it.
     EM_PotentialConstantExpressionUnevaluated,
 
-    /// Evaluate as a constant expression. Continue evaluating if we find a
-    /// MemberExpr with a base that can't be evaluated.
-    EM_DesignatorFold,
+    /// Evaluate as a constant expression. Continue evaluating if either:
+    /// - We find a MemberExpr with a base that can't be evaluated.
+    /// - We find a variable initialized with a call to a function that has
+    ///   the alloc_size attribute on it.
+    /// In either case, the LValue returned shall have an invalid base; in the
+    /// former, the base will be the invalid MemberExpr, in the latter, the
+    /// base will be either the alloc_size CallExpr or a CastExpr wrapping
+    /// said CallExpr.
+    EM_OffsetFold,
   } EvalMode;
 
   /// Are we checking whether the expression is a potential constant
@@ -628,7 +714,7 @@ namespace {
       case EM_PotentialConstantExpression:
       case EM_ConstantExpressionUnevaluated:
       case EM_PotentialConstantExpressionUnevaluated:
-      case EM_DesignatorFold:
+      case EM_OffsetFold:
         HasActiveDiagnostic = false;
         return OptionalDiagnostic();
       }
@@ -720,7 +806,7 @@ namespace {
       case EM_ConstantExpression:
       case EM_ConstantExpressionUnevaluated:
       case EM_ConstantFold:
-      case EM_DesignatorFold:
+      case EM_OffsetFold:
         return false;
       }
       llvm_unreachable("Missed EvalMode case");
@@ -739,7 +825,7 @@ namespace {
       case EM_EvaluateForOverflow:
       case EM_IgnoreSideEffects:
       case EM_ConstantFold:
-      case EM_DesignatorFold:
+      case EM_OffsetFold:
         return true;
 
       case EM_PotentialConstantExpression:
@@ -775,7 +861,7 @@ namespace {
       case EM_ConstantExpressionUnevaluated:
       case EM_ConstantFold:
       case EM_IgnoreSideEffects:
-      case EM_DesignatorFold:
+      case EM_OffsetFold:
         return false;
       }
       llvm_unreachable("Missed EvalMode case");
@@ -805,7 +891,7 @@ namespace {
     }
 
     bool allowInvalidBaseExpr() const {
-      return EvalMode == EM_DesignatorFold;
+      return EvalMode == EM_OffsetFold;
     }
 
     class ArrayInitLoopIndex {
@@ -856,11 +942,10 @@ namespace {
   struct FoldOffsetRAII {
     EvalInfo &Info;
     EvalInfo::EvaluationMode OldMode;
-    explicit FoldOffsetRAII(EvalInfo &Info, bool Subobject)
+    explicit FoldOffsetRAII(EvalInfo &Info)
         : Info(Info), OldMode(Info.EvalMode) {
       if (!Info.checkingPotentialConstantExpression())
-        Info.EvalMode = Subobject ? EvalInfo::EM_DesignatorFold
-                                  : EvalInfo::EM_ConstantFold;
+        Info.EvalMode = EvalInfo::EM_OffsetFold;
     }
 
     ~FoldOffsetRAII() { Info.EvalMode = OldMode; }
@@ -966,10 +1051,12 @@ bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
 
 void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
                                                     const Expr *E, uint64_t N) {
+  // If we're complaining, we must be able to statically determine the size of
+  // the most derived array.
   if (MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement)
     Info.CCEDiag(E, diag::note_constexpr_array_index)
       << static_cast<int>(N) << /*array*/ 0
-      << static_cast<unsigned>(MostDerivedArraySize);
+      << static_cast<unsigned>(getMostDerivedArraySize());
   else
     Info.CCEDiag(E, diag::note_constexpr_array_index)
       << static_cast<int>(N) << /*non-array*/ 1;
@@ -1102,12 +1189,16 @@ namespace {
       if (Designator.Invalid)
         V = APValue(Base, Offset, APValue::NoLValuePath(), CallIndex,
                     IsNullPtr);
-      else
+      else {
+        assert(!InvalidBase && "APValues can't handle invalid LValue bases");
+        assert(!Designator.FirstEntryIsAnUnsizedArray &&
+               "Unsized array with a valid base?");
         V = APValue(Base, Offset, Designator.Entries,
                     Designator.IsOnePastTheEnd, CallIndex, IsNullPtr);
+      }
     }
     void setFrom(ASTContext &Ctx, const APValue &V) {
-      assert(V.isLValue());
+      assert(V.isLValue() && "Setting LValue from a non-LValue?");
       Base = V.getLValueBase();
       Offset = V.getLValueOffset();
       InvalidBase = false;
@@ -1118,6 +1209,15 @@ namespace {
 
     void set(APValue::LValueBase B, unsigned I = 0, bool BInvalid = false,
              bool IsNullPtr_ = false, uint64_t Offset_ = 0) {
+#ifndef NDEBUG
+      // We only allow a few types of invalid bases. Enforce that here.
+      if (BInvalid) {
+        const auto *E = B.get<const Expr *>();
+        assert((isa<MemberExpr>(E) || tryUnwrapAllocSizeCall(E)) &&
+               "Unexpected type of invalid base");
+      }
+#endif
+
       Base = B;
       Offset = CharUnits::fromQuantity(Offset_);
       InvalidBase = BInvalid;
@@ -1157,6 +1257,13 @@ namespace {
       if (checkSubobject(Info, E, isa<FieldDecl>(D) ? CSK_Field : CSK_Base))
         Designator.addDeclUnchecked(D, Virtual);
     }
+    void addUnsizedArray(EvalInfo &Info, QualType ElemTy) {
+      assert(Designator.Entries.empty() && getType(Base)->isPointerType());
+      assert(isBaseAnAllocSizeCall(Base) &&
+             "Only alloc_size bases can have unsized arrays");
+      Designator.FirstEntryIsAnUnsizedArray = true;
+      Designator.addUnsizedArrayUnchecked(ElemTy);
+    }
     void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) {
       if (checkSubobject(Info, E, CSK_ArrayToPointer))
         Designator.addArrayUnchecked(CAT);
@@ -2796,7 +2903,7 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
       // All the remaining cases only permit reading.
       Info.FFDiag(E, diag::note_constexpr_modify_global);
       return CompleteObject();
-    } else if (VD->isConstexpr()) {
+    } else if (VD->isConstexpr() || BaseType.isConstQualified()) {
       // OK, we can read this variable.
     } else if (BaseType->isIntegralOrEnumerationType()) {
       // In OpenCL if a variable is in constant address space it is a const value.
@@ -5079,6 +5186,105 @@ bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) {
 // Pointer Evaluation
 //===----------------------------------------------------------------------===//
 
+/// \brief Attempts to compute the number of bytes available at the pointer
+/// returned by a function with the alloc_size attribute. Returns true if we
+/// were successful. Places an unsigned number into `Result`.
+///
+/// This expects the given CallExpr to be a call to a function with an
+/// alloc_size attribute.
+static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
+                                            const CallExpr *Call,
+                                            llvm::APInt &Result) {
+  const AllocSizeAttr *AllocSize = getAllocSizeAttr(Call);
+
+  // alloc_size args are 1-indexed, 0 means not present.
+  assert(AllocSize && AllocSize->getElemSizeParam() != 0);
+  unsigned SizeArgNo = AllocSize->getElemSizeParam() - 1;
+  unsigned BitsInSizeT = Ctx.getTypeSize(Ctx.getSizeType());
+  if (Call->getNumArgs() <= SizeArgNo)
+    return false;
+
+  auto EvaluateAsSizeT = [&](const Expr *E, APSInt &Into) {
+    if (!E->EvaluateAsInt(Into, Ctx, Expr::SE_AllowSideEffects))
+      return false;
+    if (Into.isNegative() || !Into.isIntN(BitsInSizeT))
+      return false;
+    Into = Into.zextOrSelf(BitsInSizeT);
+    return true;
+  };
+
+  APSInt SizeOfElem;
+  if (!EvaluateAsSizeT(Call->getArg(SizeArgNo), SizeOfElem))
+    return false;
+
+  if (!AllocSize->getNumElemsParam()) {
+    Result = std::move(SizeOfElem);
+    return true;
+  }
+
+  APSInt NumberOfElems;
+  // Argument numbers start at 1
+  unsigned NumArgNo = AllocSize->getNumElemsParam() - 1;
+  if (!EvaluateAsSizeT(Call->getArg(NumArgNo), NumberOfElems))
+    return false;
+
+  bool Overflow;
+  llvm::APInt BytesAvailable = SizeOfElem.umul_ov(NumberOfElems, Overflow);
+  if (Overflow)
+    return false;
+
+  Result = std::move(BytesAvailable);
+  return true;
+}
+
+/// \brief Convenience function. LVal's base must be a call to an alloc_size
+/// function.
+static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
+                                            const LValue &LVal,
+                                            llvm::APInt &Result) {
+  assert(isBaseAnAllocSizeCall(LVal.getLValueBase()) &&
+         "Can't get the size of a non alloc_size function");
+  const auto *Base = LVal.getLValueBase().get<const Expr *>();
+  const CallExpr *CE = tryUnwrapAllocSizeCall(Base);
+  return getBytesReturnedByAllocSizeCall(Ctx, CE, Result);
+}
+
+/// \brief Attempts to evaluate the given LValueBase as the result of a call to
+/// a function with the alloc_size attribute. If it was possible to do so, this
+/// function will return true, make Result's Base point to said function call,
+/// and mark Result's Base as invalid.
+static bool evaluateLValueAsAllocSize(EvalInfo &Info, APValue::LValueBase Base,
+                                      LValue &Result) {
+  if (!Info.allowInvalidBaseExpr() || Base.isNull())
+    return false;
+
+  // Because we do no form of static analysis, we only support const variables.
+  //
+  // Additionally, we can't support parameters, nor can we support static
+  // variables (in the latter case, use-before-assign isn't UB; in the former,
+  // we have no clue what they'll be assigned to).
+  const auto *VD =
+      dyn_cast_or_null<VarDecl>(Base.dyn_cast<const ValueDecl *>());
+  if (!VD || !VD->isLocalVarDecl() || !VD->getType().isConstQualified())
+    return false;
+
+  const Expr *Init = VD->getAnyInitializer();
+  if (!Init)
+    return false;
+
+  const Expr *E = Init->IgnoreParens();
+  if (!tryUnwrapAllocSizeCall(E))
+    return false;
+
+  // Store E instead of E unwrapped so that the type of the LValue's base is
+  // what the user wanted.
+  Result.setInvalid(E);
+
+  QualType Pointee = E->getType()->castAs<PointerType>()->getPointeeType();
+  Result.addUnsizedArray(Info, Pointee);
+  return true;
+}
+
 namespace {
 class PointerExprEvaluator
     : public ExprEvaluatorBase<PointerExprEvaluator> {
@@ -5088,6 +5294,8 @@ class PointerExprEvaluator
     Result.set(E);
     return true;
   }
+
+  bool visitNonBuiltinCallExpr(const CallExpr *E);
 public:
 
   PointerExprEvaluator(EvalInfo &info, LValue &Result)
@@ -5270,6 +5478,19 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
 
   case CK_FunctionToPointerDecay:
     return EvaluateLValue(SubExpr, Result, Info);
+
+  case CK_LValueToRValue: {
+    LValue LVal;
+    if (!EvaluateLValue(E->getSubExpr(), LVal, Info))
+      return false;
+
+    APValue RVal;
+    // Note, we use the subexpression's type in order to retain cv-qualifiers.
+    if (!handleLValueToRValueConversion(Info, E, E->getSubExpr()->getType(),
+                                        LVal, RVal))
+      return evaluateLValueAsAllocSize(Info, LVal.Base, Result);
+    return Success(RVal, E);
+  }
   }
 
   return ExprEvaluatorBaseTy::VisitCastExpr(E);
@@ -5307,6 +5528,20 @@ static CharUnits GetAlignOfExpr(EvalInfo &Info, const Expr *E) {
   return GetAlignOfType(Info, E->getType());
 }
 
+// To be clear: this happily visits unsupported builtins. Better name welcomed.
+bool PointerExprEvaluator::visitNonBuiltinCallExpr(const CallExpr *E) {
+  if (ExprEvaluatorBaseTy::VisitCallExpr(E))
+    return true;
+
+  if (!(Info.allowInvalidBaseExpr() && getAllocSizeAttr(E)))
+    return false;
+
+  Result.setInvalid(E);
+  QualType PointeeTy = E->getType()->castAs<PointerType>()->getPointeeType();
+  Result.addUnsizedArray(Info, PointeeTy);
+  return true;
+}
+
 bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
   if (IsStringLiteralCall(E))
     return Success(E);
@@ -5314,7 +5549,7 @@ bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
   if (unsigned BuiltinOp = E->getBuiltinCallee())
     return VisitBuiltinCallExpr(E, BuiltinOp);
 
-  return ExprEvaluatorBaseTy::VisitCallExpr(E);
+  return visitNonBuiltinCallExpr(E);
 }
 
 bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
@@ -5473,7 +5708,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
   }
 
   default:
-    return ExprEvaluatorBaseTy::VisitCallExpr(E);
+    return visitNonBuiltinCallExpr(E);
   }
 }
 
@@ -6512,8 +6747,6 @@ public:
   bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E);
   bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
 
-private:
-  bool TryEvaluateBuiltinObjectSize(const CallExpr *E, unsigned Type);
   // FIXME: Missing: array subscript of vector, member of vector
 };
 } // end anonymous namespace
@@ -6785,7 +7018,7 @@ static QualType getObjectType(APValue::LValueBase B) {
 }
 
 /// A more selective version of E->IgnoreParenCasts for
-/// TryEvaluateBuiltinObjectSize. This ignores some casts/parens that serve only
+/// tryEvaluateBuiltinObjectSize. This ignores some casts/parens that serve only
 /// to change the type of E.
 /// Ex. For E = `(short*)((char*)(&foo))`, returns `&foo`
 ///
@@ -6852,165 +7085,132 @@ static bool isDesignatorAtObjectEnd(const ASTContext &Ctx, const LValue &LVal) {
     }
   }
 
+  unsigned I = 0;
   QualType BaseType = getType(Base);
-  for (int I = 0, E = LVal.Designator.Entries.size(); I != E; ++I) {
+  if (LVal.Designator.FirstEntryIsAnUnsizedArray) {
+    assert(isBaseAnAllocSizeCall(Base) &&
+           "Unsized array in non-alloc_size call?");
+    // If this is an alloc_size base, we should ignore the initial array index
+    ++I;
+    BaseType = BaseType->castAs<PointerType>()->getPointeeType();
+  }
+
+  for (unsigned E = LVal.Designator.Entries.size(); I != E; ++I) {
+    const auto &Entry = LVal.Designator.Entries[I];
     if (BaseType->isArrayType()) {
       // Because __builtin_object_size treats arrays as objects, we can ignore
       // the index iff this is the last array in the Designator.
       if (I + 1 == E)
         return true;
-      auto *CAT = cast<ConstantArrayType>(Ctx.getAsArrayType(BaseType));
-      uint64_t Index = LVal.Designator.Entries[I].ArrayIndex;
+      const auto *CAT = cast<ConstantArrayType>(Ctx.getAsArrayType(BaseType));
+      uint64_t Index = Entry.ArrayIndex;
       if (Index + 1 != CAT->getSize())
         return false;
       BaseType = CAT->getElementType();
     } else if (BaseType->isAnyComplexType()) {
-      auto *CT = BaseType->castAs<ComplexType>();
-      uint64_t Index = LVal.Designator.Entries[I].ArrayIndex;
+      const auto *CT = BaseType->castAs<ComplexType>();
+      uint64_t Index = Entry.ArrayIndex;
       if (Index != 1)
         return false;
       BaseType = CT->getElementType();
-    } else if (auto *FD = getAsField(LVal.Designator.Entries[I])) {
+    } else if (auto *FD = getAsField(Entry)) {
       bool Invalid;
       if (!IsLastOrInvalidFieldDecl(FD, Invalid))
        return Invalid;
      BaseType = FD->getType();
    } else {
-      assert(getAsBaseClass(LVal.Designator.Entries[I]) != nullptr &&
-             "Expecting cast to a base class");
+      assert(getAsBaseClass(Entry) && "Expecting cast to a base class");
       return false;
     }
   }
   return true;
 }
 
-/// Tests to see if the LValue has a designator (that isn't necessarily valid).
+/// Tests to see if the LValue has a user-specified designator (that isn't
+/// necessarily valid). Note that this always returns 'true' if the LValue has
+/// an unsized array as its first designator entry, because there's currently no
+/// way to tell if the user typed *foo or foo[0].
 static bool refersToCompleteObject(const LValue &LVal) {
-  if (LVal.Designator.Invalid || !LVal.Designator.Entries.empty())
+  if (LVal.Designator.Invalid)
     return false;
 
+  if (!LVal.Designator.Entries.empty())
+    return LVal.Designator.isMostDerivedAnUnsizedArray();
+
   if (!LVal.InvalidBase)
     return true;
 
-  auto *E = LVal.Base.dyn_cast<const Expr *>();
-  (void)E;
-  assert(E != nullptr && isa<MemberExpr>(E));
-  return false;
+  // If `E` is a MemberExpr, then the first part of the designator is hiding in
+  // the LValueBase.
+  const auto *E = LVal.Base.dyn_cast<const Expr *>();
+  return !E || !isa<MemberExpr>(E);
 }
 
-/// Tries to evaluate the __builtin_object_size for @p E. If successful, returns
-/// true and stores the result in @p Size.
-///
-/// If @p WasError is non-null, this will report whether the failure to evaluate
-/// is to be treated as an Error in IntExprEvaluator.
-static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
-                                         EvalInfo &Info, uint64_t &Size,
-                                         bool *WasError = nullptr) {
-  if (WasError != nullptr)
-    *WasError = false;
-
-  auto Error = [&](const Expr *E) {
-    if (WasError != nullptr)
-      *WasError = true;
-    return false;
-  };
-
-  auto Success = [&](uint64_t S, const Expr *E) {
-    Size = S;
-    return true;
-  };
-
-  // Determine the denoted object.
-  LValue Base;
-  {
-    // The operand of __builtin_object_size is never evaluated for side-effects.
-    // If there are any, but we can determine the pointed-to object anyway, then
-    // ignore the side-effects.
-    SpeculativeEvaluationRAII SpeculativeEval(Info);
-    FoldOffsetRAII Fold(Info, Type & 1);
-
-    if (E->isGLValue()) {
-      // It's possible for us to be given GLValues if we're called via
-      // Expr::tryEvaluateObjectSize.
-      APValue RVal;
-      if (!EvaluateAsRValue(Info, E, RVal))
-        return false;
-      Base.setFrom(Info.Ctx, RVal);
-    } else if (!EvaluatePointer(ignorePointerCastsAndParens(E), Base, Info))
-      return false;
-  }
-
-  CharUnits BaseOffset = Base.getLValueOffset();
-  // If we point to before the start of the object, there are no accessible
-  // bytes.
-  if (BaseOffset.isNegative())
-    return Success(0, E);
-
-  // In the case where we're not dealing with a subobject, we discard the
-  // subobject bit.
-  bool SubobjectOnly = (Type & 1) != 0 && !refersToCompleteObject(Base);
-
-  // If Type & 1 is 0, we need to be able to statically guarantee that the bytes
-  // exist. If we can't verify the base, then we can't do that.
-  //
-  // As a special case, we produce a valid object size for an unknown object
-  // with a known designator if Type & 1 is 1. For instance:
-  //
-  // extern struct X { char buff[32]; int a, b, c; } *p;
-  // int a = __builtin_object_size(p->buff + 4, 3); // returns 28
-  // int b = __builtin_object_size(p->buff + 4, 2); // returns 0, not 40
-  //
-  // This matches GCC's behavior.
-  if (Base.InvalidBase && !SubobjectOnly)
-    return Error(E);
-
-  // If we're not examining only the subobject, then we reset to a complete
-  // object designator
-  //
-  // If Type is 1 and we've lost track of the subobject, just find the complete
-  // object instead. (If Type is 3, that's not correct behavior and we should
-  // return 0 instead.)
-  LValue End = Base;
-  if (!SubobjectOnly || (End.Designator.Invalid && Type == 1)) {
-    QualType T = getObjectType(End.getLValueBase());
-    if (T.isNull())
-      End.Designator.setInvalid();
-    else {
-      End.Designator = SubobjectDesignator(T);
-      End.Offset = CharUnits::Zero();
-    }
-  }
-
-  // If it is not possible to determine which objects ptr points to at compile
-  // time, __builtin_object_size should return (size_t) -1 for type 0 or 1
-  // and (size_t) 0 for type 2 or 3.
-  if (End.Designator.Invalid)
-    return false;
-
-  // According to the GCC documentation, we want the size of the subobject
-  // denoted by the pointer. But that's not quite right -- what we actually
-  // want is the size of the immediately-enclosing array, if there is one.
-  int64_t AmountToAdd = 1;
-  if (End.Designator.MostDerivedIsArrayElement &&
-      End.Designator.Entries.size() == End.Designator.MostDerivedPathLength) {
-    // We got a pointer to an array. Step to its end.
-    AmountToAdd = End.Designator.MostDerivedArraySize -
-                  End.Designator.Entries.back().ArrayIndex;
-  } else if (End.Designator.isOnePastTheEnd()) {
-    // We're already pointing at the end of the object.
-    AmountToAdd = 0;
-  }
-
-  QualType PointeeType = End.Designator.MostDerivedType;
-  assert(!PointeeType.isNull());
-  if (PointeeType->isIncompleteType() || PointeeType->isFunctionType())
-    return Error(E);
-
-  if (!HandleLValueArrayAdjustment(Info, E, End, End.Designator.MostDerivedType,
-                                   AmountToAdd))
-    return false;
-
-  auto EndOffset = End.getLValueOffset();
+/// Attempts to detect a user writing into a piece of memory that's impossible
+/// to figure out the size of by just using types.
+static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal) {
+  const SubobjectDesignator &Designator = LVal.Designator;
+  // Notes:
+  // - Users can only write off of the end when we have an invalid base. Invalid
+  //   bases imply we don't know where the memory came from.
+  // - We used to be a bit more aggressive here; we'd only be conservative if
+  //   the array at the end was flexible, or if it had 0 or 1 elements. This
+  //   broke some common standard library extensions (PR30346), but was
+  //   otherwise seemingly fine. It may be useful to reintroduce this behavior
+  //   with some sort of whitelist. OTOH, it seems that GCC is always
+  //   conservative with the last element in structs (if it's an array), so our
+  //   current behavior is more compatible than a whitelisting approach would
+  //   be.
+  return LVal.InvalidBase &&
+         Designator.Entries.size() == Designator.MostDerivedPathLength &&
+         Designator.MostDerivedIsArrayElement &&
+         isDesignatorAtObjectEnd(Ctx, LVal);
+}
+
+/// Converts the given APInt to CharUnits, assuming the APInt is unsigned.
+/// Fails if the conversion would cause loss of precision.
+static bool convertUnsignedAPIntToCharUnits(const llvm::APInt &Int,
+                                            CharUnits &Result) {
+  auto CharUnitsMax = std::numeric_limits<CharUnits::QuantityType>::max();
+  if (Int.ugt(CharUnitsMax))
+    return false;
+  Result = CharUnits::fromQuantity(Int.getZExtValue());
+  return true;
+}
+
+/// Helper for tryEvaluateBuiltinObjectSize -- Given an LValue, this will
+/// determine how many bytes exist from the beginning of the object to either
+/// the end of the current subobject, or the end of the object itself, depending
+/// on what the LValue looks like + the value of Type.
+///
+/// If this returns false, the value of Result is undefined.
+static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc,
+                               unsigned Type, const LValue &LVal,
+                               CharUnits &EndOffset) {
+  bool DetermineForCompleteObject = refersToCompleteObject(LVal);
+
+  // We want to evaluate the size of the entire object. This is a valid fallback
+  // for when Type=1 and the designator is invalid, because we're asked for an
+  // upper-bound.
+  if (!(Type & 1) || LVal.Designator.Invalid || DetermineForCompleteObject) {
+    // Type=3 wants a lower bound, so we can't fall back to this.
+    if (Type == 3 && !DetermineForCompleteObject)
+      return false;
+
+    llvm::APInt APEndOffset;
+    if (isBaseAnAllocSizeCall(LVal.getLValueBase()) &&
+        getBytesReturnedByAllocSizeCall(Info.Ctx, LVal, APEndOffset))
+      return convertUnsignedAPIntToCharUnits(APEndOffset, EndOffset);
+
+    if (LVal.InvalidBase)
+      return false;
+
+    QualType BaseTy = getObjectType(LVal.getLValueBase());
+    return !BaseTy.isNull() && HandleSizeof(Info, ExprLoc, BaseTy, EndOffset);
+  }
+
+  // We want to evaluate the size of a subobject.
+  const SubobjectDesignator &Designator = LVal.Designator;
 
   // The following is a moderately common idiom in C:
   //
@@ -7018,39 +7218,88 @@ static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
   // struct Foo *F = (struct Foo *)malloc(sizeof(struct Foo) + strlen(Bar));
   // strcpy(&F->c[0], Bar);
   //
-  // So, if we see that we're examining an array at the end of a struct with an
-  // unknown base, we give up instead of breaking code that behaves this way.
-  // Note that we only do this when Type=1, because Type=3 is a lower bound, so
-  // answering conservatively is fine.
-  //
-  // We used to be a bit more aggressive here; we'd only be conservative if the
-  // array at the end was flexible, or if it had 0 or 1 elements. This broke
-  // some common standard library extensions (PR30346), but was otherwise
-  // seemingly fine. It may be useful to reintroduce this behavior with some
-  // sort of whitelist. OTOH, it seems that GCC is always conservative with the
-  // last element in structs (if it's an array), so our current behavior is more
-  // compatible than a whitelisting approach would be.
-  if (End.InvalidBase && SubobjectOnly && Type == 1 &&
-      End.Designator.Entries.size() == End.Designator.MostDerivedPathLength &&
-      End.Designator.MostDerivedIsArrayElement &&
-      isDesignatorAtObjectEnd(Info.Ctx, End))
-    return Error(E);
-
-  if (BaseOffset > EndOffset)
-    return Success(0, E);
-
-  return Success((EndOffset - BaseOffset).getQuantity(), E);
-}
-
-bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(const CallExpr *E,
-                                                    unsigned Type) {
-  uint64_t Size;
-  bool WasError;
-  if (::tryEvaluateBuiltinObjectSize(E->getArg(0), Type, Info, Size, &WasError))
-    return Success(Size, E);
-  if (WasError)
-    return Error(E);
-  return false;
+  // In order to not break too much legacy code, we need to support it.
+  if (isUserWritingOffTheEnd(Info.Ctx, LVal)) {
+    // If we can resolve this to an alloc_size call, we can hand that back,
+    // because we know for certain how many bytes there are to write to.
+    llvm::APInt APEndOffset;
+    if (isBaseAnAllocSizeCall(LVal.getLValueBase()) &&
+        getBytesReturnedByAllocSizeCall(Info.Ctx, LVal, APEndOffset))
+      return convertUnsignedAPIntToCharUnits(APEndOffset, EndOffset);
+
+    // If we cannot determine the size of the initial allocation, then we can't
+    // give an accurate upper-bound. However, we are still able to give
+    // conservative lower-bounds for Type=3.
+    if (Type == 1)
+      return false;
+  }
+
+  CharUnits BytesPerElem;
+  if (!HandleSizeof(Info, ExprLoc, Designator.MostDerivedType, BytesPerElem))
+    return false;
+
+  // According to the GCC documentation, we want the size of the subobject
+  // denoted by the pointer. But that's not quite right -- what we actually
+  // want is the size of the immediately-enclosing array, if there is one.
+  int64_t ElemsRemaining;
+  if (Designator.MostDerivedIsArrayElement &&
+      Designator.Entries.size() == Designator.MostDerivedPathLength) {
+    uint64_t ArraySize = Designator.getMostDerivedArraySize();
+    uint64_t ArrayIndex = Designator.Entries.back().ArrayIndex;
+    ElemsRemaining = ArraySize <= ArrayIndex ? 0 : ArraySize - ArrayIndex;
+  } else {
+    ElemsRemaining = Designator.isOnePastTheEnd() ? 0 : 1;
+  }
+
+  EndOffset = LVal.getLValueOffset() + BytesPerElem * ElemsRemaining;
+  return true;
+}
+
+/// \brief Tries to evaluate the __builtin_object_size for @p E. If successful,
+/// returns true and stores the result in @p Size.
+///
+/// If @p WasError is non-null, this will report whether the failure to evaluate
+/// is to be treated as an Error in IntExprEvaluator.
+static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
+                                         EvalInfo &Info, uint64_t &Size) {
+  // Determine the denoted object.
+  LValue LVal;
+  {
+    // The operand of __builtin_object_size is never evaluated for side-effects.
+    // If there are any, but we can determine the pointed-to object anyway, then
+    // ignore the side-effects.
+    SpeculativeEvaluationRAII SpeculativeEval(Info);
+    FoldOffsetRAII Fold(Info);
+
+    if (E->isGLValue()) {
+      // It's possible for us to be given GLValues if we're called via
+      // Expr::tryEvaluateObjectSize.
+      APValue RVal;
+      if (!EvaluateAsRValue(Info, E, RVal))
+        return false;
+      LVal.setFrom(Info.Ctx, RVal);
+    } else if (!EvaluatePointer(ignorePointerCastsAndParens(E), LVal, Info))
+      return false;
+  }
+
+  // If we point to before the start of the object, there are no accessible
+  // bytes.
+  if (LVal.getLValueOffset().isNegative()) {
+    Size = 0;
+    return true;
+  }
+
+  CharUnits EndOffset;
+  if (!determineEndOffset(Info, E->getExprLoc(), Type, LVal, EndOffset))
+    return false;
+
+  // If we've fallen outside of the end offset, just pretend there's nothing to
+  // write to/read from.
+  if (EndOffset <= LVal.getLValueOffset())
+    Size = 0;
+  else
+    Size = (EndOffset - LVal.getLValueOffset()).getQuantity();
+  return true;
 }
 
 bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
@@ -7072,8 +7321,9 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
         E->getArg(1)->EvaluateKnownConstInt(Info.Ctx).getZExtValue();
     assert(Type <= 3 && "unexpected type");
 
-    if (TryEvaluateBuiltinObjectSize(E, Type))
-      return true;
+    uint64_t Size;
+    if (tryEvaluateBuiltinObjectSize(E->getArg(0), Type, Info, Size))
+      return Success(Size, E);
 
     if (E->getArg(0)->HasSideEffects(Info.Ctx))
       return Success((Type & 2) ? 0 : -1, E);
@@ -7086,7 +7336,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
     case EvalInfo::EM_ConstantFold:
     case EvalInfo::EM_EvaluateForOverflow:
     case EvalInfo::EM_IgnoreSideEffects:
-    case EvalInfo::EM_DesignatorFold:
+    case EvalInfo::EM_OffsetFold:
       // Leave it to IR generation.
       return Error(E);
     case EvalInfo::EM_ConstantExpressionUnevaluated:
@@ -10189,5 +10439,5 @@ bool Expr::tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx,
 
   Expr::EvalStatus Status;
   EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold);
-  return ::tryEvaluateBuiltinObjectSize(this, Type, Info, Result);
+  return tryEvaluateBuiltinObjectSize(this, Type, Info, Result);
 }
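The user-visible effect of the evaluator rewrite is easiest to see with the
example carried in the comments above (a sketch; `p` is a hypothetical extern
pointer, and the results are the GCC-compatible ones the comments describe):

  extern struct X { char buff[32]; int a, b, c; } *p;

  // Type 3 (subobject, lower bound): the type of `buff` alone gives 32 - 4.
  int a = __builtin_object_size(p->buff + 4, 3); // returns 28

  // Type 2 (whole object, lower bound): the base object is unknown, so the
  // conservative answer is 0, not 40.
  int b = __builtin_object_size(p->buff + 4, 2); // returns 0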
@@ -686,6 +686,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
   // If the block has no captures, we won't have a pre-computed
   // layout for it.
   if (!blockExpr->getBlockDecl()->hasCaptures()) {
+    if (llvm::Constant *Block = CGM.getAddrOfGlobalBlockIfEmitted(blockExpr))
+      return Block;
     CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName());
     computeBlockInfo(CGM, this, blockInfo);
     blockInfo.BlockExpression = blockExpr;
@@ -1047,9 +1049,19 @@ Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
   return addr;
 }
 
+void CodeGenModule::setAddrOfGlobalBlock(const BlockExpr *BE,
+                                         llvm::Constant *Addr) {
+  bool Ok = EmittedGlobalBlocks.insert(std::make_pair(BE, Addr)).second;
+  (void)Ok;
+  assert(Ok && "Trying to replace an already-existing global block!");
+}
+
 llvm::Constant *
 CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *BE,
                                     StringRef Name) {
+  if (llvm::Constant *Block = getAddrOfGlobalBlockIfEmitted(BE))
+    return Block;
+
   CGBlockInfo blockInfo(BE->getBlockDecl(), Name);
   blockInfo.BlockExpression = BE;
 
@@ -1074,6 +1086,11 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
                                         const CGBlockInfo &blockInfo,
                                         llvm::Constant *blockFn) {
   assert(blockInfo.CanBeGlobal);
+  // Callers should detect this case on their own: calling this function
+  // generally requires computing layout information, which is a waste of time
+  // if we've already emitted this block.
+  assert(!CGM.getAddrOfGlobalBlockIfEmitted(blockInfo.BlockExpression) &&
+         "Refusing to re-emit a global block.");
 
   // Generate the constants for the block literal initializer.
   ConstantInitBuilder builder(CGM);
@@ -1103,9 +1120,12 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
                                       /*constant*/ true);
 
   // Return a constant of the appropriately-casted type.
-  llvm::Type *requiredType =
+  llvm::Type *RequiredType =
     CGM.getTypes().ConvertType(blockInfo.getBlockExpr()->getType());
-  return llvm::ConstantExpr::getBitCast(literal, requiredType);
+  llvm::Constant *Result =
+      llvm::ConstantExpr::getBitCast(literal, RequiredType);
+  CGM.setAddrOfGlobalBlock(blockInfo.BlockExpression, Result);
+  return Result;
 }
 
 void CodeGenFunction::setBlockContextParameter(const ImplicitParamDecl *D,
@@ -1683,6 +1683,14 @@ void CodeGenModule::ConstructAttributeList(
 
     HasAnyX86InterruptAttr = TargetDecl->hasAttr<AnyX86InterruptAttr>();
     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
+    if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
+      Optional<unsigned> NumElemsParam;
+      // alloc_size args are base-1, 0 means not present.
+      if (unsigned N = AllocSize->getNumElemsParam())
+        NumElemsParam = N - 1;
+      FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam() - 1,
+                                 NumElemsParam);
+    }
   }
 
   // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
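The hunk above translates the attribute's 1-based parameter indices into
LLVM's 0-based allocsize indices. A short sketch of the correspondence (the
declarations are hypothetical; the resulting IR attributes are the ones the
test's CHECK lines below expect):

  typedef unsigned long size_t;

  // alloc_size(1): parameter 1 (1-based) holds the byte count, so clang
  // emits LLVM's allocsize(0) (0-based) on the declaration.
  void *my_malloc(size_t) __attribute__((alloc_size(1)));

  // alloc_size(1, 2): parameters 1 and 2 are multiplied, emitted as
  // allocsize(0, 1).
  void *my_calloc(size_t, size_t) __attribute__((alloc_size(1, 2)));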
@@ -1499,7 +1499,6 @@ public:
   //===--------------------------------------------------------------------===//
 
   llvm::Value *EmitBlockLiteral(const BlockExpr *);
-  llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
   static void destroyBlockInfos(CGBlockInfo *info);
 
   llvm::Function *GenerateBlockFunction(GlobalDecl GD,
@@ -2726,6 +2725,9 @@ public:
                                  OMPPrivateScope &LoopScope);
 
 private:
+  /// Helpers for blocks
+  llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
+
   /// Helpers for the OpenMP loop directives.
   void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
   void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
@@ -455,6 +455,10 @@ private:
   bool isTriviallyRecursive(const FunctionDecl *F);
   bool shouldEmitFunction(GlobalDecl GD);
 
+  /// Map of the global blocks we've emitted, so that we don't have to re-emit
+  /// them if the constexpr evaluator gets aggressive.
+  llvm::DenseMap<const BlockExpr *, llvm::Constant *> EmittedGlobalBlocks;
+
   /// @name Cache for Blocks Runtime Globals
   /// @{
 
@@ -776,6 +780,16 @@ public:
 
   /// Gets the address of a block which requires no captures.
   llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, StringRef Name);
+
+  /// Returns the address of a block which requires no captures, or null if
+  /// we've yet to emit the block for BE.
+  llvm::Constant *getAddrOfGlobalBlockIfEmitted(const BlockExpr *BE) {
+    return EmittedGlobalBlocks.lookup(BE);
+  }
+
+  /// Notes that BE's global block is available via Addr. Asserts that BE
+  /// isn't already emitted.
+  void setAddrOfGlobalBlock(const BlockExpr *BE, llvm::Constant *Addr);
 
   /// Return a pointer to a constant CFString object for the given string.
   ConstantAddress GetAddrOfConstantCFString(const StringLiteral *Literal);
@@ -246,6 +246,28 @@ static bool checkUInt32Argument(Sema &S, const AttributeList &Attr,
   return true;
 }
 
+/// \brief Wrapper around checkUInt32Argument, with an extra check to be sure
+/// that the result will fit into a regular (signed) int. All args have the same
+/// purpose as they do in checkUInt32Argument.
+static bool checkPositiveIntArgument(Sema &S, const AttributeList &Attr,
+                                     const Expr *Expr, int &Val,
+                                     unsigned Idx = UINT_MAX) {
+  uint32_t UVal;
+  if (!checkUInt32Argument(S, Attr, Expr, UVal, Idx))
+    return false;
+
+  if (UVal > std::numeric_limits<int>::max()) {
+    llvm::APSInt I(32); // for toString
+    I = UVal;
+    S.Diag(Expr->getExprLoc(), diag::err_ice_too_large)
+        << I.toString(10, false) << 32 << /* Unsigned */ 0;
+    return false;
+  }
+
+  Val = UVal;
+  return true;
+}
+
 /// \brief Diagnose mutually exclusive attributes when present on a given
 /// declaration. Returns true if diagnosed.
 template <typename AttrTy>
@@ -730,6 +752,69 @@ static void handleAssertExclusiveLockAttr(Sema &S, Decl *D,
                                            Attr.getAttributeSpellingListIndex()));
 }
 
+/// \brief Checks to be sure that the given parameter number is inbounds, and is
+/// an integral type. Will emit appropriate diagnostics if this returns
+/// false.
+///
+/// FuncParamNo is expected to be from the user, so is base-1. AttrArgNo is used
+/// to actually retrieve the argument, so it's base-0.
+static bool checkParamIsIntegerType(Sema &S, const FunctionDecl *FD,
+                                    const AttributeList &Attr,
+                                    unsigned FuncParamNo, unsigned AttrArgNo) {
+  assert(Attr.isArgExpr(AttrArgNo) && "Expected expression argument");
+  uint64_t Idx;
+  if (!checkFunctionOrMethodParameterIndex(S, FD, Attr, FuncParamNo,
+                                           Attr.getArgAsExpr(AttrArgNo), Idx))
+    return false;
+
+  const ParmVarDecl *Param = FD->getParamDecl(Idx);
+  if (!Param->getType()->isIntegerType() && !Param->getType()->isCharType()) {
+    SourceLocation SrcLoc = Attr.getArgAsExpr(AttrArgNo)->getLocStart();
+    S.Diag(SrcLoc, diag::err_attribute_integers_only)
+        << Attr.getName() << Param->getSourceRange();
+    return false;
+  }
+  return true;
+}
+
+static void handleAllocSizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+  if (!checkAttributeAtLeastNumArgs(S, Attr, 1) ||
+      !checkAttributeAtMostNumArgs(S, Attr, 2))
+    return;
+
+  const auto *FD = cast<FunctionDecl>(D);
+  if (!FD->getReturnType()->isPointerType()) {
+    S.Diag(Attr.getLoc(), diag::warn_attribute_return_pointers_only)
+        << Attr.getName();
+    return;
+  }
+
+  const Expr *SizeExpr = Attr.getArgAsExpr(0);
+  int SizeArgNo;
+  // Parameter indices are 1-indexed, hence Index=1
+  if (!checkPositiveIntArgument(S, Attr, SizeExpr, SizeArgNo, /*Index=*/1))
+    return;
+
+  if (!checkParamIsIntegerType(S, FD, Attr, SizeArgNo, /*AttrArgNo=*/0))
+    return;
+
+  // Args are 1-indexed, so 0 implies that the arg was not present
+  int NumberArgNo = 0;
+  if (Attr.getNumArgs() == 2) {
+    const Expr *NumberExpr = Attr.getArgAsExpr(1);
+    // Parameter indices are 1-based, hence Index=2
+    if (!checkPositiveIntArgument(S, Attr, NumberExpr, NumberArgNo,
+                                  /*Index=*/2))
+      return;
+
+    if (!checkParamIsIntegerType(S, FD, Attr, NumberArgNo, /*AttrArgNo=*/1))
+      return;
+  }
+
+  D->addAttr(::new (S.Context) AllocSizeAttr(
+      Attr.getRange(), S.Context, SizeArgNo, NumberArgNo,
+      Attr.getAttributeSpellingListIndex()));
+}
+
 static bool checkTryLockFunAttrCommon(Sema &S, Decl *D,
                                       const AttributeList &Attr,
@@ -5552,6 +5637,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
   case AttributeList::AT_AlignValue:
     handleAlignValueAttr(S, D, Attr);
     break;
+  case AttributeList::AT_AllocSize:
+    handleAllocSizeAttr(S, D, Attr);
+    break;
   case AttributeList::AT_AlwaysInline:
     handleAlwaysInlineAttr(S, D, Attr);
     break;
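A sketch of the diagnostics these checks produce (the declarations are
hypothetical; the wording comes from the DiagnosticSemaKinds.td entries
earlier in this commit):

  // OK: pointer return type, integer size parameter.
  void *ok(int n) __attribute__((alloc_size(1)));

  // warning: 'alloc_size' attribute only applies to return values that are
  // pointers
  int not_a_pointer(int n) __attribute__((alloc_size(1)));

  // error: 'alloc_size' attribute argument may only refer to a function
  // parameter of integer type
  void *bad_param(void *p) __attribute__((alloc_size(1)));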
@ -0,0 +1,352 @@
// RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm %s -o - 2>&1 | FileCheck %s

#define NULL ((void *)0)

int gi;

typedef unsigned long size_t;

// CHECK-DAG-RE: define void @my_malloc({{.*}}) #[[MALLOC_ATTR_NUMBER:[0-9]+]]
// N.B. LLVM's allocsize arguments are base-0, whereas ours are base-1 (for
// compat with GCC)
// CHECK-DAG-RE: attributes #[[MALLOC_ATTR_NUMBER]] = {.*allocsize(0).*}
void *my_malloc(size_t) __attribute__((alloc_size(1)));

// CHECK-DAG-RE: define void @my_calloc({{.*}}) #[[CALLOC_ATTR_NUMBER:[0-9]+]]
// CHECK-DAG-RE: attributes #[[CALLOC_ATTR_NUMBER]] = {.*allocsize(0, 1).*}
void *my_calloc(size_t, size_t) __attribute__((alloc_size(1, 2)));

// CHECK-LABEL: @test1
void test1() {
  void *const vp = my_malloc(100);
  // CHECK: store i32 100
  gi = __builtin_object_size(vp, 0);
  // CHECK: store i32 100
  gi = __builtin_object_size(vp, 1);
  // CHECK: store i32 100
  gi = __builtin_object_size(vp, 2);
  // CHECK: store i32 100
  gi = __builtin_object_size(vp, 3);

  void *const arr = my_calloc(100, 5);
  // CHECK: store i32 500
  gi = __builtin_object_size(arr, 0);
  // CHECK: store i32 500
  gi = __builtin_object_size(arr, 1);
  // CHECK: store i32 500
  gi = __builtin_object_size(arr, 2);
  // CHECK: store i32 500
  gi = __builtin_object_size(arr, 3);

  // CHECK: store i32 100
  gi = __builtin_object_size(my_malloc(100), 0);
  // CHECK: store i32 100
  gi = __builtin_object_size(my_malloc(100), 1);
  // CHECK: store i32 100
  gi = __builtin_object_size(my_malloc(100), 2);
  // CHECK: store i32 100
  gi = __builtin_object_size(my_malloc(100), 3);

  // CHECK: store i32 500
  gi = __builtin_object_size(my_calloc(100, 5), 0);
  // CHECK: store i32 500
  gi = __builtin_object_size(my_calloc(100, 5), 1);
  // CHECK: store i32 500
  gi = __builtin_object_size(my_calloc(100, 5), 2);
  // CHECK: store i32 500
  gi = __builtin_object_size(my_calloc(100, 5), 3);

  void *const zeroPtr = my_malloc(0);
  // CHECK: store i32 0
  gi = __builtin_object_size(zeroPtr, 0);
  // CHECK: store i32 0
  gi = __builtin_object_size(my_malloc(0), 0);

  void *const zeroArr1 = my_calloc(0, 1);
  void *const zeroArr2 = my_calloc(1, 0);
  // CHECK: store i32 0
  gi = __builtin_object_size(zeroArr1, 0);
  // CHECK: store i32 0
  gi = __builtin_object_size(zeroArr2, 0);
  // CHECK: store i32 0
  gi = __builtin_object_size(my_calloc(1, 0), 0);
  // CHECK: store i32 0
  gi = __builtin_object_size(my_calloc(0, 1), 0);
}

// CHECK-LABEL: @test2
void test2() {
  void *const vp = my_malloc(gi);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(vp, 0);

  void *const arr1 = my_calloc(gi, 1);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(arr1, 0);

  void *const arr2 = my_calloc(1, gi);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(arr2, 0);
}

// CHECK-LABEL: @test3
void test3() {
  char *const buf = (char *)my_calloc(100, 5);
  // CHECK: store i32 500
  gi = __builtin_object_size(buf, 0);
  // CHECK: store i32 500
  gi = __builtin_object_size(buf, 1);
  // CHECK: store i32 500
  gi = __builtin_object_size(buf, 2);
  // CHECK: store i32 500
  gi = __builtin_object_size(buf, 3);
}

struct Data {
  int a;
  int t[10];
  char pad[3];
  char end[1];
};

// CHECK-LABEL: @test5
void test5() {
  struct Data *const data = my_malloc(sizeof(*data));
  // CHECK: store i32 48
  gi = __builtin_object_size(data, 0);
  // CHECK: store i32 48
  gi = __builtin_object_size(data, 1);
  // CHECK: store i32 48
  gi = __builtin_object_size(data, 2);
  // CHECK: store i32 48
  gi = __builtin_object_size(data, 3);

  // CHECK: store i32 40
  gi = __builtin_object_size(&data->t[1], 0);
  // CHECK: store i32 36
  gi = __builtin_object_size(&data->t[1], 1);
  // CHECK: store i32 40
  gi = __builtin_object_size(&data->t[1], 2);
  // CHECK: store i32 36
  gi = __builtin_object_size(&data->t[1], 3);

  struct Data *const arr = my_calloc(sizeof(*data), 2);
  // CHECK: store i32 96
  gi = __builtin_object_size(arr, 0);
  // CHECK: store i32 96
  gi = __builtin_object_size(arr, 1);
  // CHECK: store i32 96
  gi = __builtin_object_size(arr, 2);
  // CHECK: store i32 96
  gi = __builtin_object_size(arr, 3);

  // CHECK: store i32 88
  gi = __builtin_object_size(&arr->t[1], 0);
  // CHECK: store i32 36
  gi = __builtin_object_size(&arr->t[1], 1);
  // CHECK: store i32 88
  gi = __builtin_object_size(&arr->t[1], 2);
  // CHECK: store i32 36
  gi = __builtin_object_size(&arr->t[1], 3);
}

// CHECK-LABEL: @test6
void test6() {
  // Things that would normally trigger conservative estimates don't need to do
  // so when we know the source of the allocation.
  struct Data *const data = my_malloc(sizeof(*data) + 10);
  // CHECK: store i32 11
  gi = __builtin_object_size(data->end, 0);
  // CHECK: store i32 11
  gi = __builtin_object_size(data->end, 1);
  // CHECK: store i32 11
  gi = __builtin_object_size(data->end, 2);
  // CHECK: store i32 11
  gi = __builtin_object_size(data->end, 3);

  struct Data *const arr = my_calloc(sizeof(*arr) + 5, 3);
  // AFAICT, GCC treats malloc and calloc identically. So, we should do the
  // same.
  //
  // Additionally, GCC ignores the initial array index when determining whether
  // we're writing off the end of an alloc_size base. e.g.
  //   arr[0].end
  //   arr[1].end
  //   arr[2].end
  // ...Are all considered "writing off the end", because there's no way to tell
  // with high accuracy if the user meant "allocate a single N-byte `Data`",
  // or "allocate M smaller `Data`s with extra padding".

  // CHECK: store i32 112
  gi = __builtin_object_size(arr->end, 0);
  // CHECK: store i32 112
  gi = __builtin_object_size(arr->end, 1);
  // CHECK: store i32 112
  gi = __builtin_object_size(arr->end, 2);
  // CHECK: store i32 112
  gi = __builtin_object_size(arr->end, 3);

  // CHECK: store i32 112
  gi = __builtin_object_size(arr[0].end, 0);
  // CHECK: store i32 112
  gi = __builtin_object_size(arr[0].end, 1);
  // CHECK: store i32 112
  gi = __builtin_object_size(arr[0].end, 2);
  // CHECK: store i32 112
  gi = __builtin_object_size(arr[0].end, 3);

  // CHECK: store i32 64
  gi = __builtin_object_size(arr[1].end, 0);
  // CHECK: store i32 64
  gi = __builtin_object_size(arr[1].end, 1);
  // CHECK: store i32 64
  gi = __builtin_object_size(arr[1].end, 2);
  // CHECK: store i32 64
  gi = __builtin_object_size(arr[1].end, 3);

  // CHECK: store i32 16
  gi = __builtin_object_size(arr[2].end, 0);
  // CHECK: store i32 16
  gi = __builtin_object_size(arr[2].end, 1);
  // CHECK: store i32 16
  gi = __builtin_object_size(arr[2].end, 2);
  // CHECK: store i32 16
  gi = __builtin_object_size(arr[2].end, 3);
}

// CHECK-LABEL: @test7
void test7() {
  struct Data *const data = my_malloc(sizeof(*data) + 5);
  // CHECK: store i32 9
  gi = __builtin_object_size(data->pad, 0);
  // CHECK: store i32 3
  gi = __builtin_object_size(data->pad, 1);
  // CHECK: store i32 9
  gi = __builtin_object_size(data->pad, 2);
  // CHECK: store i32 3
  gi = __builtin_object_size(data->pad, 3);
}

// CHECK-LABEL: @test8
void test8() {
  // Non-const pointers aren't currently supported.
  void *buf = my_calloc(100, 5);
  // CHECK: @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
  gi = __builtin_object_size(buf, 0);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(buf, 1);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(buf, 2);
  // CHECK: store i32 0
  gi = __builtin_object_size(buf, 3);
}

// CHECK-LABEL: @test9
void test9() {
  // Check to be sure that we unwrap things correctly.
  short *const buf0 = (my_malloc(100));
  short *const buf1 = (short*)(my_malloc(100));
  short *const buf2 = ((short*)(my_malloc(100)));

  // CHECK: store i32 100
  gi = __builtin_object_size(buf0, 0);
  // CHECK: store i32 100
  gi = __builtin_object_size(buf1, 0);
  // CHECK: store i32 100
  gi = __builtin_object_size(buf2, 0);
}

// CHECK-LABEL: @test10
void test10() {
  // Yay overflow
  short *const arr = my_calloc((size_t)-1 / 2 + 1, 2);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(arr, 0);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(arr, 1);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(arr, 2);
  // CHECK: store i32 0
  gi = __builtin_object_size(arr, 3);

  // As an implementation detail, CharUnits can't handle numbers greater than or
  // equal to 2**63. Realistically, this shouldn't be a problem, but we should
  // be sure we don't emit crazy results for this case.
  short *const buf = my_malloc((size_t)-1);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(buf, 0);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(buf, 1);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(buf, 2);
  // CHECK: store i32 0
  gi = __builtin_object_size(buf, 3);

  short *const arr_big = my_calloc((size_t)-1 / 2 - 1, 2);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(arr_big, 0);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(arr_big, 1);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(arr_big, 2);
  // CHECK: store i32 0
  gi = __builtin_object_size(arr_big, 3);
}

void *my_tiny_malloc(char) __attribute__((alloc_size(1)));
void *my_tiny_calloc(char, char) __attribute__((alloc_size(1, 2)));

// CHECK-LABEL: @test11
void test11() {
  void *const vp = my_tiny_malloc(100);
  // CHECK: store i32 100
  gi = __builtin_object_size(vp, 0);
  // CHECK: store i32 100
  gi = __builtin_object_size(vp, 1);
  // CHECK: store i32 100
  gi = __builtin_object_size(vp, 2);
  // CHECK: store i32 100
  gi = __builtin_object_size(vp, 3);

  // N.B. This causes char overflow, but not size_t overflow, so it should be
  // supported.
  void *const arr = my_tiny_calloc(100, 5);
  // CHECK: store i32 500
  gi = __builtin_object_size(arr, 0);
  // CHECK: store i32 500
  gi = __builtin_object_size(arr, 1);
  // CHECK: store i32 500
  gi = __builtin_object_size(arr, 2);
  // CHECK: store i32 500
  gi = __builtin_object_size(arr, 3);
}

void *my_signed_malloc(long) __attribute__((alloc_size(1)));
void *my_signed_calloc(long, long) __attribute__((alloc_size(1, 2)));

// CHECK-LABEL: @test12
void test12() {
  // CHECK: store i32 100
  gi = __builtin_object_size(my_signed_malloc(100), 0);
  // CHECK: store i32 500
  gi = __builtin_object_size(my_signed_calloc(100, 5), 0);

  void *const vp = my_signed_malloc(-2);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(vp, 0);
  // N.B. These get lowered to -1 because the function calls may have
  // side-effects, and we can't determine the objectsize.
  // CHECK: store i32 -1
  gi = __builtin_object_size(my_signed_malloc(-2), 0);

  void *const arr1 = my_signed_calloc(-2, 1);
  void *const arr2 = my_signed_calloc(1, -2);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(arr1, 0);
  // CHECK: @llvm.objectsize
  gi = __builtin_object_size(arr2, 0);
  // CHECK: store i32 -1
  gi = __builtin_object_size(my_signed_calloc(1, -2), 0);
  // CHECK: store i32 -1
  gi = __builtin_object_size(my_signed_calloc(-2, 1), 0);
}
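Taken together, these tests pin down the user-facing contract: when the annotated call's result is bound to a const pointer (or passed directly to the builtin), __builtin_object_size folds to an exact byte count, while non-constant sizes, overflow, and negative arguments fall back to @llvm.objectsize or the type-3 fallback of 0. One plausible consumer, sketched here with hypothetical helper names that are not part of this commit, is a fortify-style checked copy:

    #include <stdlib.h>
    #include <string.h>

    /* Sketch only; my_malloc stands in for any annotated allocator. */
    void *my_malloc(size_t) __attribute__((alloc_size(1)));

    /* Abort if the destination is provably too small; otherwise copy.
       When the object size folds to a constant, the check folds away. */
    #define CHECKED_COPY(dst, src, n)                                 \
      do {                                                            \
        if (__builtin_object_size((dst), 0) != (size_t)-1 &&          \
            __builtin_object_size((dst), 0) < (n))                    \
          abort();                                                    \
        memcpy((dst), (src), (n));                                    \
      } while (0)

    void demo(const char *src) {
      char *const p = my_malloc(16);
      CHECKED_COPY(p, src, 8); /* 8 <= 16: the branch folds to a no-op */
    }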
@ -0,0 +1,72 @@
// RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm -O0 %s -o - 2>&1 -std=c++11 | FileCheck %s

namespace templates {
void *my_malloc(int N) __attribute__((alloc_size(1)));
void *my_calloc(int N, int M) __attribute__((alloc_size(1, 2)));

struct MyType {
  int arr[4];
};

template <typename T> int callMalloc();

template <typename T, int N> int callCalloc();

// CHECK-LABEL: define i32 @_ZN9templates6testItEv()
int testIt() {
  // CHECK: call i32 @_ZN9templates10callMallocINS_6MyTypeEEEiv
  // CHECK: call i32 @_ZN9templates10callCallocINS_6MyTypeELi4EEEiv
  return callMalloc<MyType>() + callCalloc<MyType, 4>();
}

// CHECK-LABEL: define linkonce_odr i32
// @_ZN9templates10callMallocINS_6MyTypeEEEiv
template <typename T> int callMalloc() {
  static_assert(sizeof(T) == 16, "");
  // CHECK: ret i32 16
  return __builtin_object_size(my_malloc(sizeof(T)), 0);
}

// CHECK-LABEL: define linkonce_odr i32
// @_ZN9templates10callCallocINS_6MyTypeELi4EEEiv
template <typename T, int N> int callCalloc() {
  static_assert(sizeof(T) * N == 64, "");
  // CHECK: ret i32 64
  return __builtin_object_size(my_malloc(sizeof(T) * N), 0);
}
}

namespace templated_alloc_size {
using size_t = unsigned long;

// We don't need bodies for any of these, because they're only used in
// __builtin_object_size, and that shouldn't need anything but a function
// decl with alloc_size on it.
template <typename T>
T *my_malloc(size_t N = sizeof(T)) __attribute__((alloc_size(1)));

template <typename T>
T *my_calloc(size_t M, size_t N = sizeof(T)) __attribute__((alloc_size(2, 1)));

template <size_t N>
void *dependent_malloc(size_t NT = N) __attribute__((alloc_size(1)));

template <size_t N, size_t M>
void *dependent_calloc(size_t NT = N, size_t MT = M)
    __attribute__((alloc_size(1, 2)));

template <typename T, size_t M>
void *dependent_calloc2(size_t NT = sizeof(T), size_t MT = M)
    __attribute__((alloc_size(1, 2)));

// CHECK-LABEL: define i32 @_ZN20templated_alloc_size6testItEv
int testIt() {
  // 122 = 4 + 5*4 + 6 + 7*8 + 4*9
  // CHECK: ret i32 122
  return __builtin_object_size(my_malloc<int>(), 0) +
         __builtin_object_size(my_calloc<int>(5), 0) +
         __builtin_object_size(dependent_malloc<6>(), 0) +
         __builtin_object_size(dependent_calloc<7, 8>(), 0) +
         __builtin_object_size(dependent_calloc2<int, 9>(), 0);
}
}
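The second namespace above exercises a subtler point: alloc_size may name a defaulted parameter, and the attribute survives template instantiation. A hedged C++11 sketch of the same pattern in user code (names are illustrative, not from this diff):

    using size_t = unsigned long;

    // A typed allocator whose size argument defaults to sizeof(T), so
    // callers get exact object sizes without spelling the size out.
    template <typename T>
    T *typed_alloc(size_t bytes = sizeof(T)) __attribute__((alloc_size(1)));

    int folded_size() {
      // Folds to sizeof(int): the defaulted argument is visible to the
      // constant evaluator, as with my_malloc<int>() in the test above.
      return __builtin_object_size(typed_alloc<int>(), 0);
    }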
@ -42,7 +42,5 @@ X::~X() {
// CHECK-LABEL: define internal void @___ZN4ZoneD2Ev_block_invoke_
// CHECK-LABEL: define internal void @___ZN1XC2Ev_block_invoke
// CHECK-LABEL: define internal void @___ZN1XC2Ev_block_invoke_
// CHECK-LABEL: define internal void @___ZN1XC1Ev_block_invoke
// CHECK-LABEL: define internal void @___ZN1XC1Ev_block_invoke_
// CHECK-LABEL: define internal void @___ZN1XD2Ev_block_invoke
// CHECK-LABEL: define internal void @___ZN1XD2Ev_block_invoke_
@ -18,9 +18,6 @@ struct D { ~D(); };
// CHECK: @__dso_handle = external global i8
// CHECK: @c = global %struct.C zeroinitializer, align 8

// It's okay if we ever implement the IR-generation optimization to remove this.
// CHECK: @_ZN5test3L3varE = internal constant i8* getelementptr inbounds ([7 x i8], [7 x i8]*

// PR6205: The casts should not require global initializers
// CHECK: @_ZN6PR59741cE = external global %"struct.PR5974::C"
// CHECK: @_ZN6PR59741aE = global %"struct.PR5974::A"* getelementptr inbounds (%"struct.PR5974::C", %"struct.PR5974::C"* @_ZN6PR59741cE, i32 0, i32 0)
@ -3,6 +3,8 @@
typedef void (^bl_t)(local void *);

// N.B. The check here only exists to set BL_GLOBAL
// COMMON: @block_G = {{.*}}bitcast ([[BL_GLOBAL:[^@]+@__block_literal_global(\.[0-9]+)?]]
const bl_t block_G = (bl_t) ^ (local void *a) {};

kernel void device_side_enqueue(global int *a, global int *b, int i) {
@ -122,28 +124,24 @@ kernel void device_side_enqueue(global int *a, global int *b, int i) {
                 },
                 4294967296L);

  // The full type of these expressions is long (and repeated elsewhere), so we
  // capture it as part of the regex for convenience and clarity.
  // COMMON: store void ()* bitcast ([[BL_A:[^@]+@__block_literal_global.[0-9]+]] to void ()*), void ()** %block_A
  void (^const block_A)(void) = ^{
    return;
  };

  // COMMON: store void (i8 addrspace(2)*)* bitcast ([[BL_B:[^@]+@__block_literal_global.[0-9]+]] to void (i8 addrspace(2)*)*), void (i8 addrspace(2)*)** %block_B
  void (^const block_B)(local void *) = ^(local void *a) {
    return;
  };

  // COMMON: [[BL:%[0-9]+]] = load void ()*, void ()** %block_A
  // COMMON: [[BL_I8:%[0-9]+]] = bitcast void ()* [[BL]] to i8*
  // COMMON: call i32 @__get_kernel_work_group_size_impl(i8* [[BL_I8]])
  // COMMON: call i32 @__get_kernel_work_group_size_impl(i8* bitcast ([[BL_A]] to i8*))
  unsigned size = get_kernel_work_group_size(block_A);
  // COMMON: [[BL:%[0-9]+]] = load void (i8 addrspace(2)*)*, void (i8 addrspace(2)*)** %block_B
  // COMMON: [[BL_I8:%[0-9]+]] = bitcast void (i8 addrspace(2)*)* [[BL]] to i8*
  // COMMON: call i32 @__get_kernel_work_group_size_impl(i8* [[BL_I8]])
  // COMMON: call i32 @__get_kernel_work_group_size_impl(i8* bitcast ([[BL_B]] to i8*))
  size = get_kernel_work_group_size(block_B);
  // COMMON: [[BL:%[0-9]+]] = load void ()*, void ()** %block_A
  // COMMON: [[BL_I8:%[0-9]+]] = bitcast void ()* [[BL]] to i8*
  // COMMON: call i32 @__get_kernel_preferred_work_group_multiple_impl(i8* [[BL_I8]])
  // COMMON: call i32 @__get_kernel_preferred_work_group_multiple_impl(i8* bitcast ([[BL_A]] to i8*))
  size = get_kernel_preferred_work_group_size_multiple(block_A);
  // COMMON: [[BL:%[0-9]+]] = load void (i8 addrspace(2)*)*, void (i8 addrspace(2)*)* addrspace(1)* @block_G
  // COMMON: [[BL_I8:%[0-9]+]] = bitcast void (i8 addrspace(2)*)* [[BL]] to i8*
  // COMMON: call i32 @__get_kernel_preferred_work_group_multiple_impl(i8* [[BL_I8]])
  // COMMON: call i32 @__get_kernel_preferred_work_group_multiple_impl(i8* bitcast ([[BL_GLOBAL]] to i8*))
  size = get_kernel_preferred_work_group_size_multiple(block_G);
}
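The tightened checks above are fallout from the block-uniquing change the commit message mentions (D26410): capture-free block literals that are structurally identical can now be emitted as a single __block_literal_global and referenced from several places, which is why the test captures each literal once ([[BL_A]], [[BL_B]], [[BL_GLOBAL]]) and expects it to be reused. A minimal illustration in C with the blocks extension (a sketch under the assumption of -fblocks; not part of this diff):

    // Both globals can refer to one uniqued block literal, where older
    // clangs emitted a separate __block_literal_global per initializer.
    void (^const cb1)(void) = ^{};
    void (^const cb2)(void) = ^{};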
@ -0,0 +1,23 @@
// RUN: %clang_cc1 %s -verify

void *fail1(int a) __attribute__((alloc_size)); //expected-error{{'alloc_size' attribute takes at least 1 argument}}
void *fail2(int a) __attribute__((alloc_size())); //expected-error{{'alloc_size' attribute takes at least 1 argument}}

void *fail3(int a) __attribute__((alloc_size(0))); //expected-error{{'alloc_size' attribute parameter 0 is out of bounds}}
void *fail4(int a) __attribute__((alloc_size(2))); //expected-error{{'alloc_size' attribute parameter 2 is out of bounds}}

void *fail5(int a, int b) __attribute__((alloc_size(0, 1))); //expected-error{{'alloc_size' attribute parameter 0 is out of bounds}}
void *fail6(int a, int b) __attribute__((alloc_size(3, 1))); //expected-error{{'alloc_size' attribute parameter 3 is out of bounds}}

void *fail7(int a, int b) __attribute__((alloc_size(1, 0))); //expected-error{{'alloc_size' attribute parameter 0 is out of bounds}}
void *fail8(int a, int b) __attribute__((alloc_size(1, 3))); //expected-error{{'alloc_size' attribute parameter 3 is out of bounds}}

int fail9(int a) __attribute__((alloc_size(1))); //expected-warning{{'alloc_size' attribute only applies to return values that are pointers}}

int fail10 __attribute__((alloc_size(1))); //expected-warning{{'alloc_size' attribute only applies to non-K&R-style functions}}

void *fail11(void *a) __attribute__((alloc_size(1))); //expected-error{{'alloc_size' attribute argument may only refer to a function parameter of integer type}}

void *fail12(int a) __attribute__((alloc_size("abc"))); //expected-error{{'alloc_size' attribute requires parameter 1 to be an integer constant}}
void *fail12(int a) __attribute__((alloc_size(1, "abc"))); //expected-error{{'alloc_size' attribute requires parameter 2 to be an integer constant}}
void *fail13(int a) __attribute__((alloc_size(1U<<31))); //expected-error{{integer constant expression evaluates to value 2147483648 that cannot be represented in a 32-bit signed integer type}}
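For contrast with the rejection cases above, declarations like the following (my own examples, not in the test) pass all of these checks:

    void *ok1(int n) __attribute__((alloc_size(1)));
    void *ok2(int n, int m) __attribute__((alloc_size(1, 2)));
    /* Non-integer parameters are fine as long as the attribute
       does not name them. */
    void *ok3(void *ctx, int n) __attribute__((alloc_size(2)));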
@ -1183,7 +1183,7 @@ constexpr int m1b = const_cast<const int&>(n1); // expected-error {{constant exp
constexpr int m2b = const_cast<const int&>(n2); // expected-error {{constant expression}} expected-note {{read of volatile object 'n2'}}

struct T { int n; };
const T t = { 42 }; // expected-note {{declared here}}
const T t = { 42 };

constexpr int f(volatile int &&r) {
  return r; // expected-note {{read of volatile-qualified type 'volatile int'}}

@ -1195,7 +1195,7 @@ struct S {
  int j : f(0); // expected-error {{constant expression}} expected-note {{in call to 'f(0)'}}
  int k : g(0); // expected-error {{constant expression}} expected-note {{temporary created here}} expected-note {{in call to 'g(0)'}}
  int l : n3; // expected-error {{constant expression}} expected-note {{read of non-const variable}}
  int m : t.n; // expected-error {{constant expression}} expected-note {{read of non-constexpr variable}}
  int m : t.n; // expected-warning{{width of bit-field 'm' (42 bits)}}
};

}
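These last two hunks are the promised fallout from the constant-evaluator change: reading a const-qualified (but non-constexpr) object such as t is no longer rejected outright, so clang now folds t.n to 42 where it previously issued a hard error, and the bit-field case diagnoses the resulting width instead. A hedged reduction of the before/after behavior:

    struct T { int n; };
    const T t = { 42 };

    // Before this commit: error, 't.n' was not usable here. After: the
    // evaluator folds t.n to 42, so the bit-field is accepted and clang
    // warns about its 42-bit width instead.
    struct S { int m : t.n; };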