Adds Microsoft-compatible C++ record layout code to clang.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@192494 91177308-0d34-0410-b5e6-96231b3b80d8
Warren Hunt 2013-10-11 20:19:00 +00:00
parent 659837e0ce
commit b2969b1e50
15 changed files with 852 additions and 95 deletions

View File

@ -1660,6 +1660,7 @@ public:
/// record (struct/union/class) \p D, which indicates its size and field
/// position information.
const ASTRecordLayout &getASTRecordLayout(const RecordDecl *D) const;
const ASTRecordLayout *BuildMicrosoftASTRecordLayout(const RecordDecl *D) const;
/// \brief Get or compute information about the layout of the specified
/// Objective-C interface.

View File

@ -93,7 +93,20 @@ private:
/// HasOwnVFPtr - Does this class provide a virtual function table
/// (vtable in Itanium, vftbl in Microsoft) that is independent from
/// its base classes?
bool HasOwnVFPtr; // TODO: stash this somewhere more efficient
bool HasOwnVFPtr : 1;
/// HasVFPtr - Does this class have a vftable at all (it could be inherited
/// from its primary base)?
bool HasVFPtr : 1;
/// HasOwnVBPtr - Does this class provide a virtual base table
/// (vbtable in Microsoft) that is independent from its base classes?
bool HasOwnVBPtr : 1;
/// AlignAfterVBases - Force appropriate alignment after virtual bases are
/// laid out in MS-C++-ABI.
bool AlignAfterVBases : 1;
/// PrimaryBase - The primary base info for this record.
llvm::PointerIntPair<const CXXRecordDecl *, 1, bool> PrimaryBase;
@ -122,13 +135,15 @@ private:
typedef CXXRecordLayoutInfo::BaseOffsetsMapTy BaseOffsetsMapTy;
ASTRecordLayout(const ASTContext &Ctx,
CharUnits size, CharUnits alignment,
bool hasOwnVFPtr, CharUnits vbptroffset,
bool hasOwnVFPtr, bool hasVFPtr, bool hasOwnVBPtr,
CharUnits vbptroffset,
CharUnits datasize,
const uint64_t *fieldoffsets, unsigned fieldcount,
CharUnits nonvirtualsize, CharUnits nonvirtualalign,
CharUnits SizeOfLargestEmptySubobject,
const CXXRecordDecl *PrimaryBase,
bool IsPrimaryBaseVirtual,
bool ForceAlign,
const BaseOffsetsMapTy& BaseOffsets,
const VBaseOffsetsMapTy& VBaseOffsets);
@ -226,6 +241,35 @@ public:
return CXXInfo->HasOwnVFPtr;
}
/// hasVFPtr - Does this class have a virtual function table pointer.
bool hasVFPtr() const {
assert(CXXInfo && "Record layout does not have C++ specific info!");
return CXXInfo->HasVFPtr;
}
/// hasOwnVBPtr - Does this class provide its own virtual-base
/// table pointer, rather than sharing one from a base class?
///
/// This implies that the class has no base class with a vbptr
/// that it could share.
bool hasOwnVBPtr() const {
assert(CXXInfo && "Record layout does not have C++ specific info!");
return CXXInfo->HasOwnVBPtr;
}
/// hasVBPtr - Does this class have a virtual base table pointer?
bool hasVBPtr() const {
assert(CXXInfo && "Record layout does not have C++ specific info!");
return !CXXInfo->VBPtrOffset.isNegative();
}
bool getAlignAfterVBases() const {
assert(CXXInfo && "Record layout does not have C++ specific info!");
return CXXInfo->AlignAfterVBases;
}
/// getVBPtrOffset - Get the offset for virtual base table pointer.
/// This is only meaningful with the Microsoft ABI.
CharUnits getVBPtrOffset() const {

View File

@ -463,6 +463,7 @@ def warn_pragma_pack_show : Warning<"value of #pragma pack(show) == %0">;
def warn_pragma_pack_pop_identifer_and_alignment : Warning<
"specifying both a name and alignment to 'pop' is undefined">;
def warn_pragma_pack_pop_failed : Warning<"#pragma pack(pop, ...) failed: %0">;
def warn_pragma_ms_struct_failed : Warning<"#pragma ms_struct cannot be used with dynamic classes or structures">, InGroup<IgnoredAttributes>;
def warn_pragma_unused_undeclared_var : Warning<
"undeclared variable %0 used as an argument for '#pragma unused'">;

View File

@ -43,7 +43,9 @@ ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx, CharUnits size,
// Constructor for C++ records.
ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
CharUnits size, CharUnits alignment,
bool hasOwnVFPtr, CharUnits vbptroffset,
bool hasOwnVFPtr, bool hasVFPtr,
bool hasOwnVBPtr,
CharUnits vbptroffset,
CharUnits datasize,
const uint64_t *fieldoffsets,
unsigned fieldcount,
@ -52,6 +54,7 @@ ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
CharUnits SizeOfLargestEmptySubobject,
const CXXRecordDecl *PrimaryBase,
bool IsPrimaryBaseVirtual,
bool AlignAfterVBases,
const BaseOffsetsMapTy& BaseOffsets,
const VBaseOffsetsMapTy& VBaseOffsets)
: Size(size), DataSize(datasize), Alignment(alignment), FieldOffsets(0),
@ -71,6 +74,10 @@ ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
CXXInfo->VBaseOffsets = VBaseOffsets;
CXXInfo->HasOwnVFPtr = hasOwnVFPtr;
CXXInfo->VBPtrOffset = vbptroffset;
CXXInfo->HasVFPtr = hasVFPtr;
CXXInfo->HasOwnVBPtr = hasOwnVBPtr;
CXXInfo->AlignAfterVBases = AlignAfterVBases;
#ifndef NDEBUG
if (const CXXRecordDecl *PrimaryBase = getPrimaryBase()) {

View File

@ -604,6 +604,10 @@ protected:
/// pointer, as opposed to inheriting one from a primary base class.
bool HasOwnVFPtr;
/// HasOwnVBPtr - Whether the class provides its own vbtbl
/// pointer, as opposed to inheriting one from a base class. Only for MS.
bool HasOwnVBPtr;
/// VBPtrOffset - Virtual base table offset. Only for MS layout.
CharUnits VBPtrOffset;
@ -654,6 +658,7 @@ protected:
NonVirtualAlignment(CharUnits::One()),
PrimaryBase(0), PrimaryBaseIsVirtual(false),
HasOwnVFPtr(false),
HasOwnVBPtr(false),
VBPtrOffset(CharUnits::fromQuantity(-1)),
FirstNearlyEmptyVBase(0) { }
@ -1074,8 +1079,10 @@ RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
// Remember if this base has virtual bases itself.
if (BaseDecl->getNumVBases())
if (BaseDecl->getNumVBases()) {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
HasNonVirtualBaseWithVBTable = true;
}
// Skip the primary base, because we've already laid it out. The
// !PrimaryBaseIsVirtual check is required because we might have a
@ -1116,6 +1123,7 @@ RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
PtrAlign = std::max(PtrAlign, Alignment);
EnsureVTablePointerAlignment(PtrAlign);
HasOwnVBPtr = true;
VBPtrOffset = getSize();
setSize(getSize() + PtrWidth);
setDataSize(getSize());
@ -2338,6 +2346,704 @@ static bool mustSkipTailPadding(TargetCXXABI ABI, const CXXRecordDecl *RD) {
llvm_unreachable("bad tail-padding use kind");
}
static bool isMsLayout(const RecordDecl* D) {
return (D->getASTContext().getTargetInfo().getCXXABI().isMicrosoft() ||
D->getASTContext().getTargetInfo().getTriple().getOS() ==
llvm::Triple::Win32) &&
D->getASTContext().getTargetInfo().getPointerWidth(0) == 32;
// FIXME: we intend to enable 64 bit mode once it's been verified.
}
// This section contains an implementation of struct layout that is, up to the
// included tests, compatible with cl.exe (2012). The layout produced is
// significantly different than those produced by the Itanium ABI. Here we note
// the most important differences.
//
// * The alignment of bitfields in unions is ignored when computing the
// alignment of the union.
// * The existence of a zero-width bitfield that occurs after anything other
// than a non-zero-width bitfield is ignored.
// * The Itanium-equivalent vtable pointer is split into a vfptr (virtual
// function table pointer) and a vbptr (virtual base table pointer). Each can
// be shared with a non-virtual base, and those bases need not be the same.
// vfptrs always occur at offset 0. vbptrs can occur at an arbitrary offset
// and are placed after non-virtual bases but before fields.
// * Virtual bases sometimes require a 'vtordisp' field that is laid out before
// the virtual base and is used in conjunction with virtual overrides during
// construction and destruction.
// * vfptrs are allocated in a block of memory with size equal to the alignment
// of the fields and non-virtual bases, at offset 0.
// * vbptrs are allocated in a block of memory with size equal to the alignment
// of the fields and non-virtual bases. This block is at a potentially
// unaligned offset. If the allocation slot is unaligned and the alignment is
// less than or equal to the pointer size, additional space is allocated so
// that the pointer can be aligned properly. This causes very strange effects
// on the placement of objects after the allocated block (see the code).
// * vtordisps are allocated in a block of memory with size and alignment equal
// to the alignment of the completed structure (before applying __declspec(
// align())). The vtordisp always occurs at the end of the allocation block,
// immediately prior to the virtual base.
// * The last zero-sized non-virtual base is allocated after the placement of
// the vbptr (if one exists) and can be placed at the end of the struct,
// potentially aliasing either the first member or another struct allocated
// after this one.
// * The last zero-sized virtual base may be placed at the end of the struct
// and can potentially alias a zero-sized type in the next struct.
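// Illustrative example (hypothetical, not part of this patch): under the
// rules above, MSDyn gets a vfptr at offset 0 because it declares a new
// virtual function, and a vbptr placed after the vfptr but before the fields
// because it has a virtual base. Expected 32-bit cl.exe layout: vfptr at 0,
// vbptr at 4, b at 8 (nvsize = 12), the virtual MSBase at 12,
// sizeof(MSDyn) = 16.
struct MSBase { int a; };
struct MSDyn : virtual MSBase {
  virtual void f() {}
  int b;
};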
namespace {
struct MicrosoftRecordLayoutBuilder {
typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;
MicrosoftRecordLayoutBuilder(const ASTContext &Context) : Context(Context) {}
private:
MicrosoftRecordLayoutBuilder(const MicrosoftRecordLayoutBuilder &)
LLVM_DELETED_FUNCTION;
void operator=(const MicrosoftRecordLayoutBuilder &) LLVM_DELETED_FUNCTION;
public:
void layout(const RecordDecl *RD);
void cxxLayout(const CXXRecordDecl *RD);
/// \brief Initializes size and alignment and honors some flags.
void initializeLayout(const RecordDecl *RD);
/// \brief Initializes C++ layout, computes alignment and virtual alignment,
/// and determines the existence of vfptrs and vbptrs. Alignment is needed
/// before the vfptr is laid out.
void initializeCXXLayout(const CXXRecordDecl *RD);
void layoutVFPtr(const CXXRecordDecl *RD);
void layoutNonVirtualBases(const CXXRecordDecl *RD);
void layoutNonVirtualBase(const CXXRecordDecl *RD);
void layoutVBPtr(const CXXRecordDecl *RD);
/// \brief Lays out the fields of the record. Also rounds size up to
/// alignment.
void layoutFields(const RecordDecl *RD);
void layoutField(const FieldDecl *FD);
void layoutBitField(const FieldDecl *FD);
/// \brief Lays out a single zero-width bit-field in the record and handles
/// special cases associated with zero-width bit-fields.
void layoutZeroWidthBitField(const FieldDecl *FD);
void layoutVirtualBases(const CXXRecordDecl *RD);
void layoutVirtualBase(const CXXRecordDecl *RD, bool HasVtordisp);
/// \brief Flushes the lazy virtual base and conditionally rounds up to
/// alignment.
void finalizeCXXLayout(const CXXRecordDecl *RD);
void honorDeclspecAlign(const RecordDecl *RD);
/// \brief Updates the alignment of the type. This function doesn't take any
/// properties (such as packedness) into account. getAdjustedFieldInfo()
/// adjusts for packedness.
void updateAlignment(CharUnits NewAlignment) {
Alignment = std::max(Alignment, NewAlignment);
}
/// \brief Gets the size and alignment taking attributes into account.
std::pair<CharUnits, CharUnits> getAdjustedFieldInfo(const FieldDecl *FD);
/// \brief Places a field at offset 0.
void placeFieldAtZero() { FieldOffsets.push_back(0); }
/// \brief Places a field at an offset in CharUnits.
void placeFieldAtOffset(CharUnits FieldOffset) {
FieldOffsets.push_back(Context.toBits(FieldOffset));
}
/// \brief Places a bitfield at a bit offset.
void placeFieldAtBitOffset(uint64_t FieldOffset) {
FieldOffsets.push_back(FieldOffset);
}
/// \brief Compute the set of virtual bases for which vtordisps are required.
llvm::SmallPtrSet<const CXXRecordDecl *, 2>
computeVtorDispSet(const CXXRecordDecl *RD);
const ASTContext &Context;
/// \brief The size of the record being laid out.
CharUnits Size;
/// \brief The current alignment of the record layout.
CharUnits Alignment;
/// \brief The collection of field offsets.
SmallVector<uint64_t, 16> FieldOffsets;
/// \brief The maximum allowed field alignment. This is set by #pragma pack.
CharUnits MaxFieldAlignment;
/// \brief Alignment does not occur for virtual bases unless something
/// forces it to by explicitly using __declspec(align())
bool AlignAfterVBases : 1;
bool IsUnion : 1;
/// \brief True if the last field laid out was a bitfield and was not 0
/// width.
bool LastFieldIsNonZeroWidthBitfield : 1;
/// \brief The size of the allocation of the currently active bitfield.
/// This value isn't meaningful unless LastFieldIsNonZeroWidthBitfield
/// is true.
CharUnits CurrentBitfieldSize;
/// \brief The number of remaining bits in our last bitfield allocation.
/// This value isn't meaningful unless LastFieldIsNonZeroWidthBitfield is
/// true.
unsigned RemainingBitsInField;
/// \brief The data size of the record layout.
CharUnits DataSize;
/// \brief The alignment of the non-virtual portion of the record layout.
/// Only used for C++ layouts.
CharUnits NonVirtualAlignment;
/// \brief The additional alignment imposed by the virtual bases.
CharUnits VirtualAlignment;
/// \brief The primary base class (if one exists).
const CXXRecordDecl *PrimaryBase;
/// \brief The class we share our vb-pointer with.
const CXXRecordDecl *SharedVBPtrBase;
/// \brief True if the class has a (not necessarily its own) vftable pointer.
bool HasVFPtr : 1;
/// \brief True if the class has a (not necessarily its own) vbtable pointer.
bool HasVBPtr : 1;
/// \brief Offset to the virtual base table pointer (if one exists).
CharUnits VBPtrOffset;
/// \brief Base classes and their offsets in the record.
BaseOffsetsMapTy Bases;
/// \brief Virtual base classes and their offsets in the record.
ASTRecordLayout::VBaseOffsetsMapTy VBases;
/// \brief The size of a pointer.
CharUnits PointerSize;
/// \brief The alignment of a pointer.
CharUnits PointerAlignment;
/// \brief Holds an empty base we haven't yet laid out.
const CXXRecordDecl *LazyEmptyBase;
};
} // namespace
std::pair<CharUnits, CharUnits>
MicrosoftRecordLayoutBuilder::getAdjustedFieldInfo(const FieldDecl *FD) {
std::pair<CharUnits, CharUnits> FieldInfo;
if (FD->getType()->isIncompleteArrayType()) {
// This is a flexible array member; we can't directly
// query getTypeInfo about these, so we figure it out here.
// Flexible array members don't have any size, but they
// have to be aligned appropriately for their element type.
FieldInfo.first = CharUnits::Zero();
const ArrayType *ATy = Context.getAsArrayType(FD->getType());
FieldInfo.second = Context.getTypeAlignInChars(ATy->getElementType());
} else if (const ReferenceType *RT = FD->getType()->getAs<ReferenceType>()) {
unsigned AS = RT->getPointeeType().getAddressSpace();
FieldInfo.first = Context
.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(AS));
FieldInfo.second = Context
.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(AS));
} else
FieldInfo = Context.getTypeInfoInChars(FD->getType());
// If we're not on Win32 and we're using ms_struct, the field alignment will
// be wrong for 64-bit types, so we fix that here.
if (FD->getASTContext().getTargetInfo().getTriple().getOS() !=
llvm::Triple::Win32) {
QualType T = Context.getBaseElementType(FD->getType());
if (const BuiltinType *BTy = T->getAs<BuiltinType>()) {
CharUnits TypeSize = Context.getTypeSizeInChars(BTy);
if (TypeSize > FieldInfo.second)
FieldInfo.second = TypeSize;
}
}
// Respect packed attribute.
if (FD->hasAttr<PackedAttr>())
FieldInfo.second = CharUnits::One();
// Respect pack pragma.
else if (!MaxFieldAlignment.isZero())
FieldInfo.second = std::min(FieldInfo.second, MaxFieldAlignment);
// Respect alignment attributes.
if (unsigned fieldAlign = FD->getMaxAlignment()) {
CharUnits FieldAlign = Context.toCharUnitsFromBits(fieldAlign);
AlignAfterVBases = true;
FieldInfo.second = std::max(FieldInfo.second, FieldAlign);
}
return FieldInfo;
}
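// Illustrative example (hypothetical, not part of this patch; __declspec
// requires MS extensions): getAdjustedFieldInfo() caps the natural alignment
// of 'd' at the #pragma pack value, but the explicit alignment attribute on
// 'e' is applied afterwards and wins, so 'e' stays 8-byte aligned:
// c at offset 0, d at offset 1, e at offset 16.
#pragma pack(push, 1)
struct MSPackedExample {
  char c;
  double d;
  __declspec(align(8)) int e;
};
#pragma pack(pop)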
void MicrosoftRecordLayoutBuilder::initializeLayout(const RecordDecl *RD) {
IsUnion = RD->isUnion();
Size = CharUnits::Zero();
Alignment = CharUnits::One();
AlignAfterVBases = false;
// Compute the maximum field alignment.
MaxFieldAlignment = CharUnits::Zero();
// Honor the default struct packing maximum alignment flag.
if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct)
MaxFieldAlignment = CharUnits::fromQuantity(DefaultMaxFieldAlignment);
// Honor the packing attribute.
if (const MaxFieldAlignmentAttr *MFAA = RD->getAttr<MaxFieldAlignmentAttr>())
MaxFieldAlignment = Context.toCharUnitsFromBits(MFAA->getAlignment());
// Packed attribute forces max field alignment to be 1.
if (RD->hasAttr<PackedAttr>())
MaxFieldAlignment = CharUnits::One();
}
void MicrosoftRecordLayoutBuilder::layout(const RecordDecl *RD) {
initializeLayout(RD);
layoutFields(RD);
honorDeclspecAlign(RD);
}
void MicrosoftRecordLayoutBuilder::cxxLayout(const CXXRecordDecl *RD) {
initializeLayout(RD);
initializeCXXLayout(RD);
layoutVFPtr(RD);
layoutNonVirtualBases(RD);
layoutVBPtr(RD);
layoutFields(RD);
DataSize = Size;
NonVirtualAlignment = Alignment;
layoutVirtualBases(RD);
finalizeCXXLayout(RD);
honorDeclspecAlign(RD);
}
void
MicrosoftRecordLayoutBuilder::initializeCXXLayout(const CXXRecordDecl *RD) {
// Calculate pointer size and alignment.
PointerSize =
Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
PointerAlignment = PointerSize;
if (!MaxFieldAlignment.isZero())
PointerAlignment = std::min(PointerAlignment, MaxFieldAlignment);
// Initialize information about the bases.
HasVBPtr = false;
HasVFPtr = false;
SharedVBPtrBase = 0;
PrimaryBase = 0;
VirtualAlignment = CharUnits::One();
// If the record has a dynamic base class, attempt to choose a primary base
// class. It is the first (in direct base class order) non-virtual dynamic
// base class, if one exists.
for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
e = RD->bases_end();
i != e; ++i) {
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
// Handle forced alignment.
if (Layout.getAlignAfterVBases())
AlignAfterVBases = true;
// Handle virtual bases.
if (i->isVirtual()) {
VirtualAlignment = std::max(VirtualAlignment, Layout.getAlignment());
HasVBPtr = true;
continue;
}
// We located a primary base class!
if (!PrimaryBase && Layout.hasVFPtr()) {
PrimaryBase = BaseDecl;
HasVFPtr = true;
}
// We located a base to share a VBPtr with!
if (!SharedVBPtrBase && Layout.hasVBPtr()) {
SharedVBPtrBase = BaseDecl;
HasVBPtr = true;
}
updateAlignment(Layout.getAlignment());
}
// Use LayoutFields to compute the alignment of the fields. The layout
// is discarded. This is the simplest way to get all of the bit-field
// behavior correct and is not actually very expensive.
layoutFields(RD);
Size = CharUnits::Zero();
FieldOffsets.clear();
}
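// Illustrative example (hypothetical, not part of this patch): MSMid
// introduces a vbptr for its virtual base MSVBase; MSMost inherits MSMid
// non-virtually, so it reuses MSMid's vbptr (SharedVBPtrBase == MSMid)
// instead of allocating its own. For MSMost, hasVBPtr() is true but
// hasOwnVBPtr() is false.
struct MSVBase { int a; };
struct MSMid : virtual MSVBase { int b; };
struct MSMost : MSMid { int c; };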
void MicrosoftRecordLayoutBuilder::layoutVFPtr(const CXXRecordDecl *RD) {
// If we have a primary base then our VFPtr was already laid out
if (PrimaryBase)
return;
// Look at all of our methods to determine if we need a VFPtr. We need a
// vfptr if we define a new virtual function.
if (!HasVFPtr && RD->isDynamicClass())
for (CXXRecordDecl::method_iterator i = RD->method_begin(),
e = RD->method_end();
!HasVFPtr && i != e; ++i)
HasVFPtr = i->isVirtual() && i->size_overridden_methods() == 0;
if (!HasVFPtr)
return;
// MSVC potentially over-aligns the vf-table pointer by giving it
// the max alignment of all the non-virtual data in the class. The resulting
// layout is essentially { vftbl, { nvdata } }. This is completely
// unnecessary, but we're not here to pass judgment.
Size += Alignment;
updateAlignment(PointerAlignment);
}
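// Illustrative example (hypothetical; mirrors class D in the updated
// record-layout test below): because the class contains a double, the vfptr
// is allocated in an 8-byte block, so 'd' lands at offset 8 and the class is
// 16 bytes on 32-bit MSVC rather than 12.
struct MSOverAlignedVfptr {
  virtual void f() {}
  double d;
};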
void
MicrosoftRecordLayoutBuilder::layoutNonVirtualBases(const CXXRecordDecl *RD) {
LazyEmptyBase = 0;
// Lay out the primary base first.
if (PrimaryBase)
layoutNonVirtualBase(PrimaryBase);
// Iterate through the bases and lay out the non-virtual ones.
for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
e = RD->bases_end();
i != e; ++i) {
if (i->isVirtual())
continue;
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(i->getType()->castAs<RecordType>()->getDecl());
if (BaseDecl != PrimaryBase)
layoutNonVirtualBase(BaseDecl);
}
}
void
MicrosoftRecordLayoutBuilder::layoutNonVirtualBase(const CXXRecordDecl *RD) {
const ASTRecordLayout *Layout = RD ? &Context.getASTRecordLayout(RD) : 0;
// If we have a lazy empty base we haven't laid out yet, do that now.
if (LazyEmptyBase) {
const ASTRecordLayout &LazyLayout =
Context.getASTRecordLayout(LazyEmptyBase);
Size = Size.RoundUpToAlignment(LazyLayout.getAlignment());
Bases.insert(std::make_pair(LazyEmptyBase, Size));
// Empty bases only consume space when followed by another empty base.
if (RD && Layout->getNonVirtualSize().isZero())
Size++;
LazyEmptyBase = 0;
}
// RD is null when flushing the final lazy base.
if (!RD)
return;
if (Layout->getNonVirtualSize().isZero()) {
LazyEmptyBase = RD;
return;
}
// Insert the base here.
CharUnits BaseOffset = Size.RoundUpToAlignment(Layout->getAlignment());
Bases.insert(std::make_pair(RD, BaseOffset));
Size = BaseOffset + Layout->getDataSize();
// Note: we don't update alignment here because it was accounted
// for during initialization.
}
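// Illustrative example (hypothetical, not part of this patch): MSEmpty1 is
// followed by another empty base, so it consumes one byte; MSEmpty2, the
// last empty base, stays lazy and is flushed at offset 1, where it may alias
// padding before the first field. Expected 32-bit cl.exe layout per the
// rules above: MSEmpty1 at 0, MSEmpty2 at 1, x at 4, sizeof = 8.
struct MSEmpty1 {};
struct MSEmpty2 {};
struct MSTwoEmpties : MSEmpty1, MSEmpty2 { int x; };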
void MicrosoftRecordLayoutBuilder::layoutVBPtr(const CXXRecordDecl *RD) {
if (!HasVBPtr)
VBPtrOffset = CharUnits::fromQuantity(-1);
else if (SharedVBPtrBase) {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(SharedVBPtrBase);
VBPtrOffset = Bases[SharedVBPtrBase] + Layout.getVBPtrOffset();
} else {
updateAlignment(PointerAlignment);
VBPtrOffset = Size.RoundUpToAlignment(PointerAlignment);
if (Alignment == PointerAlignment && Size % PointerAlignment) {
CharUnits x = Size + Alignment + Alignment;
Size = VBPtrOffset + Alignment;
// Handle strange padding rules. I have no explanation for why the
// virtual base is padded in such an odd way. My guess is that they
// always add 2 * Alignment and incorrectly round down to the appropriate
// alignment. It's important to get this case correct because it impacts
// the layout of the first member of the struct.
RecordDecl::field_iterator FieldBegin = RD->field_begin();
if (FieldBegin != RD->field_end())
Size += CharUnits::fromQuantity(
x % getAdjustedFieldInfo(*FieldBegin).second);
} else
Size += Alignment;
}
// Flush the lazy empty base.
layoutNonVirtualBase(0);
}
void MicrosoftRecordLayoutBuilder::layoutFields(const RecordDecl *RD) {
LastFieldIsNonZeroWidthBitfield = false;
for (RecordDecl::field_iterator Field = RD->field_begin(),
FieldEnd = RD->field_end();
Field != FieldEnd; ++Field)
layoutField(*Field);
Size = Size.RoundUpToAlignment(Alignment);
}
void MicrosoftRecordLayoutBuilder::layoutField(const FieldDecl *FD) {
if (FD->isBitField()) {
layoutBitField(FD);
return;
}
LastFieldIsNonZeroWidthBitfield = false;
std::pair<CharUnits, CharUnits> FieldInfo = getAdjustedFieldInfo(FD);
CharUnits FieldSize = FieldInfo.first;
CharUnits FieldAlign = FieldInfo.second;
updateAlignment(FieldAlign);
if (IsUnion) {
placeFieldAtZero();
Size = std::max(Size, FieldSize);
} else {
// Round up the current record size to the field's alignment boundary.
CharUnits FieldOffset = Size.RoundUpToAlignment(FieldAlign);
placeFieldAtOffset(FieldOffset);
Size = FieldOffset + FieldSize;
}
}
void MicrosoftRecordLayoutBuilder::layoutBitField(const FieldDecl *FD) {
unsigned Width = FD->getBitWidthValue(Context);
if (Width == 0) {
layoutZeroWidthBitField(FD);
return;
}
std::pair<CharUnits, CharUnits> FieldInfo = getAdjustedFieldInfo(FD);
CharUnits FieldSize = FieldInfo.first;
CharUnits FieldAlign = FieldInfo.second;
// Clamp the bitfield to a containable size for the sake of being able
// to lay it out. Sema will issue an error.
if (Width > Context.toBits(FieldSize))
Width = Context.toBits(FieldSize);
// Check to see if this bitfield fits into an existing allocation. Note:
// MSVC refuses to pack bitfields of formal types with different sizes
// into the same allocation.
if (!IsUnion && LastFieldIsNonZeroWidthBitfield &&
CurrentBitfieldSize == FieldSize && Width <= RemainingBitsInField) {
placeFieldAtBitOffset(Context.toBits(Size) - RemainingBitsInField);
RemainingBitsInField -= Width;
return;
}
LastFieldIsNonZeroWidthBitfield = true;
CurrentBitfieldSize = FieldSize;
if (IsUnion) {
placeFieldAtZero();
Size = std::max(Size, FieldSize);
// TODO: Add a Sema warning that MS ignores bitfield alignment in unions.
} else {
// Allocate a new block of memory and place the bitfield in it.
CharUnits FieldOffset = Size.RoundUpToAlignment(FieldAlign);
placeFieldAtOffset(FieldOffset);
Size = FieldOffset + FieldSize;
updateAlignment(FieldAlign);
RemainingBitsInField = Context.toBits(FieldSize) - Width;
}
}
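// Illustrative example (hypothetical, not part of this patch): the two
// bit-fields below are not merged into one allocation because their declared
// types have different sizes, so each starts its own storage unit: 'a' in a
// 2-byte unit at offset 0, 'b' in a 4-byte unit at offset 4, giving
// sizeof = 8 under 32-bit cl.exe.
struct MSSplitBitfields {
  short a : 4;
  int b : 4;
};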
void
MicrosoftRecordLayoutBuilder::layoutZeroWidthBitField(const FieldDecl *FD) {
// Zero-width bitfields are ignored unless they follow a non-zero-width
// bitfield.
std::pair<CharUnits, CharUnits> FieldInfo = getAdjustedFieldInfo(FD);
CharUnits FieldSize = FieldInfo.first;
CharUnits FieldAlign = FieldInfo.second;
if (!LastFieldIsNonZeroWidthBitfield) {
placeFieldAtOffset(IsUnion ? CharUnits::Zero() : Size);
// TODO: Add a Sema warning that MS ignores alignment for zero
// sized bitfields that occur after zero-sized bitfields or non-bitfields.
return;
}
LastFieldIsNonZeroWidthBitfield = false;
if (IsUnion) {
placeFieldAtZero();
Size = std::max(Size, FieldSize);
} else {
// Round up the current record size to the field's alignment boundary.
CharUnits FieldOffset = Size.RoundUpToAlignment(FieldAlign);
placeFieldAtOffset(FieldOffset);
Size = FieldOffset;
updateAlignment(FieldAlign);
}
}
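// Illustrative example (hypothetical, not part of this patch): per the rules
// above, the zero-width bit-field in MSZeroWidth closes the current int
// unit, so 'b' starts a new unit at offset 4; in MSZeroWidthIgnored the
// zero-width bit-field follows a non-bit-field and is ignored, so 'd' is
// placed at offset 1.
struct MSZeroWidth {
  int a : 3;
  int : 0;
  int b : 3;
};
struct MSZeroWidthIgnored {
  char c;
  int : 0;
  char d;
};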
void MicrosoftRecordLayoutBuilder::layoutVirtualBases(const CXXRecordDecl *RD) {
if (!HasVBPtr)
return;
updateAlignment(VirtualAlignment);
// Zero-sized v-bases obey the alignment attribute so apply it here. The
// alignment attribute is normally accounted for in honorDeclspecAlign().
if (unsigned MaxAlign = RD->getMaxAlignment())
updateAlignment(Context.toCharUnitsFromBits(MaxAlign));
llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtordisp =
computeVtorDispSet(RD);
// Iterate through the virtual bases and lay them out.
for (CXXRecordDecl::base_class_const_iterator i = RD->vbases_begin(),
e = RD->vbases_end();
i != e; ++i) {
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(i->getType()->castAs<RecordType>()->getDecl());
layoutVirtualBase(BaseDecl, HasVtordisp.count(BaseDecl));
}
}
void MicrosoftRecordLayoutBuilder::layoutVirtualBase(const CXXRecordDecl *RD,
bool HasVtordisp) {
if (LazyEmptyBase) {
const ASTRecordLayout &LazyLayout =
Context.getASTRecordLayout(LazyEmptyBase);
Size = Size.RoundUpToAlignment(LazyLayout.getAlignment());
VBases.insert(
std::make_pair(LazyEmptyBase, ASTRecordLayout::VBaseInfo(Size, false)));
// Empty bases only consume space when followed by another empty base.
// The space consumed is in an Alignment sized/aligned block and the v-base
// is placed at its alignment offset into the chunk, unless its alignment
// is less than the size of a pointer, in which case it is placed at a
// pointer-width offset in the chunk. We have no idea why.
if (RD && Context.getASTRecordLayout(RD).getNonVirtualSize().isZero())
Size = Size.RoundUpToAlignment(Alignment) + PointerSize;
LazyEmptyBase = 0;
}
// RD is null when flushing the final lazy virtual base.
if (!RD)
return;
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
if (Layout.getNonVirtualSize().isZero() && !HasVtordisp) {
LazyEmptyBase = RD;
return;
}
CharUnits BaseNVSize = Layout.getNonVirtualSize();
CharUnits BaseAlign = Layout.getAlignment();
if (HasVtordisp)
Size = Size.RoundUpToAlignment(Alignment) + PointerSize;
Size = Size.RoundUpToAlignment(BaseAlign);
// Insert the base here.
CharUnits BaseOffset = Size.RoundUpToAlignment(BaseAlign);
VBases.insert(
std::make_pair(RD, ASTRecordLayout::VBaseInfo(BaseOffset, HasVtordisp)));
Size = BaseOffset + BaseNVSize;
// Note: we don't update alignment here because it was accounted for in
// InitializeLayout.
}
void MicrosoftRecordLayoutBuilder::finalizeCXXLayout(const CXXRecordDecl *RD) {
// Flush the lazy virtual base.
layoutVirtualBase(0, false);
if (RD->vbases_begin() == RD->vbases_end() || AlignAfterVBases)
Size = Size.RoundUpToAlignment(Alignment);
if (Size.isZero())
Size = Alignment;
}
void MicrosoftRecordLayoutBuilder::honorDeclspecAlign(const RecordDecl *RD) {
if (unsigned MaxAlign = RD->getMaxAlignment()) {
AlignAfterVBases = true;
updateAlignment(Context.toCharUnitsFromBits(MaxAlign));
Size = Size.RoundUpToAlignment(Alignment);
}
}
static bool
RequiresVtordisp(const llvm::SmallPtrSet<const CXXRecordDecl *, 2> &HasVtordisp,
const CXXRecordDecl *RD) {
if (HasVtordisp.count(RD))
return true;
// If any of a virtual base's non-virtual bases (recursively) requires a
// vtordisp, then so does this virtual base.
for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
e = RD->bases_end();
i != e; ++i)
if (!i->isVirtual() &&
RequiresVtordisp(
HasVtordisp,
cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl())))
return true;
return false;
}
llvm::SmallPtrSet<const CXXRecordDecl *, 2>
MicrosoftRecordLayoutBuilder::computeVtorDispSet(const CXXRecordDecl *RD) {
llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtordisp;
// If any of our bases need a vtordisp for this type, so do we. Check our
// direct bases for vtordisp requirements.
for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
e = RD->bases_end();
i != e; ++i) {
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
for (ASTRecordLayout::VBaseOffsetsMapTy::const_iterator
bi = Layout.getVBaseOffsetsMap().begin(),
be = Layout.getVBaseOffsetsMap().end();
bi != be; ++bi)
if (bi->second.hasVtorDisp())
HasVtordisp.insert(bi->first);
}
// If we define a constructor or destructor and override a function that is
// defined in a virtual base's vftable, that virtual base needs a vtordisp.
// Here we collect the set of classes in whose vftables our overridden
// virtual methods actually live. Virtual bases in this set will require
// vtordisps. In addition, virtual bases that contain non-virtual bases that
// define functions we override also require vtordisps; this case is checked
// explicitly below.
if (RD->hasUserDeclaredConstructor() || RD->hasUserDeclaredDestructor()) {
llvm::SmallPtrSet<const CXXMethodDecl *, 8> Work;
// Seed the working set with our non-destructor virtual methods.
for (CXXRecordDecl::method_iterator i = RD->method_begin(),
e = RD->method_end();
i != e; ++i)
if ((*i)->isVirtual() && (*i) != RD->getDestructor())
Work.insert(*i);
while (!Work.empty()) {
const CXXMethodDecl *MD = *Work.begin();
CXXMethodDecl::method_iterator i = MD->begin_overridden_methods(),
e = MD->end_overridden_methods();
if (i == e)
// If a virtual method overrides nothing, it lives in its parent's vftable.
HasVtordisp.insert(MD->getParent());
else
Work.insert(i, e);
// We've finished processing this element, remove it from the working set.
Work.erase(MD);
}
}
// Re-check all of our vbases for vtordisp requirements (in case their
// non-virtual bases have vtordisp requirements).
for (CXXRecordDecl::base_class_const_iterator i = RD->vbases_begin(),
e = RD->vbases_end();
i != e; ++i) {
const CXXRecordDecl *BaseDecl = i->getType()->getAsCXXRecordDecl();
if (!HasVtordisp.count(BaseDecl) && RequiresVtordisp(HasVtordisp, BaseDecl))
HasVtordisp.insert(BaseDecl);
}
return HasVtordisp;
}
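// Illustrative example (hypothetical, not part of this patch): MSOverrider
// declares a constructor and overrides a virtual function that lives in the
// vftable of its virtual base MSVfBase, so computeVtorDispSet() marks
// MSVfBase and a vtordisp is laid out immediately before that subobject.
struct MSVfBase {
  virtual void f() {}
};
struct MSOverrider : virtual MSVfBase {
  MSOverrider() {}
  void f() {}
};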
/// \brief Get or compute information about the layout of the specified record
/// (struct/union/class), which indicates its size and field position
/// information.
const ASTRecordLayout *
ASTContext::BuildMicrosoftASTRecordLayout(const RecordDecl *D) const {
MicrosoftRecordLayoutBuilder Builder(*this);
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
Builder.cxxLayout(RD);
return new (*this) ASTRecordLayout(
*this, Builder.Size, Builder.Alignment,
Builder.HasVFPtr && !Builder.PrimaryBase, Builder.HasVFPtr,
Builder.HasVBPtr && !Builder.SharedVBPtrBase, Builder.VBPtrOffset,
Builder.DataSize, Builder.FieldOffsets.data(),
Builder.FieldOffsets.size(), Builder.DataSize,
Builder.NonVirtualAlignment, CharUnits::Zero(), Builder.PrimaryBase,
false, Builder.AlignAfterVBases, Builder.Bases, Builder.VBases);
} else {
Builder.layout(D);
return new (*this) ASTRecordLayout(
*this, Builder.Size, Builder.Alignment, Builder.Size,
Builder.FieldOffsets.data(), Builder.FieldOffsets.size());
}
}
/// getASTRecordLayout - Get or compute information about the layout of the
/// specified record (struct/union/class), which indicates its size and field
/// position information.
@ -2362,27 +3068,15 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const {
const ASTRecordLayout *Entry = ASTRecordLayouts[D];
if (Entry) return *Entry;
const ASTRecordLayout *NewEntry;
const ASTRecordLayout *NewEntry = 0;
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
if (isMsLayout(D) && !D->getASTContext().getExternalSource()) {
NewEntry = BuildMicrosoftASTRecordLayout(D);
} else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
EmptySubobjectMap EmptySubobjects(*this, RD);
RecordLayoutBuilder Builder(*this, &EmptySubobjects);
Builder.Layout(RD);
// MSVC gives the vb-table pointer an alignment equal to that of
// the non-virtual part of the structure. That's an inherently
// multi-pass operation. If our first pass doesn't give us
// adequate alignment, try again with the specified minimum
// alignment. This is *much* more maintainable than computing the
// alignment in advance in a separately-coded pass; it's also
// significantly more efficient in the common case where the
// vb-table doesn't need extra padding.
if (Builder.VBPtrOffset != CharUnits::fromQuantity(-1) &&
(Builder.VBPtrOffset % Builder.NonVirtualAlignment) != 0) {
Builder.resetWithTargetAlignment(Builder.NonVirtualAlignment);
Builder.Layout(RD);
}
// In certain situations, we are allowed to lay out objects in the
// tail-padding of base classes. This is ABI-dependent.
// FIXME: this should be stored in the record layout.
@ -2394,11 +3088,12 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const {
skipTailPadding ? Builder.getSize() : Builder.getDataSize();
CharUnits NonVirtualSize =
skipTailPadding ? DataSize : Builder.NonVirtualSize;
NewEntry =
new (*this) ASTRecordLayout(*this, Builder.getSize(),
Builder.Alignment,
Builder.HasOwnVFPtr,
RD->isDynamicClass(),
Builder.HasOwnVBPtr,
Builder.VBPtrOffset,
DataSize,
Builder.FieldOffsets.data(),
@ -2408,6 +3103,7 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const {
EmptySubobjects.SizeOfLargestEmptySubobject,
Builder.PrimaryBase,
Builder.PrimaryBaseIsVirtual,
true,
Builder.Bases, Builder.VBases);
} else {
RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0);
@ -2564,16 +3260,19 @@ static void DumpCXXRecordLayout(raw_ostream &OS,
IndentLevel++;
const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
bool HasVfptr = Layout.hasOwnVFPtr();
bool HasVbptr = Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1);
bool HasOwnVFPtr = Layout.hasOwnVFPtr();
bool HasOwnVBPtr = Layout.hasOwnVBPtr();
// Vtable pointer.
if (RD->isDynamicClass() && !PrimaryBase &&
!C.getTargetInfo().getCXXABI().isMicrosoft()) {
if (RD->isDynamicClass() && !PrimaryBase && !isMsLayout(RD)) {
PrintOffset(OS, Offset, IndentLevel);
OS << '(' << *RD << " vtable pointer)\n";
} else if (HasOwnVFPtr) {
PrintOffset(OS, Offset, IndentLevel);
// vfptr (for Microsoft C++ ABI)
OS << '(' << *RD << " vftable pointer)\n";
}
// Dump (non-virtual) bases
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
E = RD->bases_end(); I != E; ++I) {
@ -2592,12 +3291,8 @@ static void DumpCXXRecordLayout(raw_ostream &OS,
/*IncludeVirtualBases=*/false);
}
// vfptr and vbptr (for Microsoft C++ ABI)
if (HasVfptr) {
PrintOffset(OS, Offset, IndentLevel);
OS << '(' << *RD << " vftable pointer)\n";
}
if (HasVbptr) {
// vbptr (for Microsoft C++ ABI)
if (HasOwnVBPtr) {
PrintOffset(OS, Offset + Layout.getVBPtrOffset(), IndentLevel);
OS << '(' << *RD << " vbtable pointer)\n";
}
@ -2650,7 +3345,8 @@ static void DumpCXXRecordLayout(raw_ostream &OS,
PrintIndentNoOffset(OS, IndentLevel - 1);
OS << "[sizeof=" << Layout.getSize().getQuantity();
OS << ", dsize=" << Layout.getDataSize().getQuantity();
if (!isMsLayout(RD))
OS << ", dsize=" << Layout.getDataSize().getQuantity();
OS << ", align=" << Layout.getAlignment().getQuantity() << '\n';
PrintIndentNoOffset(OS, IndentLevel - 1);
@ -2677,7 +3373,8 @@ void ASTContext::DumpRecordLayout(const RecordDecl *RD,
OS << "\nLayout: ";
OS << "<ASTRecordLayout\n";
OS << " Size:" << toBits(Info.getSize()) << "\n";
OS << " DataSize:" << toBits(Info.getDataSize()) << "\n";
if (!isMsLayout(RD))
OS << " DataSize:" << toBits(Info.getDataSize()) << "\n";
OS << " Alignment:" << toBits(Info.getAlignment()) << "\n";
OS << " FieldOffsets: [";
for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i) {

View File

@ -696,7 +696,7 @@ CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
}
// Add a vb-table pointer if the layout insists.
if (Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1)) {
if (Layout.hasOwnVBPtr()) {
CharUnits VBPtrOffset = Layout.getVBPtrOffset();
llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));

View File

@ -54,10 +54,6 @@ void VBTableBuilder::enumerateVBTables(VBTableVector &VBTables) {
}
}
bool VBTableBuilder::hasVBPtr(const CXXRecordDecl *RD) {
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
return Layout.getVBPtrOffset().getQuantity() != -1;
}
void VBTableBuilder::findUnambiguousPaths(const CXXRecordDecl *ReusingBase,
BaseSubobject CurSubobject,
@ -65,10 +61,11 @@ void VBTableBuilder::findUnambiguousPaths(const CXXRecordDecl *ReusingBase,
size_t PathsStart = Paths.size();
bool ReuseVBPtrFromBase = true;
const CXXRecordDecl *CurBase = CurSubobject.getBase();
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(CurBase);
// If this base has a vbptr, then we've found a path. These are not full
// paths, so we don't use CXXBasePath.
if (hasVBPtr(CurBase)) {
if (Layout.hasOwnVBPtr()) {
ReuseVBPtrFromBase = false;
VBTablePath *Info = new VBTablePath(
VBTableInfo(ReusingBase, CurSubobject, /*GV=*/0));
@ -76,7 +73,6 @@ void VBTableBuilder::findUnambiguousPaths(const CXXRecordDecl *ReusingBase,
}
// Recurse onto any bases which themselves have virtual bases.
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(CurBase);
for (CXXRecordDecl::base_class_const_iterator I = CurBase->bases_begin(),
E = CurBase->bases_end(); I != E; ++I) {
const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();

View File

@ -11503,8 +11503,7 @@ Decl *Sema::ActOnIvar(Scope *S,
if (BitWidth) {
// 6.7.2.1p3, 6.7.2.1p4
BitWidth =
VerifyBitField(Loc, II, T, /*IsMsStruct=*/false, BitWidth).take();
BitWidth = VerifyBitField(Loc, II, T, /*IsMsStruct*/false, BitWidth).take();
if (!BitWidth)
D.setInvalidType();
} else {

View File

@ -4451,6 +4451,13 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
}
}
// Check to see if we're trying to lay out a dynamic class using the
// ms_struct attribute.
if (Record->isMsStruct(Context) && Record->isDynamicClass()) {
Diag(Record->getLocation(), diag::warn_pragma_ms_struct_failed);
Record->dropAttr<MsStructAttr>();
}
// Declare inheriting constructors. We do this eagerly here because:
// - The standard requires an eager diagnostic for conflicting inheriting
// constructors from different classes.

View File

@ -1,4 +1,4 @@
// RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 %s -triple i386-apple-darwin10 -emit-llvm -o - | FileCheck %s
struct __attribute((packed)) x {int a : 24;};
int a(struct x* g) {
// CHECK: load i24

View File

@ -156,7 +156,9 @@ C::C() {
// CHECK-NEXT: %[[vbptr_off:.*]] = getelementptr inbounds i8* %[[this_i8]], i64 0
// CHECK-NEXT: %[[vbptr:.*]] = bitcast i8* %[[vbptr_off]] to [2 x i32]**
// CHECK-NEXT: store [2 x i32]* @"\01??_8C@constructors@@7B@", [2 x i32]** %[[vbptr]]
// CHECK-NEXT: bitcast %"struct.constructors::C"* %{{.*}} to %"struct.constructors::A"*
// CHECK-NEXT: bitcast %"struct.constructors::C"* %{{.*}} to i8*
// CHECK-NEXT: getelementptr inbounds i8* %{{.*}}, i64 4
// CHECK-NEXT: bitcast i8* %{{.*}} to %"struct.constructors::A"*
// CHECK-NEXT: call x86_thiscallcc %"struct.constructors::A"* @"\01??0A@constructors@@QAE@XZ"(%"struct.constructors::A"* %{{.*}})
// CHECK-NEXT: br label %[[SKIP_VBASES]]
//
@ -189,7 +191,9 @@ D::D() {
// CHECK-NEXT: %[[vbptr_off:.*]] = getelementptr inbounds i8* %[[this_i8]], i64 0
// CHECK-NEXT: %[[vbptr:.*]] = bitcast i8* %[[vbptr_off]] to [2 x i32]**
// CHECK-NEXT: store [2 x i32]* @"\01??_8D@constructors@@7B@", [2 x i32]** %[[vbptr]]
// CHECK-NEXT: bitcast %"struct.constructors::D"* %{{.*}} to %"struct.constructors::A"*
// CHECK-NEXT: bitcast %"struct.constructors::D"* %{{.*}} to i8*
// CHECK-NEXT: getelementptr inbounds i8* %{{.*}}, i64 4
// CHECK-NEXT: bitcast i8* %{{.*}} to %"struct.constructors::A"*
// CHECK-NEXT: call x86_thiscallcc %"struct.constructors::A"* @"\01??0A@constructors@@QAE@XZ"(%"struct.constructors::A"* %{{.*}})
// CHECK-NEXT: br label %[[SKIP_VBASES]]
//
@ -217,7 +221,9 @@ E::E() {
// CHECK-NEXT: %[[offs:.*]] = getelementptr inbounds i8* %[[this_i8]], i64 4
// CHECK-NEXT: %[[vbptr_C:.*]] = bitcast i8* %[[offs]] to [2 x i32]**
// CHECK-NEXT: store [2 x i32]* @"\01??_8E@constructors@@7BC@1@@", [2 x i32]** %[[vbptr_C]]
// CHECK-NEXT: bitcast %"struct.constructors::E"* %{{.*}} to %"struct.constructors::A"*
// CHECK-NEXT: bitcast %"struct.constructors::E"* %{{.*}} to i8*
// CHECK-NEXT: getelementptr inbounds i8* %{{.*}}, i64 4
// CHECK-NEXT: bitcast i8* %{{.*}} to %"struct.constructors::A"*
// CHECK-NEXT: call x86_thiscallcc %"struct.constructors::A"* @"\01??0A@constructors@@QAE@XZ"(%"struct.constructors::A"* %{{.*}})
// CHECK: call x86_thiscallcc %"struct.constructors::C"* @"\01??0C@constructors@@QAE@XZ"(%"struct.constructors::C"* %{{.*}}, i32 0)
// CHECK-NEXT: br label %[[SKIP_VBASES]]

View File

@ -1,5 +1,5 @@
// RUN: %clang_cc1 -emit-llvm -fobjc-exceptions -o %t %s
// RUN: %clang_cc1 -g -emit-llvm -fobjc-exceptions -o %t %s
// RUN: %clang_cc1 -emit-llvm -fobjc-exceptions -triple x86_64-apple-darwin -o %t %s
// RUN: %clang_cc1 -g -emit-llvm -fobjc-exceptions -triple x86_64-apple-darwin -o %t %s
// An error could be seen when targeting x86_64-win32.
//

View File

@ -8,7 +8,7 @@
// rdar://10830559
#pragma ms_struct on
//#pragma ms_struct on
template< typename T >
class Templated

View File

@ -164,7 +164,7 @@ int main() {
// CHECK-NEXT: 0 | (D vftable pointer)
// CHECK-NEXT: 8 | double a
// CHECK-NEXT: sizeof=16, dsize=16, align=8
// CHECK-NEXT: sizeof=16, align=8
// CHECK-NEXT: nvsize=16, nvalign=8
// CHECK: %class.D = type { i32 (...)**, double }
@ -173,7 +173,7 @@ int main() {
// CHECK-NEXT: 0 | (B vftable pointer)
// CHECK-NEXT: 4 | int b_field
// CHECK-NEXT: sizeof=8, dsize=8, align=4
// CHECK-NEXT: sizeof=8, align=4
// CHECK-NEXT: nvsize=8, nvalign=4
// CHECK: %class.B = type { i32 (...)**, i32 }
@ -185,7 +185,7 @@ int main() {
// CHECK-NEXT: 8 | int a_field
// CHECK-NEXT: 12 | char one
// CHECK-NEXT: sizeof=16, dsize=16, align=4
// CHECK-NEXT: sizeof=16, align=4
// CHECK-NEXT: nvsize=16, nvalign=4
// CHECK: 0 | class C
@ -207,7 +207,7 @@ int main() {
// CHECK-NEXT: 72 | int a_field
// CHECK-NEXT: 76 | char one
// CHECK-NEXT: sizeof=80, dsize=80, align=8
// CHECK-NEXT: sizeof=80, align=8
// CHECK-NEXT: nvsize=64, nvalign=8
// CHECK: %class.A = type { %class.B, i32, i8 }
@ -237,10 +237,10 @@ int main() {
// CHECK-NEXT: 88 | int a_field
// CHECK-NEXT: 92 | char one
// CHECK-NEXT: sizeof=80, dsize=80, align=8
// CHECK-NEXT: sizeof=80, align=8
// CHECK-NEXT: nvsize=64, nvalign=8
// CHECK: sizeof=96, dsize=96, align=8
// CHECK: sizeof=96, align=8
// CHECK-NEXT: nvsize=96, nvalign=8
// CHECK: %struct.BaseStruct = type { double, float, %class.C }
@ -267,18 +267,18 @@ int main() {
// CHECK-NEXT: 84 | int b_field
// CHECK-NEXT: 88 | int a_field
// CHECK-NEXT: 92 | char one
// CHECK-NEXT: sizeof=80, dsize=80, align=8
// CHECK-NEXT: sizeof=80, align=8
// CHECK-NEXT: nvsize=64, nvalign=8
// CHECK: 96 | int x
// CHECK-NEXT: sizeof=104, dsize=104, align=8
// CHECK-NEXT: sizeof=104, align=8
// CHECK-NEXT: nvsize=104, nvalign=8
// CHECK: %struct.DerivedStruct = type { %struct.BaseStruct, i32 }
// CHECK: 0 | struct G
// CHECK-NEXT: 0 | int g_field
// CHECK-NEXT: sizeof=4, dsize=4, align=4
// CHECK-NEXT: sizeof=4, align=4
// CHECK-NEXT: nvsize=4, nvalign=4
// CHECK: 0 | struct H
@ -288,7 +288,7 @@ int main() {
// CHECK-NEXT: 8 | class D (virtual base)
// CHECK-NEXT: 8 | (D vftable pointer)
// CHECK-NEXT: 16 | double a
// CHECK-NEXT: sizeof=24, dsize=24, align=8
// CHECK-NEXT: sizeof=24, align=8
// CHECK-NEXT: nvsize=8, nvalign=4
// CHECK: %struct.H = type { %struct.G, i32*, %class.D }
@ -300,7 +300,7 @@ int main() {
// CHECK-NEXT: 24 | class D (virtual base)
// CHECK-NEXT: 24 | (D vftable pointer)
// CHECK-NEXT: 32 | double a
// CHECK-NEXT: sizeof=40, dsize=40, align=8
// CHECK-NEXT: sizeof=40, align=8
// CHECK-NEXT: nvsize=24, nvalign=8
// CHECK: %struct.I = type { i32 (...)**, [4 x i8], i32*, double, %class.D }
@ -308,12 +308,12 @@ int main() {
// CHECK: 0 | struct L
// CHECK-NEXT: 0 | int l
// CHECK-NEXT: sizeof=4, dsize=4, align=4
// CHECK-NEXT: sizeof=4, align=4
// CHECK-NEXT: nvsize=4, nvalign=4
// CHECK: 0 | struct K
// CHECK-NEXT: 0 | int k
// CHECK-NEXT: sizeof=4, dsize=4, align=4
// CHECK-NEXT: sizeof=4, align=4
// CHECK-NEXT: nvsize=4, nvalign=4
// CHECK: 0 | struct M
@ -321,42 +321,42 @@ int main() {
// CHECK-NEXT: 4 | int m
// CHECK-NEXT: 8 | struct K (virtual base)
// CHECK-NEXT: 8 | int k
// CHECK-NEXT: sizeof=12, dsize=12, align=4
// CHECK-NEXT: sizeof=12, align=4
//CHECK: %struct.M = type { i32*, i32, %struct.K }
//CHECK: %struct.M.base = type { i32*, i32 }
// CHECK: 0 | struct N
// CHECK-NEXT: 0 | (N vftable pointer)
// CHECK-NEXT: 4 | struct L (base)
// CHECK-NEXT: 4 | int l
// CHECK-NEXT: 8 | struct M (base)
// CHECK-NEXT: 8 | (M vbtable pointer)
// CHECK-NEXT: 12 | int m
// CHECK-NEXT: 0 | (N vftable pointer)
// CHECK-NEXT: 16 | struct K (virtual base)
// CHECK-NEXT: 16 | int k
// CHECK-NEXT: sizeof=20, dsize=20, align=4
// CHECK-NEXT: sizeof=20, align=4
// CHECK-NEXT: nvsize=16, nvalign=4
//CHECK: %struct.N = type { i32 (...)**, %struct.L, %struct.M.base, %struct.K }
// FIXME: MSVC places struct H at offset 8.
// CHECK: 0 | struct O
// CHECK-NEXT: 4 | struct H (base)
// CHECK-NEXT: 4 | struct G (base)
// CHECK-NEXT: 4 | int g_field
// CHECK-NEXT: 8 | (H vbtable pointer)
// CHECK-NEXT: 12 | struct G (base)
// CHECK-NEXT: 12 | int g_field
// CHECK-NEXT: 0 | (O vftable pointer)
// CHECK-NEXT: 16 | class D (virtual base)
// CHECK-NEXT: 16 | (D vftable pointer)
// CHECK-NEXT: 24 | double a
// CHECK-NEXT: sizeof=32, dsize=32, align=8
// CHECK-NEXT: nvsize=16, nvalign=4
// CHECK-NEXT: 8 | struct H (base)
// CHECK-NEXT: 8 | struct G (base)
// CHECK-NEXT: 8 | int g_field
// CHECK-NEXT: 12 | (H vbtable pointer)
// CHECK-NEXT: 16 | struct G (base)
// CHECK-NEXT: 16 | int g_field
// CHECK-NEXT: 24 | class D (virtual base)
// CHECK-NEXT: 24 | (D vftable pointer)
// CHECK-NEXT: 32 | double a
// CHECK-NEXT: | [sizeof=40, align=8
// CHECK-NEXT: | nvsize=24, nvalign=8]
// CHECK: struct.O = type { i32 (...)**, [4 x i8], %struct.H.base, %struct.G, [4 x i8], %class.D }
// CHECK: struct.O.base = type { i32 (...)**, [4 x i8], %struct.H.base, %struct.G, [4 x i8] }
//CHECK: %struct.O = type { i32 (...)**, %struct.H.base, %struct.G, %class.D }
//CHECK: %struct.O.base = type { i32 (...)**, %struct.H.base, %struct.G }
// CHECK: 0 | struct P
// CHECK-NEXT: 0 | struct M (base)
@ -367,20 +367,20 @@ int main() {
// CHECK-NEXT: 12 | int k
// CHECK-NEXT: 16 | struct L (virtual base)
// CHECK-NEXT: 16 | int l
// CHECK-NEXT: sizeof=20, dsize=20, align=4
// CHECK-NEXT: sizeof=20, align=4
// CHECK-NEXT: nvsize=12, nvalign=4
//CHECK: %struct.P = type { %struct.M.base, i32, %struct.K, %struct.L }
// CHECK: 0 | struct R (empty)
// CHECK-NEXT: sizeof=1, dsize=0, align=1
// CHECK-NEXT: sizeof=1, align=1
// CHECK-NEXT: nvsize=0, nvalign=1
//CHECK: %struct.R = type { i8 }
// CHECK: 0 | struct f
// CHECK-NEXT: 0 | (f vftable pointer)
// CHECK-NEXT: sizeof=4, dsize=4, align=4
// CHECK-NEXT: sizeof=4, align=4
// CHECK-NEXT: nvsize=4, nvalign=4
// CHECK: 0 | struct s
@ -390,12 +390,12 @@ int main() {
// CHECK-NEXT: 12 | (vtordisp for vbase f)
// CHECK-NEXT: 16 | struct f (virtual base)
// CHECK-NEXT: 16 | (f vftable pointer)
// CHECK-NEXT: sizeof=20, dsize=20, align=4
// CHECK-NEXT: sizeof=20, align=4
// CHECK-NEXT: nvsize=12, nvalign=4
// CHECK: 0 | class IA
// CHECK-NEXT: 0 | (IA vftable pointer)
// CHECK-NEXT: sizeof=4, dsize=4, align=4
// CHECK-NEXT: sizeof=4, align=4
// CHECK-NEXT: nvsize=4, nvalign=4
// CHECK: 0 | class ICh
@ -404,7 +404,7 @@ int main() {
// CHECK-NEXT: 8 | (vtordisp for vbase IA)
// CHECK-NEXT: 12 | class IA (virtual base)
// CHECK-NEXT: 12 | (IA vftable pointer)
// CHECK-NEXT: sizeof=16, dsize=16, align=4
// CHECK-NEXT: sizeof=16, align=4
// CHECK-NEXT: nvsize=8, nvalign=4
// CHECK: 0 | struct sd
@ -424,7 +424,7 @@ int main() {
// CHECK-NEXT: 40 | class ICh (virtual base)
// CHECK-NEXT: 40 | (ICh vftable pointer)
// CHECK-NEXT: 44 | (ICh vbtable pointer)
// CHECK-NEXT: sizeof=48, dsize=48, align=4
// CHECK-NEXT: sizeof=48, align=4
// CHECK-NEXT: nvsize=12, nvalign=4
// CHECK: %struct.f = type { i32 (...)** }
@ -435,14 +435,14 @@ int main() {
// CHECK: 0 | struct AV
// CHECK-NEXT: 0 | (AV vftable pointer)
// CHECK-NEXT: sizeof=4, dsize=4, align=4
// CHECK-NEXT: sizeof=4, align=4
// CHECK-NEXT: nvsize=4, nvalign=4
// CHECK: 0 | struct BV
// CHECK-NEXT: 0 | struct AV (primary base)
// CHECK-NEXT: 0 | (AV vftable pointer)
// CHECK-NEXT: sizeof=4, dsize=4, align=4
// CHECK-NEXT: sizeof=4, align=4
// CHECK-NEXT: nvsize=4, nvalign=4
@ -452,7 +452,7 @@ int main() {
// CHECK-NEXT: 8 | struct BV (virtual base)
// CHECK-NEXT: 8 | struct AV (primary base)
// CHECK-NEXT: 8 | (AV vftable pointer)
// CHECK-NEXT: sizeof=12, dsize=12, align=4
// CHECK-NEXT: sizeof=12, align=4
// CHECK-NEXT: nvsize=4, nvalign=4
// CHECK: %struct.AV = type { i32 (...)** }
@ -464,7 +464,7 @@ int main() {
// CHECK-NEXT: 0 | struct BV (primary base)
// CHECK-NEXT: 0 | struct AV (primary base)
// CHECK-NEXT: 0 | (AV vftable pointer)
// CHECK-NEXT: sizeof=4, dsize=4, align=4
// CHECK-NEXT: sizeof=4, align=4
// CHECK-NEXT: nvsize=4, nvalign=4
// CHECK: %struct.DV = type { %struct.BV }
@ -480,14 +480,14 @@ int main() {
// CHECK-NEXT: 12 | struct BV (virtual base)
// CHECK-NEXT: 12 | struct AV (primary base)
// CHECK-NEXT: 12 | (AV vftable pointer)
// CHECK-NEXT: sizeof=16, dsize=16, align=4
// CHECK-NEXT: sizeof=16, align=4
// CHECK-NEXT: nvsize=8, nvalign=4
// CHECK: %struct.EV = type { %struct.DV, %struct.CV.base, [4 x i8], %struct.BV }
// CHECK: %struct.EV.base = type { %struct.DV, %struct.CV.base }
// Overriding a method means that all the vbases containing that
// method need a vtordisp.
// method need a vtordisp. Note: this code will cause an error in cl.exe.
namespace test1 {
struct A { virtual void foo(); };
struct B : A {};
@ -503,6 +503,6 @@ namespace test1 {
// CHECK-NEXT: 16 | struct test1::B (virtual base)
// CHECK-NEXT: 16 | struct test1::A (primary base)
// CHECK-NEXT: 16 | (A vftable pointer)
// CHECK-NEXT: sizeof=20, dsize=20, align=4
// CHECK-NEXT: sizeof=20, align=4
// CHECK-NEXT: nvsize=4, nvalign=4
}

View File

@ -6,7 +6,6 @@
struct A {
unsigned long a:4;
unsigned char b;
A();
};
struct B : public A {