mirror of https://github.com/microsoft/clang.git
[CodeGen, X86] Classify vectors <= 32 bits as INTEGER
We shouldn't crash despite the AMD64 ABI not giving clear guidance as to how to pass around vector types <= 32 bits. Instead, classify such vectors as INTEGER to be compatible with GCC. This fixes PR24162. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@242508 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
c077866752
commit
9d163ef59c
|
@@ -1907,16 +1907,18 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
   if (const VectorType *VT = Ty->getAs<VectorType>()) {
     uint64_t Size = getContext().getTypeSize(VT);
-    if (Size == 32) {
-      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
-      // float> as integer.
+    if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
+      // gcc passes the following as integer:
+      // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
+      // 2 bytes - <2 x char>, <1 x short>
+      // 1 byte  - <1 x char>
       Current = Integer;
 
       // If this type crosses an eightbyte boundary, it should be
       // split.
-      uint64_t EB_Real = (OffsetBase) / 64;
-      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
-      if (EB_Real != EB_Imag)
+      uint64_t EB_Lo = (OffsetBase) / 64;
+      uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
+      if (EB_Lo != EB_Hi)
         Hi = Lo;
     } else if (Size == 64) {
       // gcc passes <1 x double> in memory. :(
|
@@ -212,3 +212,12 @@ int FuncForDerivedPacked(DerivedPacked d) {
   return d.three;
 }
 }
+
+namespace test11 {
+union U {
+  float f1;
+  char __attribute__((__vector_size__(1))) f2;
+};
+int f(union U u) { return u.f2[1]; }
+// CHECK-LABEL: define i32 @_ZN6test111fENS_1UE(i32
+}
Loading…
Reference in New Issue