Generalize NRVO to cover C structs.

This commit generalizes NRVO (the named return value optimization), which was
previously applied only in C++, so that it also covers C structs, both trivial
and non-trivial.
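
For illustration only (this example is not part of the commit; the names are
invented), the change affects C code of the following shape, where the named
local can now be constructed directly in the caller-provided return slot
instead of in a separate temporary that is copied out on return:

struct point { int x[16]; };   /* hypothetical example type */

struct point make_point(void) {
  struct point p = {{0}};      /* with NRVO, 'p' is allocated in the return slot */
  p.x[0] = 1;
  return p;                    /* no extra alloca + memcpy for the return value */
}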

rdar://problem/33599681

Differential Revision: https://reviews.llvm.org/D44968

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@328809 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Akira Hatanaka
Date:   2018-03-29 17:56:24 +00:00
Parent: d0c687aaf8
Commit: dd65d15d8c
14 changed files with 482 additions and 823 deletions


@@ -467,13 +467,11 @@ namespace {
}
};
struct DestroyNRVOVariable final : EHScopeStack::Cleanup {
DestroyNRVOVariable(Address addr,
const CXXDestructorDecl *Dtor,
llvm::Value *NRVOFlag)
: Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(addr) {}
template <class Derived>
struct DestroyNRVOVariable : EHScopeStack::Cleanup {
DestroyNRVOVariable(Address addr, llvm::Value *NRVOFlag)
: NRVOFlag(NRVOFlag), Loc(addr) {}
const CXXDestructorDecl *Dtor;
llvm::Value *NRVOFlag;
Address Loc;
@@ -492,13 +490,40 @@ namespace {
CGF.EmitBlock(RunDtorBB);
}
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false,
/*Delegating=*/false,
Loc);
static_cast<Derived *>(this)->emitDestructorCall(CGF);
if (NRVO) CGF.EmitBlock(SkipDtorBB);
}
virtual ~DestroyNRVOVariable() = default;
};
struct DestroyNRVOVariableCXX final
: DestroyNRVOVariable<DestroyNRVOVariableCXX> {
DestroyNRVOVariableCXX(Address addr, const CXXDestructorDecl *Dtor,
llvm::Value *NRVOFlag)
: DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, NRVOFlag),
Dtor(Dtor) {}
const CXXDestructorDecl *Dtor;
void emitDestructorCall(CodeGenFunction &CGF) {
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false,
/*Delegating=*/false, Loc);
}
};
struct DestroyNRVOVariableC final
: DestroyNRVOVariable<DestroyNRVOVariableC> {
DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
: DestroyNRVOVariable<DestroyNRVOVariableC>(addr, NRVOFlag), Ty(Ty) {}
QualType Ty;
void emitDestructorCall(CodeGenFunction &CGF) {
CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
}
};
struct CallStackRestore final : EHScopeStack::Cleanup {
@@ -1088,7 +1113,10 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
address = ReturnValue;
if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
const auto *RD = RecordTy->getDecl();
const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
RD->isNonTrivialToPrimitiveDestroy()) {
// Create a flag that is used to indicate when the NRVO was applied
// to this variable. Set it to zero to indicate that NRVO was not
// applied.
@@ -1461,8 +1489,8 @@ void CodeGenFunction::emitAutoVarTypeCleanup(
if (emission.NRVOFlag) {
assert(!type->isArrayType());
CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
EHStack.pushCleanup<DestroyNRVOVariable>(cleanupKind, addr,
dtor, emission.NRVOFlag);
EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, dtor,
emission.NRVOFlag);
return;
}
break;
@@ -1484,6 +1512,12 @@ void CodeGenFunction::emitAutoVarTypeCleanup(
case QualType::DK_nontrivial_c_struct:
destroyer = CodeGenFunction::destroyNonTrivialCStruct;
if (emission.NRVOFlag) {
assert(!type->isArrayType());
EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
emission.NRVOFlag, type);
return;
}
break;
}
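
As a rough hand-written model of the NRVO-flag mechanism above (illustrative C
only, not compiler output; every name here is invented): the named local is
placed directly in the caller's return slot, the flag starts at zero, is set on
the return path, and the cleanup for a non-trivial C struct runs only when the
flag is still clear.

#include <stdbool.h>
#include <stdio.h>

struct S { int x; };

/* stands in for the field-by-field cleanup of a non-trivial C struct */
static void destroy_S(struct S *p) {
  printf("destroying S at %p\n", (void *)p);
}

/* 'ret_slot' plays the role of the sret return slot; 'nrvo_fired' models NRVOFlag */
static void make_S(struct S *ret_slot, int bail_out_early) {
  bool nrvo_fired = false;      /* flag initialized to zero: NRVO not applied yet */
  ret_slot->x = 42;             /* the named local is the return slot itself */
  if (!bail_out_early)
    nrvo_fired = true;          /* set on the 'return s;' path */
  if (!nrvo_fired)              /* guarded cleanup, as in DestroyNRVOVariableC */
    destroy_S(ret_slot);
}

int main(void) {
  struct S s;
  make_S(&s, 0);                /* ownership passes to the caller; no cleanup here */
  make_S(&s, 1);                /* early exit without returning the local: cleanup runs */
  return 0;
}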


@@ -12713,8 +12713,8 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// Try to apply the named return value optimization. We have to check
// if we can do this here because lambdas keep return statements around
// to deduce an implicit return type.
if (getLangOpts().CPlusPlus && FD->getReturnType()->isRecordType() &&
!FD->isDependentContext())
if (FD->getReturnType()->isRecordType() &&
(!getLangOpts().CPlusPlus || !FD->isDependentContext()))
computeNRVO(Body, getCurFunction());
}


@@ -2872,9 +2872,6 @@ Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) {
/// NRVO, or NULL if there is no such candidate.
VarDecl *Sema::getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK) {
if (!getLangOpts().CPlusPlus)
return nullptr;
// - in a return statement in a function [where] ...
// ... the expression is the name of a non-volatile automatic object ...
DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E->IgnoreParens());
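
A minimal sketch of what now qualifies in C (invented example, not from the
patch): the returned expression must name a non-volatile automatic object,
mirroring the wording quoted above.

struct big { int a[32]; };     /* hypothetical example type */

struct big candidate(void) {
  struct big b = {{0}};
  return b;                    /* non-volatile automatic object: NRVO candidate */
}

struct big not_a_candidate(void) {
  static struct big b;
  return b;                    /* not an automatic object: no NRVO, a copy is made */
}

struct big also_not_a_candidate(void) {
  volatile struct big b = {{0}};
  return b;                    /* volatile-qualified: excluded by the rule above */
}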


@@ -108,9 +108,7 @@ typedef struct {
TEST(struct_1);
// CHECK-LABEL: define swiftcc { i64, i64 } @return_struct_1() {{.*}}{
// CHECK: [[RET:%.*]] = alloca [[STRUCT1:%.*]], align 4
// CHECK: [[VAR:%.*]] = alloca [[STRUCT1]], align 4
// CHECK: call void @llvm.memset
// CHECK: call void @llvm.memcpy
// CHECK: [[CAST:%.*]] = bitcast [[STRUCT1]]* %retval to { i64, i64 }*
// CHECK: [[GEP0:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST]], i32 0, i32 0
// CHECK: [[T0:%.*]] = load i64, i64* [[GEP0]], align 4
@@ -158,12 +156,8 @@ typedef struct {
TEST(struct_2);
// CHECK-LABEL: define swiftcc { i64, i64 } @return_struct_2() {{.*}}{
// CHECK: [[RET:%.*]] = alloca [[STRUCT2_TYPE]], align 4
// CHECK: [[VAR:%.*]] = alloca [[STRUCT2_TYPE]], align 4
// CHECK: [[CASTVAR:%.*]] = bitcast {{.*}} [[VAR]]
// CHECK: [[CASTVAR:%.*]] = bitcast {{.*}} [[RET]]
// CHECK: call void @llvm.memcpy{{.*}}({{.*}}[[CASTVAR]], {{.*}}[[STRUCT2_RESULT]]
// CHECK: [[CASTRET:%.*]] = bitcast {{.*}} [[RET]]
// CHECK: [[CASTVAR:%.*]] = bitcast {{.*}} [[VAR]]
// CHECK: call void @llvm.memcpy{{.*}}({{.*}}[[CASTRET]], {{.*}}[[CASTVAR]]
// CHECK: [[CAST:%.*]] = bitcast [[STRUCT2_TYPE]]* [[RET]] to { i64, i64 }*
// CHECK: [[GEP0:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST]], i32 0, i32 0
// CHECK: [[T0:%.*]] = load i64, i64* [[GEP0]], align 4
@@ -214,12 +208,8 @@ typedef struct {
TEST(struct_misaligned_1)
// CHECK-LABEL: define swiftcc i64 @return_struct_misaligned_1()
// CHECK: [[RET:%.*]] = alloca [[STRUCT:%.*]], align 1
// CHECK: [[RES:%.*]] = alloca [[STRUCT]], align 1
// CHECK: [[CAST:%.*]] = bitcast [[STRUCT]]* [[RES]] to i8*
// CHECK: [[CAST:%.*]] = bitcast [[STRUCT]]* [[RET]] to i8*
// CHECK: call void @llvm.memset{{.*}}(i8* align 1 [[CAST]], i8 0, i64 5
// CHECK: [[CASTRET:%.*]] = bitcast [[STRUCT]]* [[RET]] to i8*
// CHECK: [[CASTRES:%.*]] = bitcast [[STRUCT]]* [[RES]] to i8*
// CHECK: call void @llvm.memcpy{{.*}}(i8* align 1 [[CASTRET]], i8* align 1 [[CASTRES]], i64 5
// CHECK: [[CAST:%.*]] = bitcast [[STRUCT]]* [[RET]] to { i64 }*
// CHECK: [[GEP:%.*]] = getelementptr inbounds { i64 }, { i64 }* [[CAST]], i32 0, i32 0
// CHECK: [[R0:%.*]] = load i64, i64* [[GEP]], align 1
@@ -267,12 +257,8 @@ typedef union {
TEST(union_het_fp)
// CHECK-LABEL: define swiftcc i64 @return_union_het_fp()
// CHECK: [[RET:%.*]] = alloca [[UNION:%.*]], align 8
// CHECK: [[RES:%.*]] = alloca [[UNION]], align 8
// CHECK: [[CAST:%.*]] = bitcast [[UNION]]* [[RES]] to i8*
// CHECK: [[CAST:%.*]] = bitcast [[UNION]]* [[RET]] to i8*
// CHECK: call void @llvm.memcpy{{.*}}(i8* align 8 [[CAST]]
// CHECK: [[CASTRET:%.*]] = bitcast [[UNION]]* [[RET]] to i8*
// CHECK: [[CASTRES:%.*]] = bitcast [[UNION]]* [[RES]] to i8*
// CHECK: call void @llvm.memcpy{{.*}}(i8* align 8 [[CASTRET]], i8* align 8 [[CASTRES]]
// CHECK: [[CAST:%.*]] = bitcast [[UNION]]* [[RET]] to { i64 }*
// CHECK: [[GEP:%.*]] = getelementptr inbounds { i64 }, { i64 }* [[CAST]], i32 0, i32 0
// CHECK: [[R0:%.*]] = load i64, i64* [[GEP]], align 8

File diff suppressed because it is too large.


@@ -1348,16 +1348,13 @@ float16x8_t test_vbslq_f16(uint16x8_t a, float16x8_t b, float16x8_t c) {
// CHECK-LABEL: test_vzip_f16
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x2_t, align 8
// CHECK: [[__RET_I:%.*]] = alloca %struct.float16x4x2_t, align 8
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <4 x half>*
// CHECK: [[VZIP0_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
// CHECK: store <4 x half> [[VZIP0_I]], <4 x half>* [[TMP1]]
// CHECK: [[TMP2:%.*]] = getelementptr inbounds <4 x half>, <4 x half>* [[TMP1]], i32 1
// CHECK: [[VZIP1_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
// CHECK: store <4 x half> [[VZIP1_I]], <4 x half>* [[TMP2]]
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x4x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.float16x4x2_t* [[__RET_I]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP5]], i8* align 8 [[TMP6]], i64 16, i1 false)
float16x4x2_t test_vzip_f16(float16x4_t a, float16x4_t b) {
return vzip_f16(a, b);
}
@@ -1365,16 +1362,13 @@ float16x4x2_t test_vzip_f16(float16x4_t a, float16x4_t b) {
// CHECK-LABEL: test_vzipq_f16
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[__RET_I:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x half>*
// CHECK: [[VZIP0_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
// CHECK: store <8 x half> [[VZIP0_I]], <8 x half>* [[TMP1]]
// CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x half>, <8 x half>* [[TMP1]], i32 1
// CHECK: [[VZIP1_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
// CHECK: store <8 x half> [[VZIP1_I]], <8 x half>* [[TMP2]]
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x8x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.float16x8x2_t* [[__RET_I]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP5]], i8* align 16 [[TMP6]], i64 32, i1 false)
float16x8x2_t test_vzipq_f16(float16x8_t a, float16x8_t b) {
return vzipq_f16(a, b);
}
@@ -1382,16 +1376,13 @@ float16x8x2_t test_vzipq_f16(float16x8_t a, float16x8_t b) {
// CHECK-LABEL: test_vuzp_f16
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x2_t, align 8
// CHECK: [[__RET_I:%.*]] = alloca %struct.float16x4x2_t, align 8
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <4 x half>*
// CHECK: [[VZIP0_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
// CHECK: store <4 x half> [[VZIP0_I]], <4 x half>* [[TMP1]]
// CHECK: [[TMP2:%.*]] = getelementptr inbounds <4 x half>, <4 x half>* [[TMP1]], i32 1
// CHECK: [[VZIP1_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
// CHECK: store <4 x half> [[VZIP1_I]], <4 x half>* [[TMP2]]
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x4x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.float16x4x2_t* [[__RET_I]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP5]], i8* align 8 [[TMP6]], i64 16, i1 false)
float16x4x2_t test_vuzp_f16(float16x4_t a, float16x4_t b) {
return vuzp_f16(a, b);
}
@@ -1399,16 +1390,13 @@ float16x4x2_t test_vuzp_f16(float16x4_t a, float16x4_t b) {
// CHECK-LABEL: test_vuzpq_f16
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[__RET_I:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x half>*
// CHECK: [[VZIP0_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
// CHECK: store <8 x half> [[VZIP0_I]], <8 x half>* [[TMP1]]
// CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x half>, <8 x half>* [[TMP1]], i32 1
// CHECK: [[VZIP1_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
// CHECK: store <8 x half> [[VZIP1_I]], <8 x half>* [[TMP2]]
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x8x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.float16x8x2_t* [[__RET_I]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP5]], i8* align 16 [[TMP6]], i64 32, i1 false)
float16x8x2_t test_vuzpq_f16(float16x8_t a, float16x8_t b) {
return vuzpq_f16(a, b);
}
@@ -1416,16 +1404,13 @@ float16x8x2_t test_vuzpq_f16(float16x8_t a, float16x8_t b) {
// CHECK-LABEL: test_vtrn_f16
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x2_t, align 8
// CHECK: [[__RET_I:%.*]] = alloca %struct.float16x4x2_t, align 8
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <4 x half>*
// CHECK: [[VZIP0_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
// CHECK: store <4 x half> [[VZIP0_I]], <4 x half>* [[TMP1]]
// CHECK: [[TMP2:%.*]] = getelementptr inbounds <4 x half>, <4 x half>* [[TMP1]], i32 1
// CHECK: [[VZIP1_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
// CHECK: store <4 x half> [[VZIP1_I]], <4 x half>* [[TMP2]]
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x4x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.float16x4x2_t* [[__RET_I]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP5]], i8* align 8 [[TMP6]], i64 16, i1 false)
float16x4x2_t test_vtrn_f16(float16x4_t a, float16x4_t b) {
return vtrn_f16(a, b);
}
@@ -1433,16 +1418,13 @@ float16x4x2_t test_vtrn_f16(float16x4_t a, float16x4_t b) {
// CHECK-LABEL: test_vtrnq_f16
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[__RET_I:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x2_t* [[__RET_I]] to i8*
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x half>*
// CHECK: [[VZIP0_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
// CHECK: store <8 x half> [[VZIP0_I]], <8 x half>* [[TMP1]]
// CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x half>, <8 x half>* [[TMP1]], i32 1
// CHECK: [[VZIP1_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
// CHECK: store <8 x half> [[VZIP1_I]], <8 x half>* [[TMP2]]
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x8x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP6:%.*]] = bitcast %struct.float16x8x2_t* [[__RET_I]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP5]], i8* align 16 [[TMP6]], i64 32, i1 false)
float16x8x2_t test_vtrnq_f16(float16x8_t a, float16x8_t b) {
return vtrnq_f16(a, b);
}


@@ -18,8 +18,6 @@ struct S bar() {
// O0-NOT: @llvm.lifetime.end
struct S r;
// O1: call void @llvm.lifetime.start.p0i8({{[^,]*}}, i8* nonnull %[[R_TMP:[^)]+]])
// O1: call void @llvm.lifetime.start.p0i8({{[^,]*}}, i8* nonnull %[[TMP1:[^)]+]])
// O1: call void @foo
r = foo();
@@ -35,7 +33,6 @@ struct S bar() {
r = foo();
// O1: call void @llvm.lifetime.end.p0i8({{[^,]*}}, i8* nonnull %[[TMP3]])
// O1: call void @llvm.lifetime.end.p0i8({{[^,]*}}, i8* nonnull %[[R_TMP]])
return r;
}
@@ -51,11 +48,8 @@ struct S baz(int i, volatile int *j) {
// O0-NOT: @llvm.lifetime.end
struct S r;
// O1: %[[RESULT_ALLOCA:[^ ]+]] = alloca %struct.S
// O1: %[[TMP1_ALLOCA:[^ ]+]] = alloca %struct.S
// O1: %[[TMP2_ALLOCA:[^ ]+]] = alloca %struct.S
// O1: %[[P:[^ ]+]] = bitcast %struct.S* %[[RESULT_ALLOCA]] to i8*
// O1: call void @llvm.lifetime.start.p0i8({{[^,]*}}, i8* %[[P]])
// O1: br label %[[DO_BODY:.+]]
do {
@@ -94,8 +88,6 @@ struct S baz(int i, volatile int *j) {
} while (1);
// O1: [[DO_END]]:
// O1: call void @llvm.memcpy
// O1: %[[P:[^ ]+]] = bitcast %struct.S* %[[RESULT_ALLOCA]] to i8*
// O1: call void @llvm.lifetime.end.p0i8({{[^,]*}}, i8* %[[P]])
// O1-NEXT: ret void
return r;
}


@@ -103,9 +103,7 @@ typedef struct {
TEST(struct_1);
// CHECK-LABEL: define {{.*}} @return_struct_1()
// CHECK: [[RET:%.*]] = alloca [[REC:%.*]], align 4
// CHECK: [[VAR:%.*]] = alloca [[REC]], align 4
// CHECK: @llvm.memset
// CHECK: @llvm.memcpy
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ i32, i16, \[2 x i8\], float, float }]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align 4
@@ -170,8 +168,6 @@ typedef struct {
TEST(struct_2);
// CHECK-LABEL: define {{.*}} @return_struct_2()
// CHECK: [[RET:%.*]] = alloca [[REC:%.*]], align 4
// CHECK: [[VAR:%.*]] = alloca [[REC]], align 4
// CHECK: @llvm.memcpy
// CHECK: @llvm.memcpy
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ i32, i32, float, float }]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
@@ -240,9 +236,7 @@ typedef struct {
TEST(struct_misaligned_1)
// CHECK-LABEL: define {{.*}} @return_struct_misaligned_1()
// CHECK: [[RET:%.*]] = alloca [[REC:%.*]], align
// CHECK: [[VAR:%.*]] = alloca [[REC]], align
// CHECK: @llvm.memset
// CHECK: @llvm.memcpy
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ i32, i8 }]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align
@@ -282,8 +276,6 @@ typedef union {
TEST(union_het_fp)
// CHECK-LABEL: define {{.*}} @return_union_het_fp()
// CHECK: [[RET:%.*]] = alloca [[REC:%.*]], align {{(4|8)}}
// CHECK: [[VAR:%.*]] = alloca [[REC]], align {{(4|8)}}
// CHECK: @llvm.memcpy
// CHECK: @llvm.memcpy
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ i32, i32 }]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
@@ -414,7 +406,6 @@ TEST(int4)
TEST(int8)
// CHECK-LABEL: define {{.*}} @return_int8()
// CHECK: [[RET:%.*]] = alloca [[REC:<8 x i32>]], align 32
// CHECK: [[VAR:%.*]] = alloca [[REC]], align
// CHECK: store
// CHECK: load
// CHECK: store
@@ -458,7 +449,6 @@ TEST(int8)
TEST(int5)
// CHECK-LABEL: define {{.*}} @return_int5()
// CHECK: [[RET:%.*]] = alloca [[REC:<5 x i32>]], align 32
// CHECK: [[VAR:%.*]] = alloca [[REC]], align
// CHECK: store
// CHECK: load
// CHECK: store

File diff suppressed because it is too large.


@@ -186,8 +186,8 @@ struct S {
struct S fd1(struct S *a) {
// CHECK-LABEL: @fd1
// CHECK: [[RETVAL:%.*]] = alloca %struct.S, align 4
// CHECK: [[RET:%.*]] = alloca %struct.S, align 4
// CHECK: [[CAST:%.*]] = bitcast %struct.S* [[RET]] to i64*
// CHECK: bitcast %struct.S* {{.*}} to i64*
// CHECK: [[CAST:%.*]] = bitcast %struct.S* [[RETVAL]] to i64*
// CHECK: [[CALL:%.*]] = call i64 @__atomic_load_8(
// CHECK: store i64 [[CALL]], i64* [[CAST]], align 4
struct S ret;


@@ -48,13 +48,12 @@ void test7 (int x, struct test7 y)
{
}
// CHECK-LABEL: define void @test1va(%struct.test1* noalias sret %agg.result, i32 signext %x, ...)
// CHECK: %y = alloca %struct.test1, align 4
// CHECK: define void @test1va(%struct.test1* noalias sret %[[AGG_RESULT:.*]], i32 signext %x, ...)
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[CUR]], i64 8
// CHECK: store i8* %[[NEXT]], i8** %ap
// CHECK: [[T0:%.*]] = bitcast i8* %[[CUR]] to %struct.test1*
// CHECK: [[DEST:%.*]] = bitcast %struct.test1* %y to i8*
// CHECK: [[DEST:%.*]] = bitcast %struct.test1* %[[AGG_RESULT]] to i8*
// CHECK: [[SRC:%.*]] = bitcast %struct.test1* [[T0]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[DEST]], i8* align 8 [[SRC]], i64 8, i1 false)
struct test1 test1va (int x, ...)
@@ -67,8 +66,7 @@ struct test1 test1va (int x, ...)
return y;
}
// CHECK-LABEL: define void @test2va(%struct.test2* noalias sret %agg.result, i32 signext %x, ...)
// CHECK: %y = alloca %struct.test2, align 16
// CHECK: define void @test2va(%struct.test2* noalias sret %[[AGG_RESULT:.*]], i32 signext %x, ...)
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
// CHECK: %[[TMP0:[^ ]+]] = ptrtoint i8* %[[CUR]] to i64
// CHECK: %[[TMP1:[^ ]+]] = add i64 %[[TMP0]], 15
@@ -77,7 +75,7 @@ struct test1 test1va (int x, ...)
// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[ALIGN]], i64 16
// CHECK: store i8* %[[NEXT]], i8** %ap
// CHECK: [[T0:%.*]] = bitcast i8* %[[ALIGN]] to %struct.test2*
// CHECK: [[DEST:%.*]] = bitcast %struct.test2* %y to i8*
// CHECK: [[DEST:%.*]] = bitcast %struct.test2* %[[AGG_RESULT]] to i8*
// CHECK: [[SRC:%.*]] = bitcast %struct.test2* [[T0]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[DEST]], i8* align 16 [[SRC]], i64 16, i1 false)
struct test2 test2va (int x, ...)
@@ -90,8 +88,7 @@ struct test2 test2va (int x, ...)
return y;
}
// CHECK-LABEL: define void @test3va(%struct.test3* noalias sret %agg.result, i32 signext %x, ...)
// CHECK: %y = alloca %struct.test3, align 32
// CHECK: define void @test3va(%struct.test3* noalias sret %[[AGG_RESULT:.*]], i32 signext %x, ...)
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
// CHECK: %[[TMP0:[^ ]+]] = ptrtoint i8* %[[CUR]] to i64
// CHECK: %[[TMP1:[^ ]+]] = add i64 %[[TMP0]], 15
@@ -100,7 +97,7 @@ struct test2 test2va (int x, ...)
// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[ALIGN]], i64 32
// CHECK: store i8* %[[NEXT]], i8** %ap
// CHECK: [[T0:%.*]] = bitcast i8* %[[ALIGN]] to %struct.test3*
// CHECK: [[DEST:%.*]] = bitcast %struct.test3* %y to i8*
// CHECK: [[DEST:%.*]] = bitcast %struct.test3* %[[AGG_RESULT]] to i8*
// CHECK: [[SRC:%.*]] = bitcast %struct.test3* [[T0]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 [[DEST]], i8* align 16 [[SRC]], i64 32, i1 false)
struct test3 test3va (int x, ...)
@@ -113,13 +110,12 @@ struct test3 test3va (int x, ...)
return y;
}
// CHECK-LABEL: define void @test4va(%struct.test4* noalias sret %agg.result, i32 signext %x, ...)
// CHECK: %y = alloca %struct.test4, align 4
// CHECK: define void @test4va(%struct.test4* noalias sret %[[AGG_RESULT:.*]], i32 signext %x, ...)
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[CUR]], i64 16
// CHECK: store i8* %[[NEXT]], i8** %ap
// CHECK: [[T0:%.*]] = bitcast i8* %[[CUR]] to %struct.test4*
// CHECK: [[DEST:%.*]] = bitcast %struct.test4* %y to i8*
// CHECK: [[DEST:%.*]] = bitcast %struct.test4* %[[AGG_RESULT]] to i8*
// CHECK: [[SRC:%.*]] = bitcast %struct.test4* [[T0]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[DEST]], i8* align 8 [[SRC]], i64 12, i1 false)
struct test4 test4va (int x, ...)
@@ -132,13 +128,12 @@ struct test4 test4va (int x, ...)
return y;
}
// CHECK-LABEL: define void @testva_longdouble(%struct.test_longdouble* noalias sret %agg.result, i32 signext %x, ...)
// CHECK: %y = alloca %struct.test_longdouble, align 16
// CHECK: define void @testva_longdouble(%struct.test_longdouble* noalias sret %[[AGG_RESULT:.*]], i32 signext %x, ...)
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[CUR]], i64 16
// CHECK: store i8* %[[NEXT]], i8** %ap
// CHECK: [[T0:%.*]] = bitcast i8* %[[CUR]] to %struct.test_longdouble*
// CHECK: [[DEST:%.*]] = bitcast %struct.test_longdouble* %y to i8*
// CHECK: [[DEST:%.*]] = bitcast %struct.test_longdouble* %[[AGG_RESULT]] to i8*
// CHECK: [[SRC:%.*]] = bitcast %struct.test_longdouble* [[T0]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[DEST]], i8* align 8 [[SRC]], i64 16, i1 false)
struct test_longdouble { long double x; };
@@ -152,8 +147,7 @@ struct test_longdouble testva_longdouble (int x, ...)
return y;
}
// CHECK-LABEL: define void @testva_vector(%struct.test_vector* noalias sret %agg.result, i32 signext %x, ...)
// CHECK: %y = alloca %struct.test_vector, align 16
// CHECK: define void @testva_vector(%struct.test_vector* noalias sret %[[AGG_RESULT:.*]], i32 signext %x, ...)
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
// CHECK: %[[TMP0:[^ ]+]] = ptrtoint i8* %[[CUR]] to i64
// CHECK: %[[TMP1:[^ ]+]] = add i64 %[[TMP0]], 15
@@ -162,7 +156,7 @@ struct test_longdouble testva_longdouble (int x, ...)
// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[ALIGN]], i64 16
// CHECK: store i8* %[[NEXT]], i8** %ap
// CHECK: [[T0:%.*]] = bitcast i8* %[[ALIGN]] to %struct.test_vector*
// CHECK: [[DEST:%.*]] = bitcast %struct.test_vector* %y to i8*
// CHECK: [[DEST:%.*]] = bitcast %struct.test_vector* %[[AGG_RESULT]] to i8*
// CHECK: [[SRC:%.*]] = bitcast %struct.test_vector* [[T0]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[DEST]], i8* align 16 [[SRC]], i64 16, i1 false)
struct test_vector { vector int x; };


@@ -80,10 +80,9 @@ struct S test_struct(char *fmt, ...) {
return v;
}
// CHECK: define void @test_struct([[STRUCT_S:%[^,=]+]]*{{.*}} noalias sret %agg.result, i8*{{.*}} %fmt, ...) {{.*}} {
// CHECK: define void @test_struct([[STRUCT_S:%[^,=]+]]*{{.*}} noalias sret [[AGG_RESULT:%.*]], i8*{{.*}} %fmt, ...) {{.*}} {
// CHECK: [[FMT_ADDR:%[^,=]+]] = alloca i8*, align 4
// CHECK: [[VA:%[^,=]+]] = alloca i8*, align 4
// CHECK: [[V:%[^,=]+]] = alloca [[STRUCT_S]], align 4
// CHECK: store i8* %fmt, i8** [[FMT_ADDR]], align 4
// CHECK: [[VA1:%[^,=]+]] = bitcast i8** [[VA]] to i8*
// CHECK: call void @llvm.va_start(i8* [[VA1]])
@@ -91,13 +90,10 @@ struct S test_struct(char *fmt, ...) {
// CHECK: [[ARGP_NEXT:%[^,=]+]] = getelementptr inbounds i8, i8* [[ARGP_CUR]], i32 12
// CHECK: store i8* [[ARGP_NEXT]], i8** [[VA]], align 4
// CHECK: [[R3:%[^,=]+]] = bitcast i8* [[ARGP_CUR]] to [[STRUCT_S]]*
// CHECK: [[R4:%[^,=]+]] = bitcast [[STRUCT_S]]* [[V]] to i8*
// CHECK: [[R4:%[^,=]+]] = bitcast [[STRUCT_S]]* [[AGG_RESULT]] to i8*
// CHECK: [[R5:%[^,=]+]] = bitcast [[STRUCT_S]]* [[R3]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[R4]], i8* align 4 [[R5]], i32 12, i1 false)
// CHECK: [[VA2:%[^,=]+]] = bitcast i8** [[VA]] to i8*
// CHECK: call void @llvm.va_end(i8* [[VA2]])
// CHECK: [[R6:%[^,=]+]] = bitcast [[STRUCT_S]]* %agg.result to i8*
// CHECK: [[R7:%[^,=]+]] = bitcast [[STRUCT_S]]* [[V]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[R6]], i8* align 4 [[R7]], i32 12, i1 false)
// CHECK: ret void
// CHECK: }


@@ -99,9 +99,7 @@ typedef struct {
TEST(struct_1);
// CHECK-LABEL: define dso_local swiftcc { i64, i64 } @return_struct_1() {{.*}}{
// CHECK: [[RET:%.*]] = alloca [[STRUCT1:%.*]], align 4
// CHECK: [[VAR:%.*]] = alloca [[STRUCT1]], align 4
// CHECK: call void @llvm.memset
// CHECK: call void @llvm.memcpy
// CHECK: [[CAST:%.*]] = bitcast [[STRUCT1]]* %retval to { i64, i64 }*
// CHECK: [[GEP0:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST]], i32 0, i32 0
// CHECK: [[T0:%.*]] = load i64, i64* [[GEP0]], align 4
@@ -149,12 +147,8 @@ typedef struct {
TEST(struct_2);
// CHECK-LABEL: define dso_local swiftcc { i64, i64 } @return_struct_2() {{.*}}{
// CHECK: [[RET:%.*]] = alloca [[STRUCT2_TYPE]], align 4
// CHECK: [[VAR:%.*]] = alloca [[STRUCT2_TYPE]], align 4
// CHECK: [[CASTVAR:%.*]] = bitcast {{.*}} [[VAR]]
// CHECK: [[CASTVAR:%.*]] = bitcast {{.*}} [[RET]]
// CHECK: call void @llvm.memcpy{{.*}}({{.*}}[[CASTVAR]], {{.*}}[[STRUCT2_RESULT]]
// CHECK: [[CASTRET:%.*]] = bitcast {{.*}} [[RET]]
// CHECK: [[CASTVAR:%.*]] = bitcast {{.*}} [[VAR]]
// CHECK: call void @llvm.memcpy{{.*}}({{.*}}[[CASTRET]], {{.*}}[[CASTVAR]]
// CHECK: [[CAST:%.*]] = bitcast [[STRUCT2_TYPE]]* [[RET]] to { i64, i64 }*
// CHECK: [[GEP0:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST]], i32 0, i32 0
// CHECK: [[T0:%.*]] = load i64, i64* [[GEP0]], align 4
@@ -205,12 +199,8 @@ typedef struct {
TEST(struct_misaligned_1)
// CHECK-LABEL: define dso_local swiftcc i64 @return_struct_misaligned_1()
// CHECK: [[RET:%.*]] = alloca [[STRUCT:%.*]], align 1
// CHECK: [[RES:%.*]] = alloca [[STRUCT]], align 1
// CHECK: [[CAST:%.*]] = bitcast [[STRUCT]]* [[RES]] to i8*
// CHECK: [[CAST:%.*]] = bitcast [[STRUCT]]* [[RET]] to i8*
// CHECK: call void @llvm.memset{{.*}}(i8* align 1 [[CAST]], i8 0, i64 5
// CHECK: [[CASTRET:%.*]] = bitcast [[STRUCT]]* [[RET]] to i8*
// CHECK: [[CASTRES:%.*]] = bitcast [[STRUCT]]* [[RES]] to i8*
// CHECK: call void @llvm.memcpy{{.*}}(i8* align {{[0-9]+}} [[CASTRET]], i8* align {{[0-9]+}} [[CASTRES]], i64 5
// CHECK: [[CAST:%.*]] = bitcast [[STRUCT]]* [[RET]] to { i64 }*
// CHECK: [[GEP:%.*]] = getelementptr inbounds { i64 }, { i64 }* [[CAST]], i32 0, i32 0
// CHECK: [[R0:%.*]] = load i64, i64* [[GEP]], align 1
@@ -258,12 +248,8 @@ typedef union {
TEST(union_het_fp)
// CHECK-LABEL: define dso_local swiftcc i64 @return_union_het_fp()
// CHECK: [[RET:%.*]] = alloca [[UNION:%.*]], align 8
// CHECK: [[RES:%.*]] = alloca [[UNION]], align 8
// CHECK: [[CAST:%.*]] = bitcast [[UNION]]* [[RES]] to i8*
// CHECK: [[CAST:%.*]] = bitcast [[UNION]]* [[RET]] to i8*
// CHECK: call void @llvm.memcpy{{.*}}(i8* align {{[0-9]+}} [[CAST]]
// CHECK: [[CASTRET:%.*]] = bitcast [[UNION]]* [[RET]] to i8*
// CHECK: [[CASTRES:%.*]] = bitcast [[UNION]]* [[RES]] to i8*
// CHECK: call void @llvm.memcpy{{.*}}(i8* align {{[0-9]+}} [[CASTRET]], i8* align {{[0-9]+}} [[CASTRES]]
// CHECK: [[CAST:%.*]] = bitcast [[UNION]]* [[RET]] to { i64 }*
// CHECK: [[GEP:%.*]] = getelementptr inbounds { i64 }, { i64 }* [[CAST]], i32 0, i32 0
// CHECK: [[R0:%.*]] = load i64, i64* [[GEP]], align 8


@@ -0,0 +1,134 @@
// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -emit-llvm -fobjc-arc -fobjc-runtime-has-weak -o - %s | FileCheck %s
// CHECK: %[[STRUCT_TRIVIAL:.*]] = type { i32 }
// CHECK: %[[STRUCT_TRIVIALBIG:.*]] = type { [64 x i32] }
// CHECK: %[[STRUCT_STRONG:.*]] = type { i8* }
// CHECK: %[[STRUCT_WEAK:.*]] = type { i8* }
typedef struct {
int x;
} Trivial;
typedef struct {
int x[64];
} TrivialBig;
typedef struct {
id x;
} Strong;
typedef struct {
__weak id x;
} Weak;
// CHECK: define i32 @testTrivial()
// CHECK: %[[RETVAL:.*]] = alloca %[[STRUCT_TRIVIAL]], align 4
// CHECK-NEXT: call void @func0(%[[STRUCT_TRIVIAL]]* %[[RETVAL]])
// CHECK-NOT: memcpy
// CHECK: ret i32 %
void func0(Trivial *);
Trivial testTrivial(void) {
Trivial a;
func0(&a);
return a;
}
void func1(TrivialBig *);
// CHECK: define void @testTrivialBig(%[[STRUCT_TRIVIALBIG]]* noalias sret %[[AGG_RESULT:.*]])
// CHECK-NOT: alloca
// CHECK: call void @func1(%[[STRUCT_TRIVIALBIG]]* %[[AGG_RESULT]])
// CHECK-NEXT: ret void
TrivialBig testTrivialBig(void) {
TrivialBig a;
func1(&a);
return a;
}
// CHECK: define i8* @testStrong()
// CHECK: %[[RETVAL:.*]] = alloca %[[STRUCT_STRONG]], align 8
// CHECK: %[[NRVO:.*]] = alloca i1, align 1
// CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONG]]* %[[RETVAL]] to i8**
// CHECK: call void @__default_constructor_8_s0(i8** %[[V0]])
// CHECK: store i1 true, i1* %[[NRVO]], align 1
// CHECK: %[[NRVO_VAL:.*]] = load i1, i1* %[[NRVO]], align 1
// CHECK: br i1 %[[NRVO_VAL]],
// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONG]]* %[[RETVAL]] to i8**
// CHECK: call void @__destructor_8_s0(i8** %[[V1]])
// CHECK: br
// CHECK: %[[COERCE_DIVE:.*]] = getelementptr inbounds %[[STRUCT_STRONG]], %[[STRUCT_STRONG]]* %[[RETVAL]], i32 0, i32 0
// CHECK: %[[V2:.*]] = load i8*, i8** %[[COERCE_DIVE]], align 8
// CHECK: ret i8* %[[V2]]
Strong testStrong(void) {
Strong a;
return a;
}
// CHECK: define void @testWeak(%[[STRUCT_WEAK]]* noalias sret %[[AGG_RESULT:.*]])
// CHECK: %[[NRVO:.*]] = alloca i1, align 1
// CHECK: %[[V0:.*]] = bitcast %[[STRUCT_WEAK]]* %[[AGG_RESULT]] to i8**
// CHECK: call void @__default_constructor_8_w0(i8** %[[V0]])
// CHECK: store i1 true, i1* %[[NRVO]], align 1
// CHECK: %[[NRVO_VAL:.*]] = load i1, i1* %[[NRVO]], align 1
// CHECK: br i1 %[[NRVO_VAL]],
// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_WEAK]]* %[[AGG_RESULT]] to i8**
// CHECK: call void @__destructor_8_w0(i8** %[[V1]])
// CHECK: br
// CHECK-NOT: call
// CHECK: ret void
Weak testWeak(void) {
Weak a;
return a;
}
// CHECK: define void @testWeak2(
// CHECK: call void @__default_constructor_8_w0(
// CHECK: call void @__default_constructor_8_w0(
// CHECK: call void @__copy_constructor_8_8_w0(
// CHECK: call void @__copy_constructor_8_8_w0(
// CHECK: call void @__destructor_8_w0(
// CHECK: call void @__destructor_8_w0(
Weak testWeak2(int c) {
Weak a, b;
if (c)
return a;
else
return b;
}
// CHECK: define internal void @"\01-[C1 foo1]"(%[[STRUCT_WEAK]]* noalias sret %[[AGG_RESULT:.*]], %{{.*}}* %{{.*}}, i8* %{{.*}})
// CHECK: %[[NRVO:.*]] = alloca i1, align 1
// CHECK: %[[V0:.*]] = bitcast %[[STRUCT_WEAK]]* %[[AGG_RESULT]] to i8**
// CHECK: call void @__default_constructor_8_w0(i8** %[[V0]])
// CHECK: store i1 true, i1* %[[NRVO]], align 1
// CHECK: %[[NRVO_VAL:.*]] = load i1, i1* %[[NRVO]], align 1
// CHECK: br i1 %[[NRVO_VAL]],
// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_WEAK]]* %[[AGG_RESULT]] to i8**
// CHECK: call void @__destructor_8_w0(i8** %[[V1]])
// CHECK: br
// CHECK-NOT: call
// CHECK: ret void
__attribute__((objc_root_class))
@interface C1
- (Weak)foo1;
@end
@implementation C1
- (Weak)foo1 {
Weak a;
return a;
}
@end