[CodeGen] Fix an assertion failure in CGRecordLowering.

This patch fixes a bug in CGRecordLowering::accumulateBitFields where it
unconditionally starts a new run and emits a storage field whenever it
sees a zero-sized bitfield, which trips an assertion in insertPadding
when -fno-bitfield-type-align is used.

It shouldn't emit new storage for a zero-sized bitfield when
UseZeroLengthBitfieldAlignment and UseBitFieldTypeAlignment are both
false.
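
For reference, here is a minimal reproducer, distilled from the test
added in this commit (the file name repro.c is a placeholder): compiling
the struct below with -fno-bitfield-type-align used to trip the
insertPadding assertion, because each zero-width bitfield forced a new
storage unit:

    // clang -cc1 -triple x86_64-apple-darwin -fno-bitfield-type-align \
    //   -emit-llvm -o - repro.c
    struct S {
      unsigned short : 0;   // zero-width; must not end the current run
      unsigned short f1 : 15;
      unsigned short : 0;
      unsigned short f2 : 15;
    };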

rdar://problem/36762205

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@323943 91177308-0d34-0410-b5e6-96231b3b80d8
Akira Hatanaka 2018-02-01 03:04:15 +00:00
parent ea016449cc
commit 2cbd32574f
2 changed files with 50 additions and 2 deletions

@@ -443,14 +443,18 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
     // If the start field of a new run is better as a single run, or
     // if current field is better as a single run, or
-    // if current field has zero width bitfield, or
+    // if current field has zero width bitfield and either
+    // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
+    // true, or
     // if the offset of current field is inconsistent with the offset of
     // previous field plus its offset,
     // skip the block below and go ahead to emit the storage.
     // Otherwise, try to add bitfields to the run.
     if (!StartFieldAsSingleRun && Field != FieldEnd &&
         !IsBetterAsSingleFieldRun(Field) &&
-        Field->getBitWidthValue(Context) != 0 &&
+        (Field->getBitWidthValue(Context) != 0 ||
+         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
+          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
         Tail == getFieldBitOffset(*Field)) {
       Tail += Field->getBitWidthValue(Context);
       ++Field;
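
Read in isolation, the zero-width clause of the new condition can be
modeled by the sketch below (an illustration written for this note, not
code from the patch; only the two TargetInfo hooks are real Clang API).
A zero-width bitfield now ends the current run only when the target
honors zero-length bitfield alignment or bitfield type alignment;
otherwise it joins the run like any field whose offset matches Tail:

    #include "clang/Basic/TargetInfo.h"
    #include <cstdint>

    // Hypothetical helper modeling the patched run-continuation test.
    // By De Morgan, !ZeroWidthEndsRun is equivalent to the committed
    // clause (BitWidth != 0 || (!useZeroLengthBitfieldAlignment() &&
    //                           !useBitFieldTypeAlignment())).
    static bool joinsCurrentRun(uint64_t BitWidth, uint64_t FieldOffset,
                                uint64_t Tail, const clang::TargetInfo &TI) {
      bool ZeroWidthEndsRun =
          BitWidth == 0 && (TI.useZeroLengthBitfieldAlignment() ||
                            TI.useBitFieldTypeAlignment());
      return !ZeroWidthEndsRun && Tail == FieldOffset;
    }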

@@ -0,0 +1,44 @@
+// RUN: %clang_cc1 -triple x86_64-apple-darwin -fno-bitfield-type-align -emit-llvm -o - %s | FileCheck %s
+// CHECK: %[[STRUCT_S:.*]] = type { i32 }
+struct S {
+  unsigned short: 0;
+  unsigned short f1:15;
+  unsigned short: 0;
+  unsigned short f2:15;
+};
+// CHECK: define void @test_zero_width_bitfield(%[[STRUCT_S]]* %[[A:.*]])
+// CHECK: %[[BF_LOAD:.*]] = load i32, i32* %[[V1:.*]], align 1
+// CHECK: %[[BF_CLEAR:.*]] = and i32 %[[BF_LOAD]], 32767
+// CHECK: %[[BF_CAST:.*]] = trunc i32 %[[BF_CLEAR]] to i16
+// CHECK: %[[CONV:.*]] = zext i16 %[[BF_CAST]] to i32
+// CHECK: %[[ADD:.*]] = add nsw i32 %[[CONV]], 1
+// CHECK: %[[CONV1:.*]] = trunc i32 %[[ADD]] to i16
+// CHECK: %[[V2:.*]] = zext i16 %[[CONV1]] to i32
+// CHECK: %[[BF_LOAD2:.*]] = load i32, i32* %[[V1]], align 1
+// CHECK: %[[BF_VALUE:.*]] = and i32 %[[V2]], 32767
+// CHECK: %[[BF_CLEAR3:.*]] = and i32 %[[BF_LOAD2]], -32768
+// CHECK: %[[BF_SET:.*]] = or i32 %[[BF_CLEAR3]], %[[BF_VALUE]]
+// CHECK: store i32 %[[BF_SET]], i32* %[[V1]], align 1
+// CHECK: %[[BF_LOAD4:.*]] = load i32, i32* %[[V4:.*]], align 1
+// CHECK: %[[BF_LSHR:.*]] = lshr i32 %[[BF_LOAD4]], 15
+// CHECK: %[[BF_CLEAR5:.*]] = and i32 %[[BF_LSHR]], 32767
+// CHECK: %[[BF_CAST6:.*]] = trunc i32 %[[BF_CLEAR5]] to i16
+// CHECK: %[[CONV7:.*]] = zext i16 %[[BF_CAST6]] to i32
+// CHECK: %[[ADD8:.*]] = add nsw i32 %[[CONV7]], 2
+// CHECK: %[[CONV9:.*]] = trunc i32 %[[ADD8]] to i16
+// CHECK: %[[V5:.*]] = zext i16 %[[CONV9]] to i32
+// CHECK: %[[BF_LOAD10:.*]] = load i32, i32* %[[V4]], align 1
+// CHECK: %[[BF_VALUE11:.*]] = and i32 %[[V5]], 32767
+// CHECK: %[[BF_SHL:.*]] = shl i32 %[[BF_VALUE11]], 15
+// CHECK: %[[BF_CLEAR12:.*]] = and i32 %[[BF_LOAD10]], -1073709057
+// CHECK: %[[BF_SET13:.*]] = or i32 %[[BF_CLEAR12]], %[[BF_SHL]]
+// CHECK: store i32 %[[BF_SET13]], i32* %[[V4]], align 1
+void test_zero_width_bitfield(struct S *a) {
+  a->f1 += 1;
+  a->f2 += 2;
+}