[ARM] Fix shouldExpandAtomicLoadInIR for subtargets without ldrexd.
Regression from 2f497ec3; we should not try to generate ldrexd on targets that don't have it. Also, while I'm here, fix shouldExpandAtomicStoreInIR, for consistency. That doesn't really have any practical effect, though. On Thumb targets where we need to use __sync_* libcalls, there is no libcall for stores, so SelectionDAG calls __sync_lock_test_and_set_8 anyway.
This commit is contained in:
parent
f10f16a6a9
commit
ddca66622c
|
@@ -20976,8 +20976,16 @@ Instruction *ARMTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
 // anything for those.
 bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+  bool has64BitAtomicStore;
+  if (Subtarget->isMClass())
+    has64BitAtomicStore = false;
+  else if (Subtarget->isThumb())
+    has64BitAtomicStore = Subtarget->hasV7Ops();
+  else
+    has64BitAtomicStore = Subtarget->hasV6Ops();
+
   unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
-  return (Size == 64) && !Subtarget->isMClass();
+  return Size == 64 && has64BitAtomicStore;
 }
 
 // Loads and stores less than 64-bits are already atomic; ones above that
@@ -20989,9 +20997,17 @@ bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
 // sections A8.8.72-74 LDRD)
 TargetLowering::AtomicExpansionKind
 ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+  bool has64BitAtomicLoad;
+  if (Subtarget->isMClass())
+    has64BitAtomicLoad = false;
+  else if (Subtarget->isThumb())
+    has64BitAtomicLoad = Subtarget->hasV7Ops();
+  else
+    has64BitAtomicLoad = Subtarget->hasV6Ops();
+
   unsigned Size = LI->getType()->getPrimitiveSizeInBits();
-  return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly
-                                                  : AtomicExpansionKind::None;
+  return (Size == 64 && has64BitAtomicLoad) ? AtomicExpansionKind::LLOnly
+                                            : AtomicExpansionKind::None;
 }
 
 // For the real atomic operations, we have ldrex/strex up to 32 bits,
@@ -270,8 +270,15 @@ define i64 @test_old_load_64bit(i64* %p) {
 ;
 ; THUMBONE-LABEL: test_old_load_64bit:
 ; THUMBONE: @ %bb.0:
-; THUMBONE-NEXT:    ldaexd r0, r1, [r0]
-; THUMBONE-NEXT:    bx lr
+; THUMBONE-NEXT:    push {r7, lr}
+; THUMBONE-NEXT:    sub sp, #8
+; THUMBONE-NEXT:    movs r2, #0
+; THUMBONE-NEXT:    str r2, [sp]
+; THUMBONE-NEXT:    str r2, [sp, #4]
+; THUMBONE-NEXT:    mov r3, r2
+; THUMBONE-NEXT:    bl __sync_val_compare_and_swap_8
+; THUMBONE-NEXT:    add sp, #8
+; THUMBONE-NEXT:    pop {r7, pc}
 ;
 ; ARMV4-LABEL: test_old_load_64bit:
 ; ARMV4: @ %bb.0:
Loading…
Reference in New Issue