[AArch64] Use SUBXrx64 for dynamic stack to refer to sp

When we lower a dynamic stack allocation, we need to subtract the pattern `x15 << 4` from sp.
The subtract instruction with an arithmetic shifted register (SUBXrs) can't refer to sp, so for now we need two extra movs:

```
mov x0, sp
sub x0, x0, x15, lsl #4
mov sp, x0
```
If we want to refer to sp in the subtract instruction directly, like this:
```
sub	sp, sp, x15, lsl #4
```
we must use the arith extended register version (SUBXrx). In the extended-register encoding, register number 31 in the Rn/Rd fields means sp, whereas in the shifted-register encoding it means xzr, which is why the shifted form can't address sp.
So in this patch, when we find a sub whose src0 operand is a copy from sp, we try to select SUBXrx64.
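
As an illustration (a hypothetical IR snippet, not one of the tests touched by this patch; the names @dynamic_buffer and @use are made up), this is the kind of input that reaches this path on aarch64-windows: the dynamically sized alloca goes through __chkstk and is followed by the sp adjustment shown above.

```
; Hypothetical example, not from this patch's tests. The dynamic alloca is
; lowered to a __chkstk call (allocation size in 16-byte units in x15),
; followed by the stack adjustment, which this patch selects as
; "sub sp, sp, x15, lsl #4" instead of going through a scratch register
; with mov/sub/mov.
define void @dynamic_buffer(i64 %n) {
entry:
  %buf = alloca i8, i64 %n, align 16
  call void @use(i8* %buf)
  ret void
}

declare void @use(i8*)
```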

Reviewed By: efriedma

Differential Revision: https://reviews.llvm.org/D129932
chenglin.bi 2022-07-20 11:06:16 +08:00
parent 5b0e96a8ff
commit d337c1f256
5 changed files with 36 additions and 4 deletions


@@ -69,6 +69,7 @@ public:
  bool tryMLAV64LaneV128(SDNode *N);
  bool tryMULLV64LaneV128(unsigned IntNo, SDNode *N);
  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithUXTXRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
@@ -893,6 +894,30 @@ bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
  return isWorthFolding(N);
}

/// SelectArithUXTXRegister - Select a "UXTX register" operand. This
/// operand is used by instructions that have an SP operand.
bool AArch64DAGToDAGISel::SelectArithUXTXRegister(SDValue N, SDValue &Reg,
                                                  SDValue &Shift) {
  unsigned ShiftVal = 0;
  AArch64_AM::ShiftExtendType Ext;

  if (N.getOpcode() != ISD::SHL)
    return false;

  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!CSD)
    return false;
  ShiftVal = CSD->getZExtValue();
  if (ShiftVal > 4)
    return false;

  Ext = AArch64_AM::UXTX;
  Reg = N.getOperand(0);
  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
                                    MVT::i32);

  return isWorthFolding(N);
}

/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point in
/// folding it into the mem op. Theoretically, it shouldn't matter, but there's


@@ -1168,6 +1168,8 @@ def gi_arith_extended_reg32to64_i64 :
    GIComplexOperandMatcher<s64, "selectArithExtendedRegister">,
    GIComplexPatternEquiv<arith_extended_reg32to64_i64>;

def arith_uxtx : ComplexPattern<i64, 2, "SelectArithUXTXRegister", []>;

// Floating-point immediate.
def fpimm16XForm : SDNodeXForm<fpimm, [{


@@ -1691,6 +1691,11 @@ def : InstAlias<"mov $dst, $src",
defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;

def copyFromSP: PatLeaf<(i64 GPR64:$src), [{
  return N->getOpcode() == ISD::CopyFromReg &&
         cast<RegisterSDNode>(N->getOperand(1))->getReg() == AArch64::SP;
}]>;

// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
          (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
@@ -1709,6 +1714,8 @@ def : Pat<(sub GPR32sp:$R2, arith_extended_reg32_i32:$R3),
          (SUBSWrx GPR32sp:$R2, arith_extended_reg32_i32:$R3)>;
def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64_i64:$R3),
          (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64_i64:$R3)>;

def : Pat<(sub copyFromSP:$R2, (arith_uxtx GPR64:$R3, arith_extendlsl64:$imm)),
          (SUBXrx64 GPR64sp:$R2, GPR64:$R3, arith_extendlsl64:$imm)>;
}
// Because of the immediate format for add/sub-imm instructions, the


@@ -17,7 +17,6 @@ declare void @func2(i8*)
; CHECK: add [[REG1:x[0-9]+]], x0, #15
; CHECK-OPT: lsr x15, [[REG1]], #4
; CHECK: bl __chkstk
-; CHECK: mov [[REG2:x[0-9]+]], sp
-; CHECK-OPT: sub [[REG3:x[0-9]+]], [[REG2]], x15, lsl #4
+; CHECK-OPT: sub [[REG3:x[0-9]+]], sp, x15, lsl #4
; CHECK-OPT: mov sp, [[REG3]]
; CHECK: bl func2


@@ -197,8 +197,7 @@ define void @vla(i32, i8*, ...) local_unnamed_addr {
; CHECK-NEXT: stp x5, x6, [x29, #48]
; CHECK-NEXT: str x7, [x29, #64]
; CHECK-NEXT: bl __chkstk
-; CHECK-NEXT: mov x8, sp
-; CHECK-NEXT: sub x20, x8, x15, lsl #4
+; CHECK-NEXT: sub x20, sp, x15, lsl #4
; CHECK-NEXT: mov sp, x20
; CHECK-NEXT: ldr x21, [x29, #16]
; CHECK-NEXT: sxtw x22, w0