; Forked from OSchip/llvm-project (GitHub page metadata removed during import).
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
; External callees taking 0, 8 and 16 bytes of stack arguments respectively.
; The [8 x i64] filler occupies all eight integer argument registers (x0-x7),
; forcing the trailing i64 arguments onto the stack.
declare void @callee_stack0()
declare void @callee_stack8([8 x i64], i64)
declare void @callee_stack16([8 x i64], i64, i64)
; No stack arguments on either side: the tail call lowers to a plain branch.
define dso_local void @caller_to0_from0() nounwind {
; CHECK-LABEL: caller_to0_from0:
; CHECK: // %bb.0:
; CHECK-NEXT: b callee_stack0
  tail call void @callee_stack0()
  ret void
}
; Caller receives 8 bytes on the stack but passes none: still tail-callable,
; since the callee needs no stack space from us.
define dso_local void @caller_to0_from8([8 x i64], i64) nounwind {
; CHECK-LABEL: caller_to0_from8:
; CHECK: // %bb.0:
; CHECK-NEXT: b callee_stack0
  tail call void @callee_stack0()
  ret void
}
define dso_local void @caller_to8_from0() {
; CHECK-LABEL: caller_to8_from0:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov w8, #42
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: bl callee_stack8
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret

; Caller isn't going to clean up any extra stack we allocate, so it
; can't be a tail call.
  tail call void @callee_stack8([8 x i64] undef, i64 42)
  ret void
}
define dso_local void @caller_to8_from8([8 x i64], i64 %a) {
; CHECK-LABEL: caller_to8_from8:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #42
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: b callee_stack8

; This should reuse our stack area for the 42
  tail call void @callee_stack8([8 x i64] undef, i64 42)
  ret void
}
define dso_local void @caller_to16_from8([8 x i64], i64 %a) {
; CHECK-LABEL: caller_to16_from8:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: bl callee_stack16
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret

; Shouldn't be a tail call: we can't use SP+8 because our caller might
; have something there. This may sound obvious but implementation does
; some funky aligning.
  tail call void @callee_stack16([8 x i64] undef, i64 undef, i64 undef)
  ret void
}
define dso_local void @caller_to8_from24([8 x i64], i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: caller_to8_from24:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #42
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: b callee_stack8

; Reuse our area, putting "42" at incoming sp
  tail call void @callee_stack8([8 x i64] undef, i64 42)
  ret void
}
define dso_local void @caller_to16_from16([8 x i64], i64 %a, i64 %b) {
; CHECK-LABEL: caller_to16_from16:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp x8, x9, [sp]
; CHECK-NEXT: stp x9, x8, [sp]
; CHECK-NEXT: b callee_stack16

; Here we want to make sure that both loads happen before the stores:
; otherwise either %a or %b will be wrongly clobbered.
  tail call void @callee_stack16([8 x i64] undef, i64 %b, i64 %a)
  ret void
}
; Global function pointer used by @indirect_tail below.
@func = dso_local global void(i32)* null
; Indirect tail call through a global function pointer: lowers to br (branch
; to register) rather than blr, with no frame setup.
define dso_local void @indirect_tail() {
; CHECK-LABEL: indirect_tail:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, func
; CHECK-NEXT: mov w0, #42
; CHECK-NEXT: ldr x1, [x8, :lo12:func]
; CHECK-NEXT: br x1
  %fptr = load void(i32)*, void(i32)** @func
  tail call void %fptr(i32 42)
  ret void
}