; RUN: llc < %s -mtriple arm64-apple-darwin -aarch64-load-store-opt=false -asm-verbose=false | FileCheck %s
; Disable the load/store optimizer to avoid having LDP/STPs and simplify checks.

target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"

; Check that we don't try to tail-call with an sret-demoted return.

declare i1024 @test_sret() #0

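; An i1024 return value does not fit in registers, so it is demoted to an
; implicit sret: the caller passes a result buffer in x8 (the AArch64 indirect
; result register), and after the call it copies the returned value into its
; own incoming sret buffer. Roughly equivalent explicit-sret shape, shown only
; as an illustrative sketch (the name is made up and not part of this test):
;   declare void @test_sret.demoted(i1024* sret)
; That copy after the call is what makes these calls ineligible for tail calls.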
; CHECK-LABEL: _test_call_sret:
; CHECK: mov  x[[CALLERX8NUM:[0-9]+]], x8
; CHECK: mov  x8, sp
; CHECK-NEXT: bl _test_sret
; CHECK-NEXT: ldr [[CALLERSRET1:x[0-9]+]], [sp]
; CHECK: str [[CALLERSRET1]], [x[[CALLERX8NUM]]]
; CHECK: ret
define i1024 @test_call_sret() #0 {
  %a = call i1024 @test_sret()
  ret i1024 %a
}

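; Even though the call is marked 'tail', it cannot be emitted as a tail call:
; the demoted result still has to be copied into the caller's own sret buffer
; after the call returns, so we expect 'bl' rather than 'b'.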
; CHECK-LABEL: _test_tailcall_sret:
; CHECK: mov  x[[CALLERX8NUM:[0-9]+]], x8
; CHECK: mov  x8, sp
; CHECK-NEXT: bl _test_sret
; CHECK-NEXT: ldr [[CALLERSRET1:x[0-9]+]], [sp]
; CHECK: str [[CALLERSRET1]], [x[[CALLERX8NUM]]]
; CHECK: ret
define i1024 @test_tailcall_sret() #0 {
  %a = tail call i1024 @test_sret()
  ret i1024 %a
}

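; Same for an indirect call through %f: it must stay a regular 'blr' rather
; than being turned into a 'br' tail call.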
; CHECK-LABEL: _test_indirect_tailcall_sret:
; CHECK: mov  x[[CALLERX8NUM:[0-9]+]], x8
; CHECK: mov  x8, sp
; CHECK-NEXT: blr x0
; CHECK-NEXT: ldr [[CALLERSRET1:x[0-9]+]], [sp]
; CHECK: str [[CALLERSRET1]], [x[[CALLERX8NUM]]]
; CHECK: ret
define i1024 @test_indirect_tailcall_sret(i1024 ()* %f) #0 {
  %a = tail call i1024 %f()
  ret i1024 %a
}

attributes #0 = { nounwind }