1; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2; RUN: llc -global-isel -stop-after=irtranslator -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck -enable-var-scope %s
3
4; amdgpu_gfx calling convention
5declare hidden amdgpu_gfx void @external_gfx_void_func_void() #0
6declare hidden amdgpu_gfx void @external_gfx_void_func_i32(i32) #0
7declare hidden amdgpu_gfx void @external_gfx_void_func_i32_inreg(i32 inreg) #0
8declare hidden amdgpu_gfx void @external_gfx_void_func_struct_i8_i32({ i8, i32 }) #0
9declare hidden amdgpu_gfx void @external_gfx_void_func_struct_i8_i32_inreg({ i8, i32 } inreg) #0
10
; Baseline case: amdgpu_gfx caller -> amdgpu_gfx void() callee with no arguments.
; Per the CHECK lines below, the call lowers to SI_CALL using the
; csr_amdgpu_highregs callee-saved set, forwards the scratch resource
; descriptor in $sgpr0_sgpr1_sgpr2_sgpr3, and preserves the return address
; ($sgpr30_sgpr31) across the call for the final S_SETPC_B64_return.
define amdgpu_gfx void @test_gfx_call_external_void_func_void() #0 {
  ; CHECK-LABEL: name: test_gfx_call_external_void_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_gfx_void_func_void
  ; CHECK:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_gfx_void_func_void, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3
  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
  ; CHECK:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY2]]
  call amdgpu_gfx void @external_gfx_void_func_void()
  ret void
}
27
; Immediate i32 argument: the CHECK lines show the constant 42 materialized
; with G_CONSTANT and passed by-value in $vgpr0 (default VGPR argument slot
; for non-inreg scalars under amdgpu_gfx); compare with the _inreg variant
; below, which uses an SGPR instead.
define amdgpu_gfx void @test_gfx_call_external_void_func_i32_imm(i32) #0 {
  ; CHECK-LABEL: name: test_gfx_call_external_void_func_i32_imm
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK:   liveins: $vgpr0, $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_gfx_void_func_i32
  ; CHECK:   $vgpr0 = COPY [[C]](s32)
  ; CHECK:   [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]](<4 x s32>)
  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_gfx_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3
  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
  ; CHECK:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
  ; CHECK:   S_SETPC_B64_return [[COPY3]]
  call amdgpu_gfx void @external_gfx_void_func_i32(i32 42)
  ret void
}
47
; inreg i32 argument: identical to the test above except for the inreg
; attribute. The CHECK lines show the argument is now passed in $sgpr4
; (first user SGPR argument slot for amdgpu_gfx) rather than $vgpr0, and
; the SI_CALL carries `implicit $sgpr4` accordingly.
define amdgpu_gfx void @test_gfx_call_external_void_func_i32_imm_inreg(i32 inreg) #0 {
  ; CHECK-LABEL: name: test_gfx_call_external_void_func_i32_imm_inreg
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK:   liveins: $sgpr4, $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_gfx_void_func_i32_inreg
  ; CHECK:   $sgpr4 = COPY [[C]](s32)
  ; CHECK:   [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]](<4 x s32>)
  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_gfx_void_func_i32_inreg, csr_amdgpu_highregs, implicit $sgpr4, implicit $sgpr0_sgpr1_sgpr2_sgpr3
  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
  ; CHECK:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
  ; CHECK:   S_SETPC_B64_return [[COPY3]]
  call amdgpu_gfx void @external_gfx_void_func_i32_inreg(i32 inreg 42)
  ret void
}
67
; By-value struct argument { i8, i32 } loaded from memory. The CHECK lines
; show the struct is split per-field for the call: the i8 field is
; any-extended s8 -> s16 -> s32 and passed in $vgpr0, and the i32 field is
; passed in $vgpr1. The field loads reflect the struct layout ({ i8, i32 }
; has the i32 at offset 4, hence the G_PTR_ADD by 4 and align-4 i8 load).
define amdgpu_gfx void @test_gfx_call_external_void_func_struct_i8_i32() #0 {
  ; CHECK-LABEL: name: test_gfx_call_external_void_func_struct_i8_i32
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load (p1) from `{ i8, i32 } addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p1) :: (load (s8) from %ir.ptr0, align 4, addrspace 1)
  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
  ; CHECK:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from %ir.ptr0 + 4, addrspace 1)
  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_gfx_void_func_struct_i8_i32
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[LOAD1]](s8)
  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT1]](s32)
  ; CHECK:   $vgpr1 = COPY [[LOAD2]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_gfx_void_func_struct_i8_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3
  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
  ; CHECK:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY2]]
  %ptr0 = load { i8, i32 } addrspace(1)*, { i8, i32 } addrspace(1)* addrspace(4)* undef
  %val = load { i8, i32 }, { i8, i32 } addrspace(1)* %ptr0
  call amdgpu_gfx void @external_gfx_void_func_struct_i8_i32({ i8, i32 } %val)
  ret void
}
96
; inreg variant of the by-value struct test above: same loads and per-field
; split, but per the CHECK lines the fields go to SGPR argument slots
; ($sgpr4 for the any-extended i8, $sgpr5 for the i32) instead of
; $vgpr0/$vgpr1.
define amdgpu_gfx void @test_gfx_call_external_void_func_struct_i8_i32_inreg() #0 {
  ; CHECK-LABEL: name: test_gfx_call_external_void_func_struct_i8_i32_inreg
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load (p1) from `{ i8, i32 } addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p1) :: (load (s8) from %ir.ptr0, align 4, addrspace 1)
  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
  ; CHECK:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from %ir.ptr0 + 4, addrspace 1)
  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_gfx_void_func_struct_i8_i32_inreg
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[LOAD1]](s8)
  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
  ; CHECK:   $sgpr4 = COPY [[ANYEXT1]](s32)
  ; CHECK:   $sgpr5 = COPY [[LOAD2]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_gfx_void_func_struct_i8_i32_inreg, csr_amdgpu_highregs, implicit $sgpr4, implicit $sgpr5, implicit $sgpr0_sgpr1_sgpr2_sgpr3
  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
  ; CHECK:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY2]]
  %ptr0 = load { i8, i32 } addrspace(1)*, { i8, i32 } addrspace(1)* addrspace(4)* undef
  %val = load { i8, i32 }, { i8, i32 } addrspace(1)* %ptr0
  call amdgpu_gfx void @external_gfx_void_func_struct_i8_i32_inreg({ i8, i32 } inreg %val)
  ret void
}
125
126attributes #0 = { nounwind }
127attributes #1 = { nounwind readnone }
128attributes #2 = { nounwind noinline }
129