; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -O0 -global-isel -stop-after=irtranslator -verify-machineinstrs -o - %s | FileCheck %s

; Convergent sideeffect asm: the INLINEASM flag word gets both the
; sideeffect and isconvergent bits, and the !srcloc metadata is attached.
define amdgpu_kernel void @asm_convergent() convergent {
  ; CHECK-LABEL: name: asm_convergent
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   INLINEASM &s_barrier, 33 /* sideeffect isconvergent attdialect */, !0
  ; CHECK:   S_ENDPGM 0
  call void asm sideeffect "s_barrier", ""() convergent, !srcloc !0
  ret void
}

; A ~{memory} clobber adds the mayload/maystore bits to the asm flag word;
; the second (empty-constraint) asm only has the sideeffect bit.
define amdgpu_kernel void @asm_simple_memory_clobber() {
  ; CHECK-LABEL: name: asm_simple_memory_clobber
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   INLINEASM &"", 25 /* sideeffect mayload maystore attdialect */, !0
  ; CHECK:   INLINEASM &"", 1 /* sideeffect attdialect */, !0
  ; CHECK:   S_ENDPGM 0
  call void asm sideeffect "", "~{memory}"(), !srcloc !0
  call void asm sideeffect "", ""(), !srcloc !0
  ret void
}

; A ~{v0} clobber becomes an implicit-def early-clobber of the physical $vgpr0.
define amdgpu_kernel void @asm_simple_vgpr_clobber() {
  ; CHECK-LABEL: name: asm_simple_vgpr_clobber
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   INLINEASM &"v_mov_b32 v0, 7", 1 /* sideeffect attdialect */, 12 /* clobber */, implicit-def early-clobber $vgpr0, !0
  ; CHECK:   S_ENDPGM 0
  call void asm sideeffect "v_mov_b32 v0, 7", "~{v0}"(), !srcloc !0
  ret void
}

; A ~{s0} clobber becomes an implicit-def early-clobber of the physical $sgpr0.
define amdgpu_kernel void @asm_simple_sgpr_clobber() {
  ; CHECK-LABEL: name: asm_simple_sgpr_clobber
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   INLINEASM &"s_mov_b32 s0, 7", 1 /* sideeffect attdialect */, 12 /* clobber */, implicit-def early-clobber $sgpr0, !0
  ; CHECK:   S_ENDPGM 0
  call void asm sideeffect "s_mov_b32 s0, 7", "~{s0}"(), !srcloc !0
  ret void
}

; A ~{a0} clobber becomes an implicit-def early-clobber of the physical $agpr0
; (AGPRs exist on gfx908, selected by the RUN line's -mcpu).
define amdgpu_kernel void @asm_simple_agpr_clobber() {
  ; CHECK-LABEL: name: asm_simple_agpr_clobber
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   INLINEASM &"; def a0", 1 /* sideeffect attdialect */, 12 /* clobber */, implicit-def early-clobber $agpr0, !0
  ; CHECK:   S_ENDPGM 0
  call void asm sideeffect "; def a0", "~{a0}"(), !srcloc !0
  ret void
}

; "=&v" early-clobber outputs become regdef-ec operands defining virtual
; VGPR_32 registers. The unnamed call result is implicitly numbered %1
; (the unnamed entry block takes %0), which the extractvalues reference.
define i32 @asm_vgpr_early_clobber() {
  ; CHECK-LABEL: name: asm_vgpr_early_clobber
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   INLINEASM &"v_mov_b32 $0, 7; v_mov_b32 $1, 7", 1 /* sideeffect attdialect */, 1835019 /* regdef-ec:VGPR_32 */, def early-clobber %1, 1835019 /* regdef-ec:VGPR_32 */, def early-clobber %2, !0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
  ; CHECK:   [[COPY2:%[0-9]+]]:_(s32) = COPY %2
  ; CHECK:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY2]]
  ; CHECK:   $vgpr0 = COPY [[ADD]](s32)
  ; CHECK:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
  call { i32, i32 } asm sideeffect "v_mov_b32 $0, 7; v_mov_b32 $1, 7", "=&v,=&v"(), !srcloc !0
  %asmresult = extractvalue { i32, i32 } %1, 0
  %asmresult1 = extractvalue { i32, i32 } %1, 1
  %add = add i32 %asmresult, %asmresult1
  ret i32 %add
}

; An "={v1}" output pins the result to the physical register $vgpr1.
define i32 @test_specific_vgpr_output() nounwind {
  ; CHECK-LABEL: name: test_specific_vgpr_output
  ; CHECK: bb.1.entry:
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   INLINEASM &"v_mov_b32 v1, 7", 0 /* attdialect */, 10 /* regdef */, implicit-def $vgpr1
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK:   $vgpr0 = COPY [[COPY1]](s32)
  ; CHECK:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0
entry:
  %0 = tail call i32 asm "v_mov_b32 v1, 7", "={v1}"() nounwind
  ret i32 %0
}

; An "=v" output defines a virtual register in the VGPR_32 class.
define i32 @test_single_vgpr_output() nounwind {
  ; CHECK-LABEL: name: test_single_vgpr_output
  ; CHECK: bb.1.entry:
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   INLINEASM &"v_mov_b32 $0, 7", 0 /* attdialect */, 1835018 /* regdef:VGPR_32 */, def %1
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
  ; CHECK:   $vgpr0 = COPY [[COPY1]](s32)
  ; CHECK:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0
entry:
  %0 = tail call i32 asm "v_mov_b32 $0, 7", "=v"() nounwind
  ret i32 %0
}

; An "=s" output defines a virtual register in the SReg_32 class.
define i32 @test_single_sgpr_output_s32() nounwind {
  ; CHECK-LABEL: name: test_single_sgpr_output_s32
  ; CHECK: bb.1.entry:
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   INLINEASM &"s_mov_b32 $0, 7", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %1
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
  ; CHECK:   $vgpr0 = COPY [[COPY1]](s32)
  ; CHECK:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0
entry:
  %0 = tail call i32 asm "s_mov_b32 $0, 7", "=s"() nounwind
  ret i32 %0
}

; Check support for returning several floats
define float @test_multiple_register_outputs_same() #0 {
  ; CHECK-LABEL: name: test_multiple_register_outputs_same
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   INLINEASM &"v_mov_b32 $0, 0; v_mov_b32 $1, 1", 0 /* attdialect */, 1835018 /* regdef:VGPR_32 */, def %1, 1835018 /* regdef:VGPR_32 */, def %2
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
  ; CHECK:   [[COPY2:%[0-9]+]]:_(s32) = COPY %2
  ; CHECK:   [[FADD:%[0-9]+]]:_(s32) = G_FADD [[COPY1]], [[COPY2]]
  ; CHECK:   $vgpr0 = COPY [[FADD]](s32)
  ; CHECK:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
  %1 = call { float, float } asm "v_mov_b32 $0, 0; v_mov_b32 $1, 1", "=v,=v"()
  %asmresult = extractvalue { float, float } %1, 0
  %asmresult1 = extractvalue { float, float } %1, 1
  %add = fadd float %asmresult, %asmresult1
  ret float %add
}

; Check support for returning outputs of mixed size: the 64-bit member of
; the struct lands in a VReg_64 virtual register.
define double @test_multiple_register_outputs_mixed() #0 {
  ; CHECK-LABEL: name: test_multiple_register_outputs_mixed
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   INLINEASM &"v_mov_b32 $0, 0; v_add_f64 $1, 0, 0", 0 /* attdialect */, 1835018 /* regdef:VGPR_32 */, def %1, 2883594 /* regdef:VReg_64 */, def %2
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
  ; CHECK:   [[COPY2:%[0-9]+]]:_(s64) = COPY %2
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](s64)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0, implicit $vgpr1
  %1 = call { float, double } asm "v_mov_b32 $0, 0; v_add_f64 $1, 0, 0", "=v,=v"()
  %asmresult = extractvalue { float, double } %1, 1
  ret double %asmresult
}

; A vector output pinned to a specific register pair "={v[14:15]}" is read
; back as a <2 x s32> copy of $vgpr14_vgpr15.
define float @test_vector_output() nounwind {
  ; CHECK-LABEL: name: test_vector_output
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK:   INLINEASM &"v_add_f64 $0, 0, 0", 1 /* sideeffect attdialect */, 10 /* regdef */, implicit-def $vgpr14_vgpr15
  ; CHECK:   [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr14_vgpr15
  ; CHECK:   [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY1]](<2 x s32>), [[C]](s32)
  ; CHECK:   $vgpr0 = COPY [[EVEC]](s32)
  ; CHECK:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0
  %1 = tail call <2 x float> asm sideeffect "v_add_f64 $0, 0, 0", "={v[14:15]}"() nounwind
  %2 = extractelement <2 x float> %1, i32 0
  ret float %2
}

; An immediate fed to a "v" constraint is materialized as a G_CONSTANT and
; copied into a VGPR_32 reguse operand.
define amdgpu_kernel void @test_input_vgpr_imm() {
  ; CHECK-LABEL: name: test_input_vgpr_imm
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY [[C]](s32)
  ; CHECK:   INLINEASM &"v_mov_b32 v0, $0", 1 /* sideeffect attdialect */, 1835017 /* reguse:VGPR_32 */, [[COPY]]
  ; CHECK:   S_ENDPGM 0
  call void asm sideeffect "v_mov_b32 v0, $0", "v"(i32 42)
  ret void
}

; An immediate fed to an "s" constraint is materialized as a G_CONSTANT and
; copied into an SReg_32 reguse operand.
define amdgpu_kernel void @test_input_sgpr_imm() {
  ; CHECK-LABEL: name: test_input_sgpr_imm
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
  ; CHECK:   [[COPY:%[0-9]+]]:sreg_32 = COPY [[C]](s32)
  ; CHECK:   INLINEASM &"s_mov_b32 s0, $0", 1 /* sideeffect attdialect */, 1966089 /* reguse:SReg_32 */, [[COPY]]
  ; CHECK:   S_ENDPGM 0
  call void asm sideeffect "s_mov_b32 s0, $0", "s"(i32 42)
  ret void
}

; An "i" constraint passes the value through as a raw imm operand for both
; 32-bit and 64-bit immediates.
define amdgpu_kernel void @test_input_imm() {
  ; CHECK-LABEL: name: test_input_imm
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   INLINEASM &"s_mov_b32 s0, $0", 9 /* sideeffect mayload attdialect */, 13 /* imm */, 42
  ; CHECK:   INLINEASM &"s_mov_b64 s[0:1], $0", 9 /* sideeffect mayload attdialect */, 13 /* imm */, 42
  ; CHECK:   S_ENDPGM 0
  call void asm sideeffect "s_mov_b32 s0, $0", "i"(i32 42)
  call void asm sideeffect "s_mov_b64 s[0:1], $0", "i"(i64 42)
  ret void
}

; A "v" input from a function argument is copied into a VGPR_32 reguse
; operand alongside the "=v" regdef output.
define float @test_input_vgpr(i32 %src) nounwind {
  ; CHECK-LABEL: name: test_input_vgpr
  ; CHECK: bb.1.entry:
  ; CHECK:   liveins: $vgpr0, $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]](s32)
  ; CHECK:   INLINEASM &"v_add_f32 $0, 1.0, $1", 0 /* attdialect */, 1835018 /* regdef:VGPR_32 */, def %2, 1835017 /* reguse:VGPR_32 */, [[COPY2]]
  ; CHECK:   [[COPY3:%[0-9]+]]:_(s32) = COPY %2
  ; CHECK:   $vgpr0 = COPY [[COPY3]](s32)
  ; CHECK:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
  ; CHECK:   S_SETPC_B64_return [[COPY4]], implicit $vgpr0
entry:
  %0 = tail call float asm "v_add_f32 $0, 1.0, $1", "=v,v"(i32 %src) nounwind
  ret float %0
}

; An indirect "*m" constraint becomes a mem:m operand carrying the p3
; (LDS address space) pointer.
define i32 @test_memory_constraint(i32 addrspace(3)* %a) nounwind {
  ; CHECK-LABEL: name: test_memory_constraint
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $vgpr0, $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   INLINEASM &"ds_read_b32 $0, $1", 8 /* mayload attdialect */, 1835018 /* regdef:VGPR_32 */, def %2, 196622 /* mem:m */, [[COPY]](p3)
  ; CHECK:   [[COPY2:%[0-9]+]]:_(s32) = COPY %2
  ; CHECK:   $vgpr0 = COPY [[COPY2]](s32)
  ; CHECK:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
  ; CHECK:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
  %1 = tail call i32 asm "ds_read_b32 $0, $1", "=v,*m"(i32 addrspace(3)* %a)
  ret i32 %1
}

; A "0" matching constraint ties the input operand to output $0
; (reguse tiedto:$0 / tied-def).
define i32 @test_vgpr_matching_constraint(i32 %a) nounwind {
  ; CHECK-LABEL: name: test_vgpr_matching_constraint
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $vgpr0, $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK:   [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[AND]](s32)
  ; CHECK:   INLINEASM &";", 1 /* sideeffect attdialect */, 1835018 /* regdef:VGPR_32 */, def %4, 2147483657 /* reguse tiedto:$0 */, [[COPY2]](tied-def 3)
  ; CHECK:   [[COPY3:%[0-9]+]]:_(s32) = COPY %4
  ; CHECK:   $vgpr0 = COPY [[COPY3]](s32)
  ; CHECK:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
  ; CHECK:   S_SETPC_B64_return [[COPY4]], implicit $vgpr0
  %and = and i32 %a, 1
  %asm = call i32 asm sideeffect ";", "=v,0"(i32 %and)
  ret i32 %asm
}

; SGPR outputs from earlier asm feed a later asm through both a plain "s"
; reguse and a "0" matching (tied) constraint.
define i32 @test_sgpr_matching_constraint() nounwind {
  ; CHECK-LABEL: name: test_sgpr_matching_constraint
  ; CHECK: bb.1.entry:
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   INLINEASM &"s_mov_b32 $0, 7", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %1
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
  ; CHECK:   INLINEASM &"s_mov_b32 $0, 8", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %3
  ; CHECK:   [[COPY2:%[0-9]+]]:_(s32) = COPY %3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY1]](s32)
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY2]](s32)
  ; CHECK:   INLINEASM &"s_add_u32 $0, $1, $2", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %5, 1966089 /* reguse:SReg_32 */, [[COPY3]], 2147483657 /* reguse tiedto:$0 */, [[COPY4]](tied-def 3)
  ; CHECK:   [[COPY5:%[0-9]+]]:_(s32) = COPY %5
  ; CHECK:   $vgpr0 = COPY [[COPY5]](s32)
  ; CHECK:   [[COPY6:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY6]], implicit $vgpr0
entry:
  %asm0 = tail call i32 asm "s_mov_b32 $0, 7", "=s"() nounwind
  %asm1 = tail call i32 asm "s_mov_b32 $0, 8", "=s"() nounwind
  %asm2 = tail call i32 asm "s_add_u32 $0, $1, $2", "=s,s,0"(i32 %asm0, i32 %asm1) nounwind
  ret i32 %asm2
}

; Multiple matching constraints "0,2,1": each input is tied to the
; corresponding numbered output, in the order given by the constraint string.
define void @test_many_matching_constraints(i32 %a, i32 %b, i32 %c) nounwind {
  ; CHECK-LABEL: name: test_many_matching_constraints
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]](s32)
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[COPY]](s32)
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY1]](s32)
  ; CHECK:   INLINEASM &"; ", 1 /* sideeffect attdialect */, 1835018 /* regdef:VGPR_32 */, def %4, 1835018 /* regdef:VGPR_32 */, def %5, 1835018 /* regdef:VGPR_32 */, def %6, 2147483657 /* reguse tiedto:$0 */, [[COPY4]](tied-def 3), 2147614729 /* reguse tiedto:$2 */, [[COPY5]](tied-def 7), 2147549193 /* reguse tiedto:$1 */, [[COPY6]](tied-def 5)
  ; CHECK:   [[COPY7:%[0-9]+]]:_(s32) = COPY %4
  ; CHECK:   [[COPY8:%[0-9]+]]:_(s32) = COPY %5
  ; CHECK:   [[COPY9:%[0-9]+]]:_(s32) = COPY %6
  ; CHECK:   G_STORE [[COPY7]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   G_STORE [[COPY8]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   G_STORE [[COPY9]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
  ; CHECK:   S_SETPC_B64_return [[COPY10]]
  %asm = call {i32, i32, i32} asm sideeffect "; ", "=v,=v,=v,0,2,1"(i32 %c, i32 %a, i32 %b)
  %asmresult0 = extractvalue  {i32, i32, i32} %asm, 0
  store i32 %asmresult0, i32 addrspace(1)* undef
  %asmresult1 = extractvalue  {i32, i32, i32} %asm, 1
  store i32 %asmresult1, i32 addrspace(1)* undef
  %asmresult2 = extractvalue  {i32, i32, i32} %asm, 2
  store i32 %asmresult2, i32 addrspace(1)* undef
  ret void
}

; An SGPR-class asm result tied to a VGPR-class "=v,0" asm forces a copy
; into a vgpr_32 before the second INLINEASM.
define i32 @test_sgpr_to_vgpr_move_matching_constraint() nounwind {
  ; CHECK-LABEL: name: test_sgpr_to_vgpr_move_matching_constraint
  ; CHECK: bb.1.entry:
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   INLINEASM &"s_mov_b32 $0, 7", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %1
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY1]](s32)
  ; CHECK:   INLINEASM &"v_mov_b32 $0, $1", 0 /* attdialect */, 1835018 /* regdef:VGPR_32 */, def %3, 2147483657 /* reguse tiedto:$0 */, [[COPY2]](tied-def 3)
  ; CHECK:   [[COPY3:%[0-9]+]]:_(s32) = COPY %3
  ; CHECK:   $vgpr0 = COPY [[COPY3]](s32)
  ; CHECK:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY4]], implicit $vgpr0
entry:
  %asm0 = tail call i32 asm "s_mov_b32 $0, 7", "=s"() nounwind
  %asm1 = tail call i32 asm "v_mov_b32 $0, $1", "=v,0"(i32 %asm0) nounwind
  ret i32 %asm1
}

; An "n" (known integer constant) constraint is lowered to an imm operand.
define amdgpu_kernel void @asm_constraint_n_n()  {
  ; CHECK-LABEL: name: asm_constraint_n_n
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   INLINEASM &"s_trap ${0:n}", 1 /* sideeffect attdialect */, 13 /* imm */, 10
  ; CHECK:   S_ENDPGM 0
  tail call void asm sideeffect "s_trap ${0:n}", "n"(i32 10) #1
  ret void
}

; Cookie value carried by the !srcloc attachments on the asm calls in this file.
!0 = !{i32 70}