; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -global-isel-abort=0 -stop-after=irtranslator -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -verify-machineinstrs -o - %s | FileCheck -enable-var-scope %s

; FIXME: Also test with a pre-gfx8 target.
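; Checks how the IRTranslator lowers return values of scalar, pointer, and
; vector types: sub-32-bit and non-power-of-2 values are extended, and wide
; values are split, into 32-bit pieces returned in $vgpr0..$vgpr31.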

define i1 @i1_func_void() #0 {
  ; CHECK-LABEL: name: i1_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `i1 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s1)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i1, i1 addrspace(1)* undef
  ret i1 %val
}

define zeroext i1 @i1_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i1_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `i1 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s1)
  ; CHECK:   $vgpr0 = COPY [[ZEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i1, i1 addrspace(1)* undef
  ret i1 %val
}

define signext i1 @i1_signext_func_void() #0 {
  ; CHECK-LABEL: name: i1_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `i1 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s1)
  ; CHECK:   $vgpr0 = COPY [[SEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i1, i1 addrspace(1)* undef
  ret i1 %val
}

define i7 @i7_func_void() #0 {
  ; CHECK-LABEL: name: i7_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `i7 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s7)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i7, i7 addrspace(1)* undef
  ret i7 %val
}

define zeroext i7 @i7_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i7_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `i7 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s7)
  ; CHECK:   $vgpr0 = COPY [[ZEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i7, i7 addrspace(1)* undef
  ret i7 %val
}

define signext i7 @i7_signext_func_void() #0 {
  ; CHECK-LABEL: name: i7_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `i7 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s7)
  ; CHECK:   $vgpr0 = COPY [[SEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i7, i7 addrspace(1)* undef
  ret i7 %val
}

define i8 @i8_func_void() #0 {
  ; CHECK-LABEL: name: i8_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `i8 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s8)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i8, i8 addrspace(1)* undef
  ret i8 %val
}

define zeroext i8 @i8_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i8_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `i8 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s8)
  ; CHECK:   $vgpr0 = COPY [[ZEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i8, i8 addrspace(1)* undef
  ret i8 %val
}

define signext i8 @i8_signext_func_void() #0 {
  ; CHECK-LABEL: name: i8_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `i8 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s8)
  ; CHECK:   $vgpr0 = COPY [[SEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i8, i8 addrspace(1)* undef
  ret i8 %val
}

define i16 @i16_func_void() #0 {
  ; CHECK-LABEL: name: i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `i16 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i16, i16 addrspace(1)* undef
  ret i16 %val
}

define zeroext i16 @i16_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i16_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `i16 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s16)
  ; CHECK:   $vgpr0 = COPY [[ZEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i16, i16 addrspace(1)* undef
  ret i16 %val
}

define signext i16 @i16_signext_func_void() #0 {
  ; CHECK-LABEL: name: i16_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `i16 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s16)
  ; CHECK:   $vgpr0 = COPY [[SEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i16, i16 addrspace(1)* undef
  ret i16 %val
}

define half @f16_func_void() #0 {
  ; CHECK-LABEL: name: f16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `half addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load half, half addrspace(1)* undef
  ret half %val
}

define i24 @i24_func_void() #0 {
  ; CHECK-LABEL: name: i24_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `i24 addrspace(1)* undef`, align 4, addrspace 1)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s24)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i24, i24 addrspace(1)* undef
  ret i24 %val
}

define zeroext i24 @i24_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i24_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `i24 addrspace(1)* undef`, align 4, addrspace 1)
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s24)
  ; CHECK:   $vgpr0 = COPY [[ZEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i24, i24 addrspace(1)* undef
  ret i24 %val
}

define signext i24 @i24_signext_func_void() #0 {
  ; CHECK-LABEL: name: i24_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `i24 addrspace(1)* undef`, align 4, addrspace 1)
  ; CHECK:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s24)
  ; CHECK:   $vgpr0 = COPY [[SEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i24, i24 addrspace(1)* undef
  ret i24 %val
}

define <2 x i24> @v2i24_func_void() #0 {
  ; CHECK-LABEL: name: v2i24_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s24>) = G_LOAD [[DEF]](p1) :: (load (<2 x s24>) from `<2 x i24> addrspace(1)* undef`, align 8, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s24), [[UV1:%[0-9]+]]:_(s24) = G_UNMERGE_VALUES [[LOAD]](<2 x s24>)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s24)
  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s24)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   $vgpr1 = COPY [[ANYEXT1]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
  %val = load <2 x i24>, <2 x i24> addrspace(1)* undef
  ret <2 x i24> %val
}

define <3 x i24> @v3i24_func_void() #0 {
  ; CHECK-LABEL: name: v3i24_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<3 x s24>) = G_LOAD [[DEF]](p1) :: (load (<3 x s24>) from `<3 x i24> addrspace(1)* undef`, align 16, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s24), [[UV1:%[0-9]+]]:_(s24), [[UV2:%[0-9]+]]:_(s24) = G_UNMERGE_VALUES [[LOAD]](<3 x s24>)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s24)
  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s24)
  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s24)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   $vgpr1 = COPY [[ANYEXT1]](s32)
  ; CHECK:   $vgpr2 = COPY [[ANYEXT2]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %val = load <3 x i24>, <3 x i24> addrspace(1)* undef
  ret <3 x i24> %val
}

define i32 @i32_func_void() #0 {
  ; CHECK-LABEL: name: i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   $vgpr0 = COPY [[LOAD]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i32, i32 addrspace(1)* undef
  ret i32 %val
}

define i48 @i48_func_void() #0 {
  ; CHECK-LABEL: name: i48_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (load (s48) from `i48 addrspace(1)* undef`, align 8, addrspace 1)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s48)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT]](s64)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
  %val = load i48, i48 addrspace(1)* undef, align 8
  ret i48 %val
}

define signext i48 @i48_signext_func_void() #0 {
  ; CHECK-LABEL: name: i48_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (load (s48) from `i48 addrspace(1)* undef`, align 8, addrspace 1)
  ; CHECK:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[LOAD]](s48)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
  %val = load i48, i48 addrspace(1)* undef, align 8
  ret i48 %val
}

define zeroext i48 @i48_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i48_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (load (s48) from `i48 addrspace(1)* undef`, align 8, addrspace 1)
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD]](s48)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s64)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
  %val = load i48, i48 addrspace(1)* undef, align 8
  ret i48 %val
}

define i64 @i64_func_void() #0 {
  ; CHECK-LABEL: name: i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[DEF]](p1) :: (load (s64) from `i64 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
  %val = load i64, i64 addrspace(1)* undef
  ret i64 %val
}

define i65 @i65_func_void() #0 {
  ; CHECK-LABEL: name: i65_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s65) = G_LOAD [[DEF]](p1) :: (load (s65) from `i65 addrspace(1)* undef`, align 8, addrspace 1)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s96) = G_ANYEXT [[LOAD]](s65)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT]](s96)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %val = load i65, i65 addrspace(1)* undef
  ret i65 %val
}

define signext i65 @i65_signext_func_void() #0 {
  ; CHECK-LABEL: name: i65_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s65) = G_LOAD [[DEF]](p1) :: (load (s65) from `i65 addrspace(1)* undef`, align 8, addrspace 1)
  ; CHECK:   [[SEXT:%[0-9]+]]:_(s96) = G_SEXT [[LOAD]](s65)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s96)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %val = load i65, i65 addrspace(1)* undef
  ret i65 %val
}

define zeroext i65 @i65_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i65_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s65) = G_LOAD [[DEF]](p1) :: (load (s65) from `i65 addrspace(1)* undef`, align 8, addrspace 1)
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s96) = G_ZEXT [[LOAD]](s65)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s96)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %val = load i65, i65 addrspace(1)* undef
  ret i65 %val
}

define float @f32_func_void() #0 {
  ; CHECK-LABEL: name: f32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (load (s32) from `float addrspace(1)* undef`, addrspace 1)
  ; CHECK:   $vgpr0 = COPY [[LOAD]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load float, float addrspace(1)* undef
  ret float %val
}

define double @f64_func_void() #0 {
  ; CHECK-LABEL: name: f64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[DEF]](p1) :: (load (s64) from `double addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
  %val = load double, double addrspace(1)* undef
  ret double %val
}

define <2 x double> @v2f64_func_void() #0 {
  ; CHECK-LABEL: name: v2f64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[DEF]](p1) :: (load (<2 x s64>) from `<2 x double> addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %val = load <2 x double>, <2 x double> addrspace(1)* undef
  ret <2 x double> %val
}

define <2 x i32> @v2i32_func_void() #0 {
  ; CHECK-LABEL: name: v2i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[DEF]](p1) :: (load (<2 x s32>) from `<2 x i32> addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
  %val = load <2 x i32>, <2 x i32> addrspace(1)* undef
  ret <2 x i32> %val
}

define <3 x i32> @v3i32_func_void() #0 {
  ; CHECK-LABEL: name: v3i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[DEF]](p1) :: (load (<3 x s32>) from `<3 x i32> addrspace(1)* undef`, align 16, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<3 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %val = load <3 x i32>, <3 x i32> addrspace(1)* undef
  ret <3 x i32> %val
}

define <4 x i32> @v4i32_func_void() #0 {
  ; CHECK-LABEL: name: v4i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[DEF]](p1) :: (load (<4 x s32>) from `<4 x i32> addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %val = load <4 x i32>, <4 x i32> addrspace(1)* undef
  ret <4 x i32> %val
}

define <5 x i32> @v5i32_func_void() #0 {
  ; CHECK-LABEL: name: v5i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<5 x s32>) = G_LOAD [[DEF]](p1) :: (volatile load (<5 x s32>) from `<5 x i32> addrspace(1)* undef`, align 32, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<5 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4
  %val = load volatile <5 x i32>, <5 x i32> addrspace(1)* undef
  ret <5 x i32> %val
}

define <8 x i32> @v8i32_func_void() #0 {
  ; CHECK-LABEL: name: v8i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<8 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<8 x s32>) from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<8 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
  %ptr = load volatile <8 x i32> addrspace(1)*, <8 x i32> addrspace(1)* addrspace(4)* undef
  %val = load <8 x i32>, <8 x i32> addrspace(1)* %ptr
  ret <8 x i32> %val
}

define <16 x i32> @v16i32_func_void() #0 {
  ; CHECK-LABEL: name: v16i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<16 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<16 x s32>) from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<16 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
  %ptr = load volatile <16 x i32> addrspace(1)*, <16 x i32> addrspace(1)* addrspace(4)* undef
  %val = load <16 x i32>, <16 x i32> addrspace(1)* %ptr
  ret <16 x i32> %val
}

define <32 x i32> @v32i32_func_void() #0 {
  ; CHECK-LABEL: name: v32i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<32 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<32 x s32>) from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<32 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK:   $vgpr16 = COPY [[UV16]](s32)
  ; CHECK:   $vgpr17 = COPY [[UV17]](s32)
  ; CHECK:   $vgpr18 = COPY [[UV18]](s32)
  ; CHECK:   $vgpr19 = COPY [[UV19]](s32)
  ; CHECK:   $vgpr20 = COPY [[UV20]](s32)
  ; CHECK:   $vgpr21 = COPY [[UV21]](s32)
  ; CHECK:   $vgpr22 = COPY [[UV22]](s32)
  ; CHECK:   $vgpr23 = COPY [[UV23]](s32)
  ; CHECK:   $vgpr24 = COPY [[UV24]](s32)
  ; CHECK:   $vgpr25 = COPY [[UV25]](s32)
  ; CHECK:   $vgpr26 = COPY [[UV26]](s32)
  ; CHECK:   $vgpr27 = COPY [[UV27]](s32)
  ; CHECK:   $vgpr28 = COPY [[UV28]](s32)
  ; CHECK:   $vgpr29 = COPY [[UV29]](s32)
  ; CHECK:   $vgpr30 = COPY [[UV30]](s32)
  ; CHECK:   $vgpr31 = COPY [[UV31]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
  %ptr = load volatile <32 x i32> addrspace(1)*, <32 x i32> addrspace(1)* addrspace(4)* undef
  %val = load <32 x i32>, <32 x i32> addrspace(1)* %ptr
  ret <32 x i32> %val
}

define <2 x i64> @v2i64_func_void() #0 {
  ; CHECK-LABEL: name: v2i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[DEF]](p1) :: (load (<2 x s64>) from `<2 x i64> addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %val = load <2 x i64>, <2 x i64> addrspace(1)* undef
  ret <2 x i64> %val
}

define <3 x i64> @v3i64_func_void() #0 {
  ; CHECK-LABEL: name: v3i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<3 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<3 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<3 x s64>) from %ir.ptr, align 32, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s64>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
  %ptr = load volatile <3 x i64> addrspace(1)*, <3 x i64> addrspace(1)* addrspace(4)* undef
  %val = load <3 x i64>, <3 x i64> addrspace(1)* %ptr
  ret <3 x i64> %val
}

define <4 x i64> @v4i64_func_void() #0 {
  ; CHECK-LABEL: name: v4i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<4 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<4 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<4 x s64>) from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<4 x s64>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
  %ptr = load volatile <4 x i64> addrspace(1)*, <4 x i64> addrspace(1)* addrspace(4)* undef
  %val = load <4 x i64>, <4 x i64> addrspace(1)* %ptr
  ret <4 x i64> %val
}

define <5 x i64> @v5i64_func_void() #0 {
  ; CHECK-LABEL: name: v5i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<5 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<5 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<5 x s64>) from %ir.ptr, align 64, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<5 x s64>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9
  %ptr = load volatile <5 x i64> addrspace(1)*, <5 x i64> addrspace(1)* addrspace(4)* undef
  %val = load <5 x i64>, <5 x i64> addrspace(1)* %ptr
  ret <5 x i64> %val
}

define <8 x i64> @v8i64_func_void() #0 {
  ; CHECK-LABEL: name: v8i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<8 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<8 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<8 x s64>) from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<8 x s64>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
  %ptr = load volatile <8 x i64> addrspace(1)*, <8 x i64> addrspace(1)* addrspace(4)* undef
  %val = load <8 x i64>, <8 x i64> addrspace(1)* %ptr
  ret <8 x i64> %val
}

define <16 x i64> @v16i64_func_void() #0 {
  ; CHECK-LABEL: name: v16i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<16 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<16 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<16 x s64>) from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<16 x s64>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK:   $vgpr16 = COPY [[UV16]](s32)
  ; CHECK:   $vgpr17 = COPY [[UV17]](s32)
  ; CHECK:   $vgpr18 = COPY [[UV18]](s32)
  ; CHECK:   $vgpr19 = COPY [[UV19]](s32)
  ; CHECK:   $vgpr20 = COPY [[UV20]](s32)
  ; CHECK:   $vgpr21 = COPY [[UV21]](s32)
  ; CHECK:   $vgpr22 = COPY [[UV22]](s32)
  ; CHECK:   $vgpr23 = COPY [[UV23]](s32)
  ; CHECK:   $vgpr24 = COPY [[UV24]](s32)
  ; CHECK:   $vgpr25 = COPY [[UV25]](s32)
  ; CHECK:   $vgpr26 = COPY [[UV26]](s32)
  ; CHECK:   $vgpr27 = COPY [[UV27]](s32)
  ; CHECK:   $vgpr28 = COPY [[UV28]](s32)
  ; CHECK:   $vgpr29 = COPY [[UV29]](s32)
  ; CHECK:   $vgpr30 = COPY [[UV30]](s32)
  ; CHECK:   $vgpr31 = COPY [[UV31]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
  %ptr = load volatile <16 x i64> addrspace(1)*, <16 x i64> addrspace(1)* addrspace(4)* undef
  %val = load <16 x i64>, <16 x i64> addrspace(1)* %ptr
  ret <16 x i64> %val
}

define <2 x i16> @v2i16_func_void() #0 {
  ; CHECK-LABEL: name: v2i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[DEF]](p1) :: (load (<2 x s16>) from `<2 x i16> addrspace(1)* undef`, addrspace 1)
  ; CHECK:   $vgpr0 = COPY [[LOAD]](<2 x s16>)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load <2 x i16>, <2 x i16> addrspace(1)* undef
  ret <2 x i16> %val
}

define <2 x half> @v2f16_func_void() #0 {
  ; CHECK-LABEL: name: v2f16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[DEF]](p1) :: (load (<2 x s16>) from `<2 x half> addrspace(1)* undef`, addrspace 1)
  ; CHECK:   $vgpr0 = COPY [[LOAD]](<2 x s16>)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load <2 x half>, <2 x half> addrspace(1)* undef
  ret <2 x half> %val
}

define <3 x i16> @v3i16_func_void() #0 {
  ; CHECK-LABEL: name: v3i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<3 x s16>) = G_LOAD [[DEF]](p1) :: (load (<3 x s16>) from `<3 x i16> addrspace(1)* undef`, align 8, addrspace 1)
  ; CHECK:   [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
  ; CHECK:   [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[LOAD]](<3 x s16>), [[DEF1]](<3 x s16>)
  ; CHECK:   [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<6 x s16>)
  ; CHECK:   $vgpr0 = COPY [[UV]](<2 x s16>)
  ; CHECK:   $vgpr1 = COPY [[UV1]](<2 x s16>)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
  %val = load <3 x i16>, <3 x i16> addrspace(1)* undef
  ret <3 x i16> %val
}

define <4 x i16> @v4i16_func_void() #0 {
  ; CHECK-LABEL: name: v4i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[DEF]](p1) :: (load (<4 x s16>) from `<4 x i16> addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
  ; CHECK:   $vgpr0 = COPY [[UV]](<2 x s16>)
  ; CHECK:   $vgpr1 = COPY [[UV1]](<2 x s16>)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
  %val = load <4 x i16>, <4 x i16> addrspace(1)* undef
  ret <4 x i16> %val
}

define <4 x half> @v4f16_func_void() #0 {
  ; CHECK-LABEL: name: v4f16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[DEF]](p1) :: (load (<4 x s16>) from `<4 x half> addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
  ; CHECK:   $vgpr0 = COPY [[UV]](<2 x s16>)
  ; CHECK:   $vgpr1 = COPY [[UV1]](<2 x s16>)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
  %val = load <4 x half>, <4 x half> addrspace(1)* undef
  ret <4 x half> %val
}

define <5 x i16> @v5i16_func_void() #0 {
  ; CHECK-LABEL: name: v5i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<5 x i16> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<5 x s16>) = G_LOAD [[LOAD]](p1) :: (load (<5 x s16>) from %ir.ptr, align 16, addrspace 1)
  ; CHECK:   [[DEF1:%[0-9]+]]:_(<5 x s16>) = G_IMPLICIT_DEF
  ; CHECK:   [[CONCAT_VECTORS:%[0-9]+]]:_(<10 x s16>) = G_CONCAT_VECTORS [[LOAD1]](<5 x s16>), [[DEF1]](<5 x s16>)
  ; CHECK:   [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>), [[UV4:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<10 x s16>)
  ; CHECK:   $vgpr0 = COPY [[UV]](<2 x s16>)
  ; CHECK:   $vgpr1 = COPY [[UV1]](<2 x s16>)
  ; CHECK:   $vgpr2 = COPY [[UV2]](<2 x s16>)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %ptr = load volatile <5 x i16> addrspace(1)*, <5 x i16> addrspace(1)* addrspace(4)* undef
  %val = load <5 x i16>, <5 x i16> addrspace(1)* %ptr
  ret <5 x i16> %val
}

define <8 x i16> @v8i16_func_void() #0 {
  ; CHECK-LABEL: name: v8i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<8 x i16> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[LOAD]](p1) :: (load (<8 x s16>) from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD1]](<8 x s16>)
  ; CHECK:   $vgpr0 = COPY [[UV]](<2 x s16>)
  ; CHECK:   $vgpr1 = COPY [[UV1]](<2 x s16>)
  ; CHECK:   $vgpr2 = COPY [[UV2]](<2 x s16>)
  ; CHECK:   $vgpr3 = COPY [[UV3]](<2 x s16>)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %ptr = load volatile <8 x i16> addrspace(1)*, <8 x i16> addrspace(1)* addrspace(4)* undef
  %val = load <8 x i16>, <8 x i16> addrspace(1)* %ptr
  ret <8 x i16> %val
}

define <16 x i16> @v16i16_func_void() #0 {
  ; CHECK-LABEL: name: v16i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<16 x i16> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<16 x s16>) = G_LOAD [[LOAD]](p1) :: (load (<16 x s16>) from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>), [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>), [[UV6:%[0-9]+]]:_(<2 x s16>), [[UV7:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD1]](<16 x s16>)
  ; CHECK:   $vgpr0 = COPY [[UV]](<2 x s16>)
  ; CHECK:   $vgpr1 = COPY [[UV1]](<2 x s16>)
  ; CHECK:   $vgpr2 = COPY [[UV2]](<2 x s16>)
  ; CHECK:   $vgpr3 = COPY [[UV3]](<2 x s16>)
  ; CHECK:   $vgpr4 = COPY [[UV4]](<2 x s16>)
  ; CHECK:   $vgpr5 = COPY [[UV5]](<2 x s16>)
  ; CHECK:   $vgpr6 = COPY [[UV6]](<2 x s16>)
  ; CHECK:   $vgpr7 = COPY [[UV7]](<2 x s16>)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
  %ptr = load volatile <16 x i16> addrspace(1)*, <16 x i16> addrspace(1)* addrspace(4)* undef
  %val = load <16 x i16>, <16 x i16> addrspace(1)* %ptr
  ret <16 x i16> %val
}

define <16 x i8> @v16i8_func_void() #0 {
  ; CHECK-LABEL: name: v16i8_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<16 x i8> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[LOAD]](p1) :: (load (<16 x s8>) from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8), [[UV10:%[0-9]+]]:_(s8), [[UV11:%[0-9]+]]:_(s8), [[UV12:%[0-9]+]]:_(s8), [[UV13:%[0-9]+]]:_(s8), [[UV14:%[0-9]+]]:_(s8), [[UV15:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD1]](<16 x s8>)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[UV2]](s8)
  ; CHECK:   [[ANYEXT3:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
  ; CHECK:   [[ANYEXT4:%[0-9]+]]:_(s16) = G_ANYEXT [[UV4]](s8)
  ; CHECK:   [[ANYEXT5:%[0-9]+]]:_(s16) = G_ANYEXT [[UV5]](s8)
  ; CHECK:   [[ANYEXT6:%[0-9]+]]:_(s16) = G_ANYEXT [[UV6]](s8)
  ; CHECK:   [[ANYEXT7:%[0-9]+]]:_(s16) = G_ANYEXT [[UV7]](s8)
  ; CHECK:   [[ANYEXT8:%[0-9]+]]:_(s16) = G_ANYEXT [[UV8]](s8)
  ; CHECK:   [[ANYEXT9:%[0-9]+]]:_(s16) = G_ANYEXT [[UV9]](s8)
  ; CHECK:   [[ANYEXT10:%[0-9]+]]:_(s16) = G_ANYEXT [[UV10]](s8)
  ; CHECK:   [[ANYEXT11:%[0-9]+]]:_(s16) = G_ANYEXT [[UV11]](s8)
  ; CHECK:   [[ANYEXT12:%[0-9]+]]:_(s16) = G_ANYEXT [[UV12]](s8)
  ; CHECK:   [[ANYEXT13:%[0-9]+]]:_(s16) = G_ANYEXT [[UV13]](s8)
  ; CHECK:   [[ANYEXT14:%[0-9]+]]:_(s16) = G_ANYEXT [[UV14]](s8)
  ; CHECK:   [[ANYEXT15:%[0-9]+]]:_(s16) = G_ANYEXT [[UV15]](s8)
  ; CHECK:   [[ANYEXT16:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT16]](s32)
  ; CHECK:   [[ANYEXT17:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
  ; CHECK:   $vgpr1 = COPY [[ANYEXT17]](s32)
  ; CHECK:   [[ANYEXT18:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT2]](s16)
  ; CHECK:   $vgpr2 = COPY [[ANYEXT18]](s32)
  ; CHECK:   [[ANYEXT19:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT3]](s16)
  ; CHECK:   $vgpr3 = COPY [[ANYEXT19]](s32)
  ; CHECK:   [[ANYEXT20:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT4]](s16)
  ; CHECK:   $vgpr4 = COPY [[ANYEXT20]](s32)
  ; CHECK:   [[ANYEXT21:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT5]](s16)
  ; CHECK:   $vgpr5 = COPY [[ANYEXT21]](s32)
  ; CHECK:   [[ANYEXT22:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT6]](s16)
  ; CHECK:   $vgpr6 = COPY [[ANYEXT22]](s32)
  ; CHECK:   [[ANYEXT23:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT7]](s16)
  ; CHECK:   $vgpr7 = COPY [[ANYEXT23]](s32)
  ; CHECK:   [[ANYEXT24:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT8]](s16)
  ; CHECK:   $vgpr8 = COPY [[ANYEXT24]](s32)
  ; CHECK:   [[ANYEXT25:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT9]](s16)
  ; CHECK:   $vgpr9 = COPY [[ANYEXT25]](s32)
  ; CHECK:   [[ANYEXT26:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT10]](s16)
  ; CHECK:   $vgpr10 = COPY [[ANYEXT26]](s32)
  ; CHECK:   [[ANYEXT27:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT11]](s16)
  ; CHECK:   $vgpr11 = COPY [[ANYEXT27]](s32)
  ; CHECK:   [[ANYEXT28:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT12]](s16)
  ; CHECK:   $vgpr12 = COPY [[ANYEXT28]](s32)
  ; CHECK:   [[ANYEXT29:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT13]](s16)
  ; CHECK:   $vgpr13 = COPY [[ANYEXT29]](s32)
  ; CHECK:   [[ANYEXT30:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT14]](s16)
  ; CHECK:   $vgpr14 = COPY [[ANYEXT30]](s32)
  ; CHECK:   [[ANYEXT31:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT15]](s16)
  ; CHECK:   $vgpr15 = COPY [[ANYEXT31]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
  %ptr = load volatile <16 x i8> addrspace(1)*, <16 x i8> addrspace(1)* addrspace(4)* undef
  %val = load <16 x i8>, <16 x i8> addrspace(1)* %ptr
  ret <16 x i8> %val
}

define <2 x i8> @v2i8_func_void() #0 {
  ; CHECK-LABEL: name: v2i8_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s8>) = G_LOAD [[DEF]](p1) :: (load (<2 x s8>) from `<2 x i8> addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD]](<2 x s8>)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT2]](s32)
  ; CHECK:   [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
  ; CHECK:   $vgpr1 = COPY [[ANYEXT3]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
  %val = load <2 x i8>, <2 x i8> addrspace(1)* undef
  ret <2 x i8> %val
}

define <3 x i8> @v3i8_func_void() #0 {
  ; CHECK-LABEL: name: v3i8_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1043  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
1044  ; CHECK:   [[LOAD:%[0-9]+]]:_(<3 x s8>) = G_LOAD [[DEF]](p1) :: (load (<3 x s8>) from `<3 x i8> addrspace(1)* undef`, align 4, addrspace 1)
1045  ; CHECK:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD]](<3 x s8>)
1046  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
1047  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
1048  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[UV2]](s8)
1049  ; CHECK:   [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
1050  ; CHECK:   $vgpr0 = COPY [[ANYEXT3]](s32)
1051  ; CHECK:   [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
1052  ; CHECK:   $vgpr1 = COPY [[ANYEXT4]](s32)
1053  ; CHECK:   [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT2]](s16)
1054  ; CHECK:   $vgpr2 = COPY [[ANYEXT5]](s32)
1055  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
1056  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
1057  %val = load <3 x i8>, <3 x i8> addrspace(1)* undef
1058  ret <3 x i8> %val
1059}
1060
1061define <4  x i8> @v4i8_func_void() #0 {
1062  ; CHECK-LABEL: name: v4i8_func_void
1063  ; CHECK: bb.1 (%ir-block.0):
1064  ; CHECK:   liveins: $sgpr30_sgpr31
1065  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1066  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
1067  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<4 x i8> addrspace(1)* addrspace(4)* undef`, addrspace 4)
1068  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<4 x s8>) = G_LOAD [[LOAD]](p1) :: (load (<4 x s8>) from %ir.ptr, addrspace 1)
1069  ; CHECK:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD1]](<4 x s8>)
1070  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
1071  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
1072  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[UV2]](s8)
1073  ; CHECK:   [[ANYEXT3:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
1074  ; CHECK:   [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
1075  ; CHECK:   $vgpr0 = COPY [[ANYEXT4]](s32)
1076  ; CHECK:   [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
1077  ; CHECK:   $vgpr1 = COPY [[ANYEXT5]](s32)
1078  ; CHECK:   [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT2]](s16)
1079  ; CHECK:   $vgpr2 = COPY [[ANYEXT6]](s32)
1080  ; CHECK:   [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT3]](s16)
1081  ; CHECK:   $vgpr3 = COPY [[ANYEXT7]](s32)
1082  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
1083  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
1084  %ptr = load volatile <4  x i8> addrspace(1)*, <4  x i8> addrspace(1)* addrspace(4)* undef
1085  %val = load <4  x i8>, <4  x i8> addrspace(1)* %ptr
1086  ret <4  x i8> %val
1087}
1088
1089define {i8, i32} @struct_i8_i32_func_void() #0 {
1090  ; CHECK-LABEL: name: struct_i8_i32_func_void
1091  ; CHECK: bb.1 (%ir-block.0):
1092  ; CHECK:   liveins: $sgpr30_sgpr31
1093  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1094  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
1095  ; CHECK:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `{ i8, i32 } addrspace(1)* undef`, align 4, addrspace 1)
1096  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
1097  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C]](s64)
1098  ; CHECK:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from `{ i8, i32 } addrspace(1)* undef` + 4, addrspace 1)
1099  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s8)
1100  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
1101  ; CHECK:   $vgpr1 = COPY [[LOAD1]](s32)
1102  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
1103  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
1104  %val = load { i8, i32 }, { i8, i32 } addrspace(1)* undef
1105  ret { i8, i32 } %val
1106}
1107
1108define void @void_func_sret_struct_i8_i32({ i8, i32 } addrspace(5)* sret({ i8, i32 }) %arg0) #0 {
1109  ; CHECK-LABEL: name: void_func_sret_struct_i8_i32
1110  ; CHECK: bb.1 (%ir-block.0):
1111  ; CHECK:   liveins: $vgpr0, $sgpr30_sgpr31
1112  ; CHECK:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
1113  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1114  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
1115  ; CHECK:   [[COPY2:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
1116  ; CHECK:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (volatile load (s8) from `i8 addrspace(1)* undef`, addrspace 1)
1117  ; CHECK:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p1) :: (volatile load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
1118  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
1119  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
  ; CHECK:   G_STORE [[LOAD]](s8), [[COPY]](p5) :: (store (s8) into %ir.gep0, addrspace 5)
  ; CHECK:   G_STORE [[LOAD1]](s32), [[PTR_ADD]](p5) :: (store (s32) into %ir.gep1, addrspace 5)
  ; CHECK:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
  ; CHECK:   S_SETPC_B64_return [[COPY3]]
  %val0 = load volatile i8, i8 addrspace(1)* undef
  %val1 = load volatile i32, i32 addrspace(1)* undef
  %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 0
  %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 1
  store i8 %val0, i8 addrspace(5)* %gep0
  store i32 %val1, i32 addrspace(5)* %gep1
  ret void
}

; FIXME: Should be able to fold offsets in all of these pre-gfx9. Call
; lowering introduces an extra CopyToReg/CopyFromReg that obscures the
; inserted AssertZext; not using it introduces the spills.
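;
; A hand-written sketch, not autogenerated: the aggregate returns below that do
; not fit in v0-v31 come back through a hidden stack pointer in vgpr0, with
; each piece stored at a constant byte offset from it, roughly:
;   %piece.addr = getelementptr inbounds i8, i8 addrspace(5)* %sret.hidden, i32 128
;   %piece.ptr = bitcast i8 addrspace(5)* %piece.addr to i32 addrspace(5)*
;   store i32 %piece, i32 addrspace(5)* %piece.ptr
; %sret.hidden, %piece.addr, %piece.ptr and %piece are illustrative names; the
; constant offsets in such stores are what the FIXME wants folded.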

define <33 x i32> @v33i32_func_void() #0 {
  ; CHECK-LABEL: name: v33i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $vgpr0, $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<33 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<33 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<33 x s32>) from %ir.ptr, align 256, addrspace 1)
  ; CHECK:   G_STORE [[LOAD1]](<33 x s32>), [[COPY]](p5) :: (store (<33 x s32>), align 256, addrspace 5)
  ; CHECK:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
  ; CHECK:   S_SETPC_B64_return [[COPY2]]
  %ptr = load volatile <33 x i32> addrspace(1)*, <33 x i32> addrspace(1)* addrspace(4)* undef
  %val = load <33 x i32>, <33 x i32> addrspace(1)* %ptr
  ret <33 x i32> %val
}

define <33 x i32> @v33i32_func_v33i32_i32(<33 x i32> addrspace(1)* %p, i32 %idx) #0 {
  ; CHECK-LABEL: name: v33i32_func_v33i32_i32
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; CHECK:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
  ; CHECK:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY3]](s32)
  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 256
  ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[MV]], [[MUL]](s64)
  ; CHECK:   [[COPY5:%[0-9]+]]:_(p1) = COPY [[PTR_ADD]](p1)
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<33 x s32>) = G_LOAD [[COPY5]](p1) :: (load (<33 x s32>) from %ir.gep, align 256, addrspace 1)
  ; CHECK:   G_STORE [[LOAD]](<33 x s32>), [[COPY]](p5) :: (store (<33 x s32>), align 256, addrspace 5)
  ; CHECK:   [[COPY6:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
  ; CHECK:   S_SETPC_B64_return [[COPY6]]
  %gep = getelementptr inbounds <33 x i32>, <33 x i32> addrspace(1)* %p, i32 %idx
  %val = load <33 x i32>, <33 x i32> addrspace(1)* %gep
  ret <33 x i32> %val
}

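; Both struct returns below exceed the 32-VGPR return budget, so each piece is
; written through the hidden pointer instead: per the checks, the <32 x i32>
; part and the i32 part land at byte offsets 0 and 128 from it, mirroring the
; in-memory struct layout.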
define { <32 x i32>, i32 } @struct_v32i32_i32_func_void() #0 {
  ; CHECK-LABEL: name: struct_v32i32_i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $vgpr0, $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `{ <32 x i32>, i32 } addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<32 x s32>) from %ir.ptr, addrspace 1)
  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
  ; CHECK:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from %ir.ptr + 128, align 128, addrspace 1)
  ; CHECK:   G_STORE [[LOAD1]](<32 x s32>), [[COPY]](p5) :: (store (<32 x s32>), addrspace 5)
  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
  ; CHECK:   G_STORE [[LOAD2]](s32), [[PTR_ADD1]](p5) :: (store (s32), align 128, addrspace 5)
  ; CHECK:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
  ; CHECK:   S_SETPC_B64_return [[COPY2]]
  %ptr = load volatile { <32 x i32>, i32 } addrspace(1)*, { <32 x i32>, i32 } addrspace(1)* addrspace(4)* undef
  %val = load { <32 x i32>, i32 }, { <32 x i32>, i32 } addrspace(1)* %ptr
  ret { <32 x i32>, i32 } %val
}

define { i32, <32 x i32> } @struct_i32_v32i32_func_void() #0 {
  ; CHECK-LABEL: name: struct_i32_v32i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $vgpr0, $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `{ i32, <32 x i32> } addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p1) :: (load (s32) from %ir.ptr, align 128, addrspace 1)
  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
  ; CHECK:   [[LOAD2:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<32 x s32>) from %ir.ptr + 128, addrspace 1)
  ; CHECK:   G_STORE [[LOAD1]](s32), [[COPY]](p5) :: (store (s32), align 128, addrspace 5)
  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
  ; CHECK:   G_STORE [[LOAD2]](<32 x s32>), [[PTR_ADD1]](p5) :: (store (<32 x s32>), addrspace 5)
  ; CHECK:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
  ; CHECK:   S_SETPC_B64_return [[COPY2]]
  %ptr = load volatile { i32, <32 x i32> } addrspace(1)*, { i32, <32 x i32> } addrspace(1)* addrspace(4)* undef
  %val = load { i32, <32 x i32> }, { i32, <32 x i32> } addrspace(1)* %ptr
  ret { i32, <32 x i32> } %val
}

; Make sure the last struct component is returned in v3, not v4.
define { <3 x i32>, i32 } @v3i32_struct_func_void_wasted_reg() #0 {
  ; CHECK-LABEL: name: v3i32_struct_func_void_wasted_reg
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
  ; CHECK:   [[DEF1:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; CHECK:   [[DEF2:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `i32 addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `i32 addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `i32 addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `i32 addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[IVEC:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[DEF1]], [[LOAD]](s32), [[C]](s32)
  ; CHECK:   [[IVEC1:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC]], [[LOAD1]](s32), [[C1]](s32)
  ; CHECK:   [[IVEC2:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC1]], [[LOAD2]](s32), [[C2]](s32)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[IVEC2]](<3 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[LOAD3]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %load0 = load volatile i32, i32 addrspace(3)* undef
  %load1 = load volatile i32, i32 addrspace(3)* undef
  %load2 = load volatile i32, i32 addrspace(3)* undef
  %load3 = load volatile i32, i32 addrspace(3)* undef

  %insert.0 = insertelement <3 x i32> undef, i32 %load0, i32 0
  %insert.1 = insertelement <3 x i32> %insert.0, i32 %load1, i32 1
  %insert.2 = insertelement <3 x i32> %insert.1, i32 %load2, i32 2
  %insert.3 = insertvalue { <3 x i32>, i32 } undef, <3 x i32> %insert.2, 0
  %insert.4 = insertvalue { <3 x i32>, i32 } %insert.3, i32 %load3, 1
  ret { <3 x i32>, i32 } %insert.4
}

define { <3 x float>, i32 } @v3f32_struct_func_void_wasted_reg() #0 {
  ; CHECK-LABEL: name: v3f32_struct_func_void_wasted_reg
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
  ; CHECK:   [[COPY1:%[0-9]+]]:_(p3) = COPY [[DEF]](p3)
  ; CHECK:   [[DEF1:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; CHECK:   [[DEF2:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `float addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `float addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `float addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[COPY1]](p3) :: (volatile load (s32) from `i32 addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[IVEC:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[DEF1]], [[LOAD]](s32), [[C]](s32)
  ; CHECK:   [[IVEC1:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC]], [[LOAD1]](s32), [[C1]](s32)
  ; CHECK:   [[IVEC2:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC1]], [[LOAD2]](s32), [[C2]](s32)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[IVEC2]](<3 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[LOAD3]](s32)
  ; CHECK:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %load0 = load volatile float, float addrspace(3)* undef
  %load1 = load volatile float, float addrspace(3)* undef
  %load2 = load volatile float, float addrspace(3)* undef
  %load3 = load volatile i32, i32 addrspace(3)* undef

  %insert.0 = insertelement <3 x float> undef, float %load0, i32 0
  %insert.1 = insertelement <3 x float> %insert.0, float %load1, i32 1
  %insert.2 = insertelement <3 x float> %insert.1, float %load2, i32 2
  %insert.3 = insertvalue { <3 x float>, i32 } undef, <3 x float> %insert.2, 0
  %insert.4 = insertvalue { <3 x float>, i32 } %insert.3, i32 %load3, 1
  ret { <3 x float>, i32 } %insert.4
}

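; Sanity check for known-bits propagation through the incoming sret pointer:
; the name suggests the high bits of the 32-bit private (addrspace 5) pointer
; are known zero, and the volatile stores keep the shifted values observable.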
define void @void_func_sret_max_known_zero_bits(i8 addrspace(5)* sret(i8) %arg0) #0 {
  ; CHECK-LABEL: name: void_func_sret_max_known_zero_bits
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $vgpr0, $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 17
  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 18
  ; CHECK:   [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
  ; CHECK:   [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p5)
  ; CHECK:   [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C]](s32)
  ; CHECK:   [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C1]](s32)
  ; CHECK:   [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C2]](s32)
  ; CHECK:   G_STORE [[LSHR]](s32), [[DEF]](p3) :: (volatile store (s32) into `i32 addrspace(3)* undef`, addrspace 3)
  ; CHECK:   G_STORE [[LSHR1]](s32), [[DEF]](p3) :: (volatile store (s32) into `i32 addrspace(3)* undef`, addrspace 3)
  ; CHECK:   G_STORE [[LSHR2]](s32), [[DEF]](p3) :: (volatile store (s32) into `i32 addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
  ; CHECK:   S_SETPC_B64_return [[COPY2]]
  %arg0.int = ptrtoint i8 addrspace(5)* %arg0 to i32

  %lshr0 = lshr i32 %arg0.int, 16
  %lshr1 = lshr i32 %arg0.int, 17
  %lshr2 = lshr i32 %arg0.int, 18

  store volatile i32 %lshr0, i32 addrspace(3)* undef
  store volatile i32 %lshr1, i32 addrspace(3)* undef
  store volatile i32 %lshr2, i32 addrspace(3)* undef
  ret void
}

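; i1022 is not a multiple of the 32-bit register grain; the checks below show
; it widened to s1024 (anyext, sext or zext to match the return attribute) and
; then split into 32 s32 pieces across v0-v31.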
define i1022 @i1022_func_void() #0 {
  ; CHECK-LABEL: name: i1022_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s1022) = G_LOAD [[DEF]](p1) :: (load (s1022) from `i1022 addrspace(1)* undef`, align 8, addrspace 1)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s1024) = G_ANYEXT [[LOAD]](s1022)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT]](s1024)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK:   $vgpr16 = COPY [[UV16]](s32)
  ; CHECK:   $vgpr17 = COPY [[UV17]](s32)
  ; CHECK:   $vgpr18 = COPY [[UV18]](s32)
  ; CHECK:   $vgpr19 = COPY [[UV19]](s32)
  ; CHECK:   $vgpr20 = COPY [[UV20]](s32)
  ; CHECK:   $vgpr21 = COPY [[UV21]](s32)
  ; CHECK:   $vgpr22 = COPY [[UV22]](s32)
  ; CHECK:   $vgpr23 = COPY [[UV23]](s32)
  ; CHECK:   $vgpr24 = COPY [[UV24]](s32)
  ; CHECK:   $vgpr25 = COPY [[UV25]](s32)
  ; CHECK:   $vgpr26 = COPY [[UV26]](s32)
  ; CHECK:   $vgpr27 = COPY [[UV27]](s32)
  ; CHECK:   $vgpr28 = COPY [[UV28]](s32)
  ; CHECK:   $vgpr29 = COPY [[UV29]](s32)
  ; CHECK:   $vgpr30 = COPY [[UV30]](s32)
  ; CHECK:   $vgpr31 = COPY [[UV31]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
  %val = load i1022, i1022 addrspace(1)* undef
  ret i1022 %val
}

define signext i1022 @i1022_signext_func_void() #0 {
  ; CHECK-LABEL: name: i1022_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s1022) = G_LOAD [[DEF]](p1) :: (load (s1022) from `i1022 addrspace(1)* undef`, align 8, addrspace 1)
  ; CHECK:   [[SEXT:%[0-9]+]]:_(s1024) = G_SEXT [[LOAD]](s1022)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s1024)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK:   $vgpr16 = COPY [[UV16]](s32)
  ; CHECK:   $vgpr17 = COPY [[UV17]](s32)
  ; CHECK:   $vgpr18 = COPY [[UV18]](s32)
  ; CHECK:   $vgpr19 = COPY [[UV19]](s32)
  ; CHECK:   $vgpr20 = COPY [[UV20]](s32)
  ; CHECK:   $vgpr21 = COPY [[UV21]](s32)
  ; CHECK:   $vgpr22 = COPY [[UV22]](s32)
  ; CHECK:   $vgpr23 = COPY [[UV23]](s32)
  ; CHECK:   $vgpr24 = COPY [[UV24]](s32)
  ; CHECK:   $vgpr25 = COPY [[UV25]](s32)
  ; CHECK:   $vgpr26 = COPY [[UV26]](s32)
  ; CHECK:   $vgpr27 = COPY [[UV27]](s32)
  ; CHECK:   $vgpr28 = COPY [[UV28]](s32)
  ; CHECK:   $vgpr29 = COPY [[UV29]](s32)
  ; CHECK:   $vgpr30 = COPY [[UV30]](s32)
  ; CHECK:   $vgpr31 = COPY [[UV31]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
  %val = load i1022, i1022 addrspace(1)* undef
  ret i1022 %val
}

define zeroext i1022 @i1022_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i1022_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s1022) = G_LOAD [[DEF]](p1) :: (load (s1022) from `i1022 addrspace(1)* undef`, align 8, addrspace 1)
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s1024) = G_ZEXT [[LOAD]](s1022)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s1024)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK:   $vgpr16 = COPY [[UV16]](s32)
  ; CHECK:   $vgpr17 = COPY [[UV17]](s32)
  ; CHECK:   $vgpr18 = COPY [[UV18]](s32)
  ; CHECK:   $vgpr19 = COPY [[UV19]](s32)
  ; CHECK:   $vgpr20 = COPY [[UV20]](s32)
  ; CHECK:   $vgpr21 = COPY [[UV21]](s32)
  ; CHECK:   $vgpr22 = COPY [[UV22]](s32)
  ; CHECK:   $vgpr23 = COPY [[UV23]](s32)
  ; CHECK:   $vgpr24 = COPY [[UV24]](s32)
  ; CHECK:   $vgpr25 = COPY [[UV25]](s32)
  ; CHECK:   $vgpr26 = COPY [[UV26]](s32)
  ; CHECK:   $vgpr27 = COPY [[UV27]](s32)
  ; CHECK:   $vgpr28 = COPY [[UV28]](s32)
  ; CHECK:   $vgpr29 = COPY [[UV29]](s32)
  ; CHECK:   $vgpr30 = COPY [[UV30]](s32)
  ; CHECK:   $vgpr31 = COPY [[UV31]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
  %val = load i1022, i1022 addrspace(1)* undef
  ret i1022 %val
}

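; Mixed vector/pointer struct: also returned through the hidden pointer, with
; the pointer members kept as typed p3, p1 and <2 x p1> loads and stores rather
; than being broken down into integers.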
%struct.with.ptrs = type { <32 x i32>, i32 addrspace(3)*, i32 addrspace(1)*, <2 x i8 addrspace(1)*> }

define %struct.with.ptrs @ptr_in_struct_func_void() #0 {
  ; CHECK-LABEL: name: ptr_in_struct_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $vgpr0, $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[DEF]](p1) :: (volatile load (<32 x s32>) from `%struct.with.ptrs addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C]](s64)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p1) :: (volatile load (p3) from `%struct.with.ptrs addrspace(1)* undef` + 128, align 128, addrspace 1)
  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 136
  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C1]](s64)
  ; CHECK:   [[LOAD2:%[0-9]+]]:_(p1) = G_LOAD [[PTR_ADD1]](p1) :: (volatile load (p1) from `%struct.with.ptrs addrspace(1)* undef` + 136, addrspace 1)
  ; CHECK:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 144
  ; CHECK:   [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
  ; CHECK:   [[LOAD3:%[0-9]+]]:_(<2 x p1>) = G_LOAD [[PTR_ADD2]](p1) :: (volatile load (<2 x p1>) from `%struct.with.ptrs addrspace(1)* undef` + 144, addrspace 1)
  ; CHECK:   G_STORE [[LOAD]](<32 x s32>), [[COPY]](p5) :: (store (<32 x s32>), addrspace 5)
  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
  ; CHECK:   [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
  ; CHECK:   G_STORE [[LOAD1]](p3), [[PTR_ADD3]](p5) :: (store (p3), align 128, addrspace 5)
  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 136
  ; CHECK:   [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
  ; CHECK:   G_STORE [[LOAD2]](p1), [[PTR_ADD4]](p5) :: (store (p1), addrspace 5)
  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 144
  ; CHECK:   [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
  ; CHECK:   G_STORE [[LOAD3]](<2 x p1>), [[PTR_ADD5]](p5) :: (store (<2 x p1>), addrspace 5)
  ; CHECK:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
  ; CHECK:   S_SETPC_B64_return [[COPY2]]
  %val = load volatile %struct.with.ptrs, %struct.with.ptrs addrspace(1)* undef
  ret %struct.with.ptrs %val
}

attributes #0 = { nounwind }
