# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -O0 -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=legalizer %s -o - | FileCheck %s

# zext(trunc) back to the original vector type legalizes to an AND with a
# 65535 splat mask.
---
name: test_zext_trunc_v2s32_to_v2s16_to_v2s32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_zext_trunc_v2s32_to_v2s16_to_v2s32
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY [[COPY]](<2 x s32>)
    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[COPY1]], [[BUILD_VECTOR]]
    ; CHECK: $vgpr0_vgpr1 = COPY [[AND]](<2 x s32>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s16>) = G_TRUNC %0
    %2:_(<2 x s32>) = G_ZEXT %1
    $vgpr0_vgpr1 = COPY %2
...

# zext(trunc) to a wider vector type: scalarized, each element anyext'd to
# s64 and then masked with 65535.
---
name: test_zext_trunc_v2s32_to_v2s16_to_v2s64
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_zext_trunc_v2s32_to_v2s16_to_v2s64
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV]](s32)
    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[UV1]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C]]
    ; CHECK: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT1]], [[C]]
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[AND]](s64), [[AND1]](s64)
    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s16>) = G_TRUNC %0
    %2:_(<2 x s64>) = G_ZEXT %1
    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %2
...

# v2s8 source zext'd into <2 x s16>: masked with a packed 0xff|(0xff<<16)
# constant bitcast to <2 x s16>.
---
name: test_zext_trunc_v2s32_to_v2s8_to_v2s16
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_zext_trunc_v2s32_to_v2s8_to_v2s16
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C]](s32)
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[C]], [[C1]](s32)
    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY1]], [[SHL]]
    ; CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
    ; CHECK: [[TRUNC:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[COPY]](<2 x s32>)
    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s16>) = G_AND [[TRUNC]], [[BITCAST]]
    ; CHECK: $vgpr0 = COPY [[AND]](<2 x s16>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s8>) = G_TRUNC %0
    %2:_(<2 x s16>) = G_ZEXT %1
    $vgpr0 = COPY %2
...

# Odd-sized <3 x s16> source: split into <2 x s32> pieces (padded with an
# implicit_def element), masked, then recombined via concat/unmerge.
---
name: test_zext_trunc_v3s32_to_v3s16_to_v3s32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2

    ; CHECK-LABEL: name: test_zext_trunc_v3s32_to_v3s16_to_v3s32
    ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; CHECK: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[COPY]](<3 x s32>)
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32)
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV2]](s32), [[DEF]](s32)
    ; CHECK: [[DEF1:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[DEF]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s32>) = G_AND [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s32>) = G_CONCAT_VECTORS [[AND]](<2 x s32>), [[AND1]](<2 x s32>), [[DEF1]](<2 x s32>)
    ; CHECK: [[UV3:%[0-9]+]]:_(<3 x s32>), [[UV4:%[0-9]+]]:_(<3 x s32>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<6 x s32>)
    ; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[UV3]](<3 x s32>)
    %0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    %1:_(<3 x s16>) = G_TRUNC %0
    %2:_(<3 x s32>) = G_ZEXT %1
    $vgpr0_vgpr1_vgpr2 = COPY %2
...

# Test for "Too many bits for uint64_t" assertion when combining
# zexts with wide sources.
---
name: test_zext_128_trunc_s128_merge
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_zext_128_trunc_s128_merge
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
    ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
    ; CHECK: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C1]]
    ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[AND]](s64), [[AND1]](s64)
    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s64) = COPY $vgpr0_vgpr1
    %2:_(s128) = G_MERGE_VALUES %0, %1
    %3:_(s96) = G_TRUNC %2
    %4:_(s128) = G_ZEXT %3
    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %4
...

# zext(sext(s1 -> s8)) to s32: a single G_SEXT to s32 followed by a mask of
# the low 8 bits.
---
name: test_zext_s8_to_s32_of_sext_s1_to_s8
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1

    ; CHECK-LABEL: name: test_zext_s8_to_s32_of_sext_s1_to_s8
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[COPY1]]
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ICMP]](s1)
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[SEXT]], [[C]]
    ; CHECK: $vgpr0 = COPY [[AND]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s1) = G_ICMP intpred(eq), %0, %1
    %3:_(s8) = G_SEXT %2
    %4:_(s32) = G_ZEXT %3
    $vgpr0 = COPY %4
...

# Same as above with an s16 intermediate: G_SEXT to s32 then mask the low
# 16 bits.
---
name: test_zext_s8_to_s32_of_sext_s1_to_s16
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1

    ; CHECK-LABEL: name: test_zext_s8_to_s32_of_sext_s1_to_s16
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[COPY1]]
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ICMP]](s1)
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[SEXT]], [[C]]
    ; CHECK: $vgpr0 = COPY [[AND]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s1) = G_ICMP intpred(eq), %0, %1
    %3:_(s16) = G_SEXT %2
    %4:_(s32) = G_ZEXT %3
    $vgpr0 = COPY %4
...

# zext of sext(s8 -> s16) fed by a load: legalized to G_SEXT_INREG of the
# loaded s32, then a 16-bit mask.
---
name: test_zext_s8_to_s32_of_sext_s8_to_s16
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_zext_s8_to_s32_of_sext_s8_to_s16
    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
    ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s8), addrspace 1)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 8
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[SEXT_INREG]], [[C]]
    ; CHECK: $vgpr0 = COPY [[AND]](s32)
    %0:_(p1) = COPY $vgpr0_vgpr1
    %1:_(s8) = G_LOAD %0 :: (load (s8), addrspace 1)
    %2:_(s16) = G_SEXT %1
    %3:_(s32) = G_ZEXT %2
    $vgpr0 = COPY %3
...

# Vector form: per-element G_SEXT_INREG of the compare results, then an AND
# with a 255 splat.
---
name: test_zext_v2s8_to_v2s32_of_sext_v2s1_to_v2s8
body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3

    ; CHECK-LABEL: name: test_zext_v2s8_to_v2s32_of_sext_v2s1_to_v2s8
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV]](s32), [[UV2]]
    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[UV3]]
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY2]], 1
    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ANYEXT1]](s32)
    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY3]], 1
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[BUILD_VECTOR1]], [[BUILD_VECTOR]]
    ; CHECK: $vgpr0_vgpr1 = COPY [[AND]](<2 x s32>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
    %2:_(<2 x s1>) = G_ICMP intpred(eq), %0, %1
    %3:_(<2 x s8>) = G_SEXT %2
    %4:_(<2 x s32>) = G_ZEXT %3
    $vgpr0_vgpr1 = COPY %4
...

# Same as the previous test with an s16 intermediate: the mask splat is
# 65535 instead of 255.
---
name: test_zext_v2s8_to_v2s32_of_sext_v2s1_to_v2s16
body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3

    ; CHECK-LABEL: name: test_zext_v2s8_to_v2s32_of_sext_v2s1_to_v2s16
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV]](s32), [[UV2]]
    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[UV3]]
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY2]], 1
    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ANYEXT1]](s32)
    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY3]], 1
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[BUILD_VECTOR1]], [[BUILD_VECTOR]]
    ; CHECK: $vgpr0_vgpr1 = COPY [[AND]](<2 x s32>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
    %2:_(<2 x s1>) = G_ICMP intpred(eq), %0, %1
    %3:_(<2 x s16>) = G_SEXT %2
    %4:_(<2 x s32>) = G_ZEXT %3
    $vgpr0_vgpr1 = COPY %4
...

# Vector load source: elements unpacked with shifts, per-element
# G_SEXT_INREG 8, then an AND with a 65535 splat.
---
name: test_zext_v2s8_to_v2s32_of_sext_v2s8_to_v2s16
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_zext_v2s8_to_v2s32_of_sext_v2s8_to_v2s16
    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
    ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C]](s32)
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32)
    ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
    ; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C2]](s32)
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32)
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
    ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C3]](s32), [[C3]](s32)
    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY3]], 8
    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY4]], 8
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[BUILD_VECTOR1]], [[BUILD_VECTOR]]
    ; CHECK: $vgpr0_vgpr1 = COPY [[AND]](<2 x s32>)
    %0:_(p1) = COPY $vgpr0_vgpr1
    %1:_(<2 x s8>) = G_LOAD %0 :: (load (<2 x s8>), addrspace 1)
    %2:_(<2 x s16>) = G_SEXT %1
    %3:_(<2 x s32>) = G_ZEXT %2
    $vgpr0_vgpr1 = COPY %3
...
