# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -O0 -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=legalizer %s -o - | FileCheck %s

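# zext of a trunc back to the original <2 x s32> type legalizes to an AND with a
# 0xffff splat.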
---
name: test_zext_trunc_v2s32_to_v2s16_to_v2s32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_zext_trunc_v2s32_to_v2s16_to_v2s32
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[COPY]], [[BUILD_VECTOR]]
    ; CHECK: $vgpr0_vgpr1 = COPY [[AND]](<2 x s32>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s16>) = G_TRUNC %0
    %2:_(<2 x s32>) = G_ZEXT %1
    $vgpr0_vgpr1 = COPY %2
...

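# Zero-extending the truncated value to wider <2 x s64> elements scalarizes: each
# element is anyextended to s64 and masked with 0xffff before being rebuilt into a vector.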
---
name: test_zext_trunc_v2s32_to_v2s16_to_v2s64
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_zext_trunc_v2s32_to_v2s16_to_v2s64
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV]](s32)
    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[UV1]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C]]
    ; CHECK: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT1]], [[C]]
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[AND]](s64), [[AND1]](s64)
    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s16>) = G_TRUNC %0
    %2:_(<2 x s64>) = G_ZEXT %1
    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %2
...

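# With a <2 x s16> result the 0xff mask is materialized as a packed constant
# (0xff in each half, built with SHL/OR and a bitcast) and applied with a vector AND.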
---
name: test_zext_trunc_v2s32_to_v2s8_to_v2s16
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_zext_trunc_v2s32_to_v2s8_to_v2s16
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C]](s32)
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[C]], [[C1]](s32)
    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY1]], [[SHL]]
    ; CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
    ; CHECK: [[TRUNC:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[COPY]](<2 x s32>)
    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s16>) = G_AND [[TRUNC]], [[BITCAST]]
    ; CHECK: $vgpr0 = COPY [[AND]](<2 x s16>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s8>) = G_TRUNC %0
    %2:_(<2 x s16>) = G_ZEXT %1
    $vgpr0 = COPY %2
...

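# The odd-sized <3 x s32> case is widened to 2-element pieces (padded with an implicit
# def), masked, and reassembled through G_CONCAT_VECTORS/G_UNMERGE_VALUES.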
---
name: test_zext_trunc_v3s32_to_v3s16_to_v3s32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2

    ; CHECK-LABEL: name: test_zext_trunc_v3s32_to_v3s16_to_v3s32
    ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32)
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV2]](s32), [[DEF]](s32)
    ; CHECK: [[DEF1:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[DEF]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s32>) = G_AND [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s32>) = G_CONCAT_VECTORS [[AND]](<2 x s32>), [[AND1]](<2 x s32>), [[DEF1]](<2 x s32>)
    ; CHECK: [[UV3:%[0-9]+]]:_(<3 x s32>), [[UV4:%[0-9]+]]:_(<3 x s32>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<6 x s32>)
    ; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[UV3]](<3 x s32>)
    %0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    %1:_(<3 x s16>) = G_TRUNC %0
    %2:_(<3 x s32>) = G_ZEXT %1
    $vgpr0_vgpr1_vgpr2 = COPY %2
...

# Test that combining zexts with sources wider than 64 bits does not hit the
# "Too many bits for uint64_t" assertion.
---
name: test_zext_128_trunc_s128_merge
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_zext_128_trunc_s128_merge
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
    ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
    ; CHECK: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C1]]
    ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[AND]](s64), [[AND1]](s64)
    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s64) = COPY $vgpr0_vgpr1
    %2:_(s128) = G_MERGE_VALUES %0, %1
    %3:_(s96) = G_TRUNC %2
    %4:_(s128) = G_ZEXT %3
    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %4
...

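# zext of a sign-extended s1 compare result: the compare is sign-extended straight to
# s32 and the intermediate s8 step becomes an AND with 0xff.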
---
name: test_zext_s8_to_s32_of_sext_s1_to_s8
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1

    ; CHECK-LABEL: name: test_zext_s8_to_s32_of_sext_s1_to_s8
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[COPY1]]
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ICMP]](s1)
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[SEXT]], [[C]]
    ; CHECK: $vgpr0 = COPY [[AND]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s1) = G_ICMP intpred(eq), %0, %1
    %3:_(s8) = G_SEXT %2
    %4:_(s32) = G_ZEXT %3
    $vgpr0 = COPY %4
...

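# Same pattern with an s16 intermediate type, so the mask becomes 0xffff.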
---
name: test_zext_s8_to_s32_of_sext_s1_to_s16
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1

    ; CHECK-LABEL: name: test_zext_s8_to_s32_of_sext_s1_to_s16
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[COPY1]]
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ICMP]](s1)
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[SEXT]], [[C]]
    ; CHECK: $vgpr0 = COPY [[AND]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s1) = G_ICMP intpred(eq), %0, %1
    %3:_(s16) = G_SEXT %2
    %4:_(s32) = G_ZEXT %3
    $vgpr0 = COPY %4
...

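# The s8 load is widened to an s32 result, the inner sext becomes G_SEXT_INREG 8, and
# the outer zext becomes an AND with 0xffff.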
---
name: test_zext_s8_to_s32_of_sext_s8_to_s16
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_zext_s8_to_s32_of_sext_s8_to_s16
    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
    ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s8), addrspace 1)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LOAD]], 8
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[SEXT_INREG]], [[C]]
    ; CHECK: $vgpr0 = COPY [[AND]](s32)
    %0:_(p1) = COPY $vgpr0_vgpr1
    %1:_(s8) = G_LOAD %0 :: (load (s8), addrspace 1)
    %2:_(s16) = G_SEXT %1
    %3:_(s32) = G_ZEXT %2
    $vgpr0 = COPY %3
...

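# Vector version of the sext-then-zext compare pattern: each s1 element is anyextended,
# sign-extended in-register, rebuilt into a vector, and masked with a 0xff splat.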
---
name: test_zext_v2s8_to_v2s32_of_sext_v2s1_to_v2s8
body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3

    ; CHECK-LABEL: name: test_zext_v2s8_to_v2s32_of_sext_v2s1_to_v2s8
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV]](s32), [[UV2]]
    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[UV3]]
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ANYEXT]], 1
    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ANYEXT1]], 1
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[BUILD_VECTOR1]], [[BUILD_VECTOR]]
    ; CHECK: $vgpr0_vgpr1 = COPY [[AND]](<2 x s32>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
    %2:_(<2 x s1>) = G_ICMP intpred(eq), %0, %1
    %3:_(<2 x s8>) = G_SEXT %2
    %4:_(<2 x s32>) = G_ZEXT %3
    $vgpr0_vgpr1 = COPY %4
...

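# As above, but with an s16 intermediate element type, so the splat mask is 0xffff.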
---
name: test_zext_v2s8_to_v2s32_of_sext_v2s1_to_v2s16
body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3

    ; CHECK-LABEL: name: test_zext_v2s8_to_v2s32_of_sext_v2s1_to_v2s16
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV]](s32), [[UV2]]
    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[UV3]]
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ANYEXT]], 1
    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ANYEXT1]], 1
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[BUILD_VECTOR1]], [[BUILD_VECTOR]]
    ; CHECK: $vgpr0_vgpr1 = COPY [[AND]](<2 x s32>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
    %2:_(<2 x s1>) = G_ICMP intpred(eq), %0, %1
    %3:_(<2 x s16>) = G_SEXT %2
    %4:_(<2 x s32>) = G_ZEXT %3
    $vgpr0_vgpr1 = COPY %4
...

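# The <2 x s8> load is legalized to a single s16 load plus a shift to extract the high
# byte; each element is then sign-extended in-register and masked with a 0xffff splat.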
---
name: test_zext_v2s8_to_v2s32_of_sext_v2s8_to_v2s16
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_zext_v2s8_to_v2s32_of_sext_v2s8_to_v2s16
    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
    ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C]](s32)
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LOAD]], 8
    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LSHR]], 8
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[BUILD_VECTOR1]], [[BUILD_VECTOR]]
    ; CHECK: $vgpr0_vgpr1 = COPY [[AND]](<2 x s32>)
    %0:_(p1) = COPY $vgpr0_vgpr1
    %1:_(<2 x s8>) = G_LOAD %0 :: (load (<2 x s8>), addrspace 1)
    %2:_(<2 x s16>) = G_SEXT %1
    %3:_(<2 x s32>) = G_ZEXT %2
    $vgpr0_vgpr1 = COPY %3
...