# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -O0 -run-pass=legalizer %s -o - | FileCheck -check-prefix=GFX78 %s
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck -check-prefix=GFX78 %s
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -O0 -run-pass=legalizer %s -o - | FileCheck -check-prefix=GFX9 %s
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -O0 -run-pass=legalizer %s -o - | FileCheck -check-prefix=GFX9 %s

---
name: build_vector_v2s16
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1

    ; GFX78-LABEL: name: build_vector_v2s16
    ; GFX78: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX78: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX78: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; GFX78: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX78: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX78: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GFX78: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
    ; GFX78: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
    ; GFX78: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
    ; GFX78: S_NOP 0, implicit [[BITCAST]](<2 x s16>)
    ; GFX9-LABEL: name: build_vector_v2s16
    ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY]](s32), [[COPY1]](s32)
    ; GFX9: S_NOP 0, implicit [[BUILD_VECTOR_TRUNC]](<2 x s16>)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s16) = G_TRUNC %0
    %3:_(s16) = G_TRUNC %1
    %4:_(<2 x s16>) = G_BUILD_VECTOR %2, %3
    S_NOP 0, implicit %4
...

---
name: build_vector_v3s16
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1, $vgpr2

    ; GFX78-LABEL: name: build_vector_v3s16
    ; GFX78: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX78: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX78: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; GFX78: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; GFX78: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX78: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX78: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GFX78: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
    ; GFX78: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
    ; GFX78: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
    ; GFX78: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
    ; GFX78: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX78: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C1]](s32)
    ; GFX78: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]]
    ; GFX78: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
    ; GFX78: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX78: [[AND5:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
    ; GFX78: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C1]](s32)
    ; GFX78: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]]
    ; GFX78: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
    ; GFX78: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BITCAST2]](<2 x s16>)
    ; GFX78: S_NOP 0, implicit [[CONCAT_VECTORS]](<6 x s16>)
    ; GFX9-LABEL: name: build_vector_v3s16
    ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY]](s32), [[COPY1]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY2]](s32), [[COPY]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32)
    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), [[BUILD_VECTOR_TRUNC2]](<2 x s16>)
    ; GFX9: S_NOP 0, implicit [[CONCAT_VECTORS]](<6 x s16>)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32) = COPY $vgpr2
    %3:_(s16) = G_TRUNC %0
    %4:_(s16) = G_TRUNC %1
    %5:_(s16) = G_TRUNC %2
    %6:_(<3 x s16>) = G_BUILD_VECTOR %3, %4, %5
    %7:_(<6 x s16>) = G_CONCAT_VECTORS %6, %6
    S_NOP 0, implicit %7
...

---
name: build_vector_v4s16
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3

    ; GFX78-LABEL: name: build_vector_v4s16
    ; GFX78: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX78: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX78: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; GFX78: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
    ; GFX78: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; GFX78: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX78: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX78: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GFX78: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
    ; GFX78: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
    ; GFX78: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
    ; GFX78: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
    ; GFX78: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
    ; GFX78: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C1]](s32)
    ; GFX78: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]]
    ; GFX78: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
    ; GFX78: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>)
    ; GFX78: S_NOP 0, implicit [[CONCAT_VECTORS]](<4 x s16>)
    ; GFX9-LABEL: name: build_vector_v4s16
    ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
    ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY]](s32), [[COPY1]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY2]](s32), [[COPY3]](s32)
    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
    ; GFX9: S_NOP 0, implicit [[CONCAT_VECTORS]](<4 x s16>)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32) = COPY $vgpr2
    %3:_(s32) = COPY $vgpr3
    %4:_(s16) = G_TRUNC %0
    %5:_(s16) = G_TRUNC %1
    %6:_(s16) = G_TRUNC %2
    %7:_(s16) = G_TRUNC %3
    %8:_(<4 x s16>) = G_BUILD_VECTOR %4, %5, %6, %7
    S_NOP 0, implicit %8
...

---
name: build_vector_v5s16
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4

    ; GFX78-LABEL: name: build_vector_v5s16
    ; GFX78: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX78: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX78: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; GFX78: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
    ; GFX78: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
    ; GFX78: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; GFX78: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX78: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX78: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GFX78: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
    ; GFX78: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
    ; GFX78: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
    ; GFX78: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
    ; GFX78: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
    ; GFX78: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C1]](s32)
    ; GFX78: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]]
    ; GFX78: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
    ; GFX78: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]]
    ; GFX78: [[AND5:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX78: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C1]](s32)
    ; GFX78: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]]
    ; GFX78: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
    ; GFX78: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX78: [[AND7:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
    ; GFX78: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C1]](s32)
    ; GFX78: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]]
    ; GFX78: [[BITCAST3:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR3]](s32)
    ; GFX78: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
    ; GFX78: [[AND9:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]]
    ; GFX78: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND9]], [[C1]](s32)
    ; GFX78: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]]
    ; GFX78: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR4]](s32)
    ; GFX78: [[CONCAT_VECTORS:%[0-9]+]]:_(<10 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BITCAST2]](<2 x s16>), [[BITCAST3]](<2 x s16>), [[BITCAST4]](<2 x s16>)
    ; GFX78: S_NOP 0, implicit [[CONCAT_VECTORS]](<10 x s16>)
    ; GFX9-LABEL: name: build_vector_v5s16
    ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
    ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
    ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY]](s32), [[COPY1]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY2]](s32), [[COPY3]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY4]](s32), [[COPY]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY3]](s32), [[COPY4]](s32)
    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<10 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), [[BUILD_VECTOR_TRUNC2]](<2 x s16>), [[BUILD_VECTOR_TRUNC3]](<2 x s16>), [[BUILD_VECTOR_TRUNC4]](<2 x s16>)
    ; GFX9: S_NOP 0, implicit [[CONCAT_VECTORS]](<10 x s16>)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32) = COPY $vgpr2
    %3:_(s32) = COPY $vgpr3
    %4:_(s32) = COPY $vgpr4
    %5:_(s16) = G_TRUNC %0
    %6:_(s16) = G_TRUNC %1
    %7:_(s16) = G_TRUNC %2
    %8:_(s16) = G_TRUNC %3
    %9:_(s16) = G_TRUNC %4
    %10:_(<5 x s16>) = G_BUILD_VECTOR %5, %6, %7, %8, %9
    %11:_(<10 x s16>) = G_CONCAT_VECTORS %10, %10
    S_NOP 0, implicit %11
...

---
name: build_vector_v7s16
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6

    ; GFX78-LABEL: name: build_vector_v7s16
    ; GFX78: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX78: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX78: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; GFX78: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
    ; GFX78: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
    ; GFX78: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
    ; GFX78: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
    ; GFX78: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; GFX78: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX78: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX78: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GFX78: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
    ; GFX78: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
    ; GFX78: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
    ; GFX78: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
    ; GFX78: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
    ; GFX78: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C1]](s32)
    ; GFX78: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]]
    ; GFX78: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
    ; GFX78: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]]
    ; GFX78: [[AND5:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C]]
    ; GFX78: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C1]](s32)
    ; GFX78: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]]
    ; GFX78: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
    ; GFX78: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C]]
    ; GFX78: [[AND7:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX78: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C1]](s32)
    ; GFX78: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]]
    ; GFX78: [[BITCAST3:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR3]](s32)
    ; GFX78: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX78: [[AND9:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
    ; GFX78: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND9]], [[C1]](s32)
    ; GFX78: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]]
    ; GFX78: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR4]](s32)
    ; GFX78: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
    ; GFX78: [[AND11:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]]
    ; GFX78: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C1]](s32)
    ; GFX78: [[OR5:%[0-9]+]]:_(s32) = G_OR [[AND10]], [[SHL5]]
    ; GFX78: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR5]](s32)
    ; GFX78: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C]]
    ; GFX78: [[AND13:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C]]
    ; GFX78: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[AND13]], [[C1]](s32)
    ; GFX78: [[OR6:%[0-9]+]]:_(s32) = G_OR [[AND12]], [[SHL6]]
    ; GFX78: [[BITCAST6:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR6]](s32)
    ; GFX78: [[CONCAT_VECTORS:%[0-9]+]]:_(<14 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BITCAST2]](<2 x s16>), [[BITCAST3]](<2 x s16>), [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>), [[BITCAST6]](<2 x s16>)
    ; GFX78: S_NOP 0, implicit [[CONCAT_VECTORS]](<14 x s16>)
    ; GFX9-LABEL: name: build_vector_v7s16
    ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
    ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
    ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
    ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
    ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY]](s32), [[COPY1]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY2]](s32), [[COPY3]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY4]](s32), [[COPY5]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY6]](s32), [[COPY]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY3]](s32), [[COPY4]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC6:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY5]](s32), [[COPY6]](s32)
    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<14 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), [[BUILD_VECTOR_TRUNC2]](<2 x s16>), [[BUILD_VECTOR_TRUNC3]](<2 x s16>), [[BUILD_VECTOR_TRUNC4]](<2 x s16>), [[BUILD_VECTOR_TRUNC5]](<2 x s16>), [[BUILD_VECTOR_TRUNC6]](<2 x s16>)
    ; GFX9: S_NOP 0, implicit [[CONCAT_VECTORS]](<14 x s16>)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32) = COPY $vgpr2
    %3:_(s32) = COPY $vgpr3
    %4:_(s32) = COPY $vgpr4
    %5:_(s32) = COPY $vgpr5
    %6:_(s32) = COPY $vgpr6
    %7:_(s16) = G_TRUNC %0
    %8:_(s16) = G_TRUNC %1
    %9:_(s16) = G_TRUNC %2
    %10:_(s16) = G_TRUNC %3
    %11:_(s16) = G_TRUNC %4
    %12:_(s16) = G_TRUNC %5
    %13:_(s16) = G_TRUNC %6
    %14:_(<7 x s16>) = G_BUILD_VECTOR %7, %8, %9, %10, %11, %12, %13
    %15:_(<14 x s16>) = G_CONCAT_VECTORS %14, %14
    S_NOP 0, implicit %15
...

---
name: build_vector_v8s16
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7

    ; GFX78-LABEL: name: build_vector_v8s16
    ; GFX78: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX78: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX78: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; GFX78: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
    ; GFX78: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
    ; GFX78: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
    ; GFX78: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
    ; GFX78: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
    ; GFX78: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; GFX78: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX78: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX78: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GFX78: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
    ; GFX78: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
    ; GFX78: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
    ; GFX78: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
    ; GFX78: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
    ; GFX78: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C1]](s32)
    ; GFX78: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]]
    ; GFX78: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
    ; GFX78: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]]
    ; GFX78: [[AND5:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C]]
    ; GFX78: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C1]](s32)
    ; GFX78: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]]
    ; GFX78: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
    ; GFX78: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C]]
    ; GFX78: [[AND7:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C]]
    ; GFX78: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C1]](s32)
    ; GFX78: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]]
    ; GFX78: [[BITCAST3:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR3]](s32)
    ; GFX78: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BITCAST2]](<2 x s16>), [[BITCAST3]](<2 x s16>)
    ; GFX78: S_NOP 0, implicit [[CONCAT_VECTORS]](<8 x s16>)
    ; GFX9-LABEL: name: build_vector_v8s16
    ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
    ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
    ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
    ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
    ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
    ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY]](s32), [[COPY1]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY2]](s32), [[COPY3]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY4]](s32), [[COPY5]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY6]](s32), [[COPY7]](s32)
    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), [[BUILD_VECTOR_TRUNC2]](<2 x s16>), [[BUILD_VECTOR_TRUNC3]](<2 x s16>)
    ; GFX9: S_NOP 0, implicit [[CONCAT_VECTORS]](<8 x s16>)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32) = COPY $vgpr2
    %3:_(s32) = COPY $vgpr3
    %4:_(s32) = COPY $vgpr4
    %5:_(s32) = COPY $vgpr5
    %6:_(s32) = COPY $vgpr6
    %7:_(s32) = COPY $vgpr7
    %8:_(s16) = G_TRUNC %0
    %9:_(s16) = G_TRUNC %1
    %10:_(s16) = G_TRUNC %2
    %11:_(s16) = G_TRUNC %3
    %12:_(s16) = G_TRUNC %4
    %13:_(s16) = G_TRUNC %5
    %14:_(s16) = G_TRUNC %6
    %15:_(s16) = G_TRUNC %7
    %16:_(<8 x s16>) = G_BUILD_VECTOR %8, %9, %10, %11, %12, %13, %14, %15
    S_NOP 0, implicit %16
...

---
name: build_vector_v16s16
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15

    ; GFX78-LABEL: name: build_vector_v16s16
    ; GFX78: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX78: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX78: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; GFX78: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
    ; GFX78: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
    ; GFX78: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
    ; GFX78: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
    ; GFX78: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
    ; GFX78: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
    ; GFX78: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
    ; GFX78: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
    ; GFX78: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
    ; GFX78: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
    ; GFX78: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
    ; GFX78: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
    ; GFX78: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
    ; GFX78: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; GFX78: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX78: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX78: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GFX78: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
    ; GFX78: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
    ; GFX78: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
    ; GFX78: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
    ; GFX78: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
    ; GFX78: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C1]](s32)
    ; GFX78: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]]
    ; GFX78: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
    ; GFX78: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]]
    ; GFX78: [[AND5:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C]]
    ; GFX78: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C1]](s32)
    ; GFX78: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]]
    ; GFX78: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
    ; GFX78: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C]]
    ; GFX78: [[AND7:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C]]
    ; GFX78: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C1]](s32)
    ; GFX78: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]]
    ; GFX78: [[BITCAST3:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR3]](s32)
    ; GFX78: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C]]
    ; GFX78: [[AND9:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C]]
    ; GFX78: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND9]], [[C1]](s32)
    ; GFX78: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]]
    ; GFX78: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR4]](s32)
    ; GFX78: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C]]
    ; GFX78: [[AND11:%[0-9]+]]:_(s32) = G_AND [[COPY11]], [[C]]
    ; GFX78: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C1]](s32)
    ; GFX78: [[OR5:%[0-9]+]]:_(s32) = G_OR [[AND10]], [[SHL5]]
    ; GFX78: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR5]](s32)
    ; GFX78: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C]]
    ; GFX78: [[AND13:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C]]
    ; GFX78: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[AND13]], [[C1]](s32)
    ; GFX78: [[OR6:%[0-9]+]]:_(s32) = G_OR [[AND12]], [[SHL6]]
    ; GFX78: [[BITCAST6:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR6]](s32)
    ; GFX78: [[AND14:%[0-9]+]]:_(s32) = G_AND [[COPY14]], [[C]]
    ; GFX78: [[AND15:%[0-9]+]]:_(s32) = G_AND [[COPY15]], [[C]]
    ; GFX78: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C1]](s32)
    ; GFX78: [[OR7:%[0-9]+]]:_(s32) = G_OR [[AND14]], [[SHL7]]
    ; GFX78: [[BITCAST7:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR7]](s32)
    ; GFX78: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BITCAST2]](<2 x s16>), [[BITCAST3]](<2 x s16>), [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>), [[BITCAST6]](<2 x s16>), [[BITCAST7]](<2 x s16>)
    ; GFX78: S_NOP 0, implicit [[CONCAT_VECTORS]](<16 x s16>)
    ; GFX9-LABEL: name: build_vector_v16s16
    ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
    ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
    ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
    ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
    ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
    ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
    ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
    ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
    ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
    ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
    ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
    ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
    ; GFX9: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
    ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY]](s32), [[COPY1]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY2]](s32), [[COPY3]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY4]](s32), [[COPY5]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY6]](s32), [[COPY7]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY8]](s32), [[COPY9]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC6:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
    ; GFX9: [[BUILD_VECTOR_TRUNC7:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), [[BUILD_VECTOR_TRUNC2]](<2 x s16>), [[BUILD_VECTOR_TRUNC3]](<2 x s16>), [[BUILD_VECTOR_TRUNC4]](<2 x s16>), [[BUILD_VECTOR_TRUNC5]](<2 x s16>), [[BUILD_VECTOR_TRUNC6]](<2 x s16>), [[BUILD_VECTOR_TRUNC7]](<2 x s16>)
    ; GFX9: S_NOP 0, implicit [[CONCAT_VECTORS]](<16 x s16>)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32) = COPY $vgpr2
    %3:_(s32) = COPY $vgpr3
    %4:_(s32) = COPY $vgpr4
    %5:_(s32) = COPY $vgpr5
    %6:_(s32) = COPY $vgpr6
    %7:_(s32) = COPY $vgpr7
    %8:_(s32) = COPY $vgpr8
    %9:_(s32) = COPY $vgpr9
    %10:_(s32) = COPY $vgpr10
    %11:_(s32) = COPY $vgpr11
    %12:_(s32) = COPY $vgpr12
    %13:_(s32) = COPY $vgpr13
    %14:_(s32) = COPY $vgpr14
    %15:_(s32) = COPY $vgpr15
    %16:_(s16) = G_TRUNC %0
    %17:_(s16) = G_TRUNC %1
    %18:_(s16) = G_TRUNC %2
    %19:_(s16) = G_TRUNC %3
    %20:_(s16) = G_TRUNC %4
    %21:_(s16) = G_TRUNC %5
    %22:_(s16) = G_TRUNC %6
    %23:_(s16) = G_TRUNC %7
    %24:_(s16) = G_TRUNC %8
    %25:_(s16) = G_TRUNC %9
    %26:_(s16) = G_TRUNC %10
    %27:_(s16) = G_TRUNC %11
    %28:_(s16) = G_TRUNC %12
    %29:_(s16) = G_TRUNC %13
    %30:_(s16) = G_TRUNC %14
    %31:_(s16) = G_TRUNC %15
    %32:_(<16 x s16>) = G_BUILD_VECTOR  %16, %17, %18, %19, %20, %21, %22, %23, %24, %25, %26, %27, %28, %29, %30, %31
    S_NOP 0, implicit %32
...
