; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefixes=CHECK,RV64

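; A bitcast between vector types of the same total bit width should be a
; register-level no-op; here only the mask xor survives, as a vmxor.mm.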
define <32 x i1> @bitcast_v4i8_v32i1(<4 x i8> %a, <32 x i1> %b) {
; CHECK-LABEL: bitcast_v4i8_v32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %c = bitcast <4 x i8> %a to <32 x i1>
  %d = xor <32 x i1> %b, %c
  ret <32 x i1> %d
}

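; Bitcasts from a fixed-length vector to a scalar integer lower to a vmv.x.s
; read of element 0 at the scalar's element width.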
define i8 @bitcast_v1i8_i8(<1 x i8> %a) {
; CHECK-LABEL: bitcast_v1i8_i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e8, mf8, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %b = bitcast <1 x i8> %a to i8
  ret i8 %b
}

define i16 @bitcast_v2i8_i16(<2 x i8> %a) {
; CHECK-LABEL: bitcast_v2i8_i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %b = bitcast <2 x i8> %a to i16
  ret i16 %b
}

define i16 @bitcast_v1i16_i16(<1 x i16> %a) {
; CHECK-LABEL: bitcast_v1i16_i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %b = bitcast <1 x i16> %a to i16
  ret i16 %b
}

define i32 @bitcast_v4i8_i32(<4 x i8> %a) {
; CHECK-LABEL: bitcast_v4i8_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %b = bitcast <4 x i8> %a to i32
  ret i32 %b
}

define i32 @bitcast_v2i16_i32(<2 x i16> %a) {
; CHECK-LABEL: bitcast_v2i16_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %b = bitcast <2 x i16> %a to i32
  ret i32 %b
}

define i32 @bitcast_v1i32_i32(<1 x i32> %a) {
; CHECK-LABEL: bitcast_v1i32_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %b = bitcast <1 x i32> %a to i32
  ret i32 %b
}

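; An i64 result needs a GPR pair on RV32: the low half comes from vmv.x.s and
; the high half from a 32-bit vsrl.vx followed by another vmv.x.s. RV64 can
; read the whole value with a single vmv.x.s.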
define i64 @bitcast_v8i8_i64(<8 x i8> %a) {
; RV32-LABEL: bitcast_v8i8_i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi a0, zero, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v25, v8, a0
; RV32-NEXT:    vmv.x.s a1, v25
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_v8i8_i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %b = bitcast <8 x i8> %a to i64
  ret i64 %b
}

define i64 @bitcast_v4i16_i64(<4 x i16> %a) {
; RV32-LABEL: bitcast_v4i16_i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi a0, zero, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v25, v8, a0
; RV32-NEXT:    vmv.x.s a1, v25
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_v4i16_i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %b = bitcast <4 x i16> %a to i64
  ret i64 %b
}

define i64 @bitcast_v2i32_i64(<2 x i32> %a) {
; RV32-LABEL: bitcast_v2i32_i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi a0, zero, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v25, v8, a0
; RV32-NEXT:    vmv.x.s a1, v25
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_v2i32_i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %b = bitcast <2 x i32> %a to i64
  ret i64 %b
}

define i64 @bitcast_v1i64_i64(<1 x i64> %a) {
; RV32-LABEL: bitcast_v1i64_i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi a0, zero, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v25, v8, a0
; RV32-NEXT:    vmv.x.s a1, v25
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_v1i64_i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %b = bitcast <1 x i64> %a to i64
  ret i64 %b
}

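; The RUN lines enable only the vector extension, so the soft-float ABI is in
; effect and half/float/double results are returned in GPRs; these cases
; therefore lower the same way as the integer bitcasts above.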
define half @bitcast_v2i8_f16(<2 x i8> %a) {
; CHECK-LABEL: bitcast_v2i8_f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %b = bitcast <2 x i8> %a to half
  ret half %b
}

define half @bitcast_v1i16_f16(<1 x i16> %a) {
; CHECK-LABEL: bitcast_v1i16_f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %b = bitcast <1 x i16> %a to half
  ret half %b
}

define float @bitcast_v4i8_f32(<4 x i8> %a) {
; CHECK-LABEL: bitcast_v4i8_f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %b = bitcast <4 x i8> %a to float
  ret float %b
}

define float @bitcast_v2i16_f32(<2 x i16> %a) {
; CHECK-LABEL: bitcast_v2i16_f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %b = bitcast <2 x i16> %a to float
  ret float %b
}

define float @bitcast_v1i32_f32(<1 x i32> %a) {
; CHECK-LABEL: bitcast_v1i32_f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %b = bitcast <1 x i32> %a to float
  ret float %b
}

define double @bitcast_v8i8_f64(<8 x i8> %a) {
; RV32-LABEL: bitcast_v8i8_f64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi a0, zero, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v25, v8, a0
; RV32-NEXT:    vmv.x.s a1, v25
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_v8i8_f64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %b = bitcast <8 x i8> %a to double
  ret double %b
}

define double @bitcast_v4i16_f64(<4 x i16> %a) {
; RV32-LABEL: bitcast_v4i16_f64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi a0, zero, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v25, v8, a0
; RV32-NEXT:    vmv.x.s a1, v25
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_v4i16_f64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %b = bitcast <4 x i16> %a to double
  ret double %b
}

define double @bitcast_v2i32_f64(<2 x i32> %a) {
; RV32-LABEL: bitcast_v2i32_f64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi a0, zero, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v25, v8, a0
; RV32-NEXT:    vmv.x.s a1, v25
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_v2i32_f64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %b = bitcast <2 x i32> %a to double
  ret double %b
}

define double @bitcast_v1i64_f64(<1 x i64> %a) {
; RV32-LABEL: bitcast_v1i64_f64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi a0, zero, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v25, v8, a0
; RV32-NEXT:    vmv.x.s a1, v25
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_v1i64_f64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %b = bitcast <1 x i64> %a to double
  ret double %b
}

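; Bitcasts in the other direction, from a scalar to a vector, materialize the
; scalar in a vector register with vmv.v.x or vmv.s.x.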
define <1 x i16> @bitcast_i16_v1i16(i16 %a) {
; CHECK-LABEL: bitcast_i16_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
  %b = bitcast i16 %a to <1 x i16>
  ret <1 x i16> %b
}

define <2 x i16> @bitcast_i32_v2i16(i32 %a) {
; RV32-LABEL: bitcast_i32_v2i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
; RV32-NEXT:    vmv.s.x v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_i32_v2i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
; RV64-NEXT:    vmv.v.x v8, a0
; RV64-NEXT:    ret
  %b = bitcast i32 %a to <2 x i16>
  ret <2 x i16> %b
}

define <1 x i32> @bitcast_i32_v1i32(i32 %a) {
; RV32-LABEL: bitcast_i32_v1i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
; RV32-NEXT:    vmv.s.x v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_i32_v1i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
; RV64-NEXT:    vmv.v.x v8, a0
; RV64-NEXT:    ret
  %b = bitcast i32 %a to <1 x i32>
  ret <1 x i32> %b
}

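; An i64 source arrives as a GPR pair on RV32, so the value is assembled with
; a pair of vslide1up.vx operations at e32; RV64 uses a single vmv.s.x at e64.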
define <4 x i16> @bitcast_i64_v4i16(i64 %a) {
; RV32-LABEL: bitcast_i64_v4i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
; RV32-NEXT:    vmv.v.i v25, 0
; RV32-NEXT:    vslide1up.vx v26, v25, a1
; RV32-NEXT:    vslide1up.vx v25, v26, a0
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vslideup.vi v8, v25, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_i64_v4i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v8, a0
; RV64-NEXT:    ret
  %b = bitcast i64 %a to <4 x i16>
  ret <4 x i16> %b
}

define <2 x i32> @bitcast_i64_v2i32(i64 %a) {
; RV32-LABEL: bitcast_i64_v2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
; RV32-NEXT:    vmv.v.i v25, 0
; RV32-NEXT:    vslide1up.vx v26, v25, a1
; RV32-NEXT:    vslide1up.vx v25, v26, a0
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vslideup.vi v8, v25, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_i64_v2i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v8, a0
; RV64-NEXT:    ret
  %b = bitcast i64 %a to <2 x i32>
  ret <2 x i32> %b
}

define <1 x i64> @bitcast_i64_v1i64(i64 %a) {
; RV32-LABEL: bitcast_i64_v1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
; RV32-NEXT:    vmv.v.i v25, 0
; RV32-NEXT:    vslide1up.vx v26, v25, a1
; RV32-NEXT:    vslide1up.vx v25, v26, a0
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vslideup.vi v8, v25, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_i64_v1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v8, a0
; RV64-NEXT:    ret
  %b = bitcast i64 %a to <1 x i64>
  ret <1 x i64> %b
}