; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
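;
; Test lowering of the vmv.x.s intrinsic, which copies element 0 of the
; source vector to a scalar (x) register, sign-extending it to XLEN when
; SEW < XLEN. Each integer SEW (e8-e64) is covered at every supported LMUL
; (mf8-m8); the expected lowering is a single vsetivli followed by vmv.x.s.
;
; For orientation, a user-level sketch of how this intrinsic is typically
; reached from C. The intrinsic name below follows the RVV C intrinsics
; naming convention and is an assumption, not part of this test:
;   vint8mf8_t v = ...;                      // scalable vector of i8
;   int8_t x = __riscv_vmv_x_s_i8mf8_i8(v);  // read element 0 into a scalar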

declare i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8>)

define signext i8 @intrinsic_vmv.x.s_s_nxv1i8(<vscale x 1 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e8, mf8, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> %0)
  ret i8 %a
}

declare i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8>)

define signext i8 @intrinsic_vmv.x.s_s_nxv2i8(<vscale x 2 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e8, mf4, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> %0)
  ret i8 %a
}

declare i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8>)

define signext i8 @intrinsic_vmv.x.s_s_nxv4i8(<vscale x 4 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e8, mf2, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> %0)
  ret i8 %a
}

declare i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8>)

define signext i8 @intrinsic_vmv.x.s_s_nxv8i8(<vscale x 8 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e8, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> %0)
  ret i8 %a
}

declare i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8>)

define signext i8 @intrinsic_vmv.x.s_s_nxv16i8(<vscale x 16 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e8, m2, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> %0)
  ret i8 %a
}

declare i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8>)

define signext i8 @intrinsic_vmv.x.s_s_nxv32i8(<vscale x 32 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e8, m4, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> %0)
  ret i8 %a
}

declare i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8>)

define signext i8 @intrinsic_vmv.x.s_s_nxv64i8(<vscale x 64 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8> %0)
  ret i8 %a
}

declare i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16>)

define signext i16 @intrinsic_vmv.x.s_s_nxv1i16(<vscale x 1 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16> %0)
  ret i16 %a
}

declare i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16>)

define signext i16 @intrinsic_vmv.x.s_s_nxv2i16(<vscale x 2 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e16, mf2, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16> %0)
  ret i16 %a
}

declare i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16>)

define signext i16 @intrinsic_vmv.x.s_s_nxv4i16(<vscale x 4 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16> %0)
  ret i16 %a
}

declare i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16>)

define signext i16 @intrinsic_vmv.x.s_s_nxv8i16(<vscale x 8 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e16, m2, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16> %0)
  ret i16 %a
}

declare i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16>)

define signext i16 @intrinsic_vmv.x.s_s_nxv16i16(<vscale x 16 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e16, m4, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16> %0)
  ret i16 %a
}

declare i16 @llvm.riscv.vmv.x.s.nxv32i16(<vscale x 32 x i16>)

define signext i16 @intrinsic_vmv.x.s_s_nxv32i16(<vscale x 32 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e16, m8, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i16 @llvm.riscv.vmv.x.s.nxv32i16(<vscale x 32 x i16> %0)
  ret i16 %a
}

declare i32 @llvm.riscv.vmv.x.s.nxv1i32(<vscale x 1 x i32>)

define signext i32 @intrinsic_vmv.x.s_s_nxv1i32(<vscale x 1 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i32 @llvm.riscv.vmv.x.s.nxv1i32(<vscale x 1 x i32> %0)
  ret i32 %a
}

declare i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32>)

define signext i32 @intrinsic_vmv.x.s_s_nxv2i32(<vscale x 2 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32> %0)
  ret i32 %a
}

declare i32 @llvm.riscv.vmv.x.s.nxv4i32(<vscale x 4 x i32>)

define signext i32 @intrinsic_vmv.x.s_s_nxv4i32(<vscale x 4 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e32, m2, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i32 @llvm.riscv.vmv.x.s.nxv4i32(<vscale x 4 x i32> %0)
  ret i32 %a
}

declare i32 @llvm.riscv.vmv.x.s.nxv8i32(<vscale x 8 x i32>)

define signext i32 @intrinsic_vmv.x.s_s_nxv8i32(<vscale x 8 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e32, m4, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i32 @llvm.riscv.vmv.x.s.nxv8i32(<vscale x 8 x i32> %0)
  ret i32 %a
}

declare i32 @llvm.riscv.vmv.x.s.nxv16i32(<vscale x 16 x i32>)

define signext i32 @intrinsic_vmv.x.s_s_nxv16i32(<vscale x 16 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e32, m8, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i32 @llvm.riscv.vmv.x.s.nxv16i32(<vscale x 16 x i32> %0)
  ret i32 %a
}

declare i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64>)

define i64 @intrinsic_vmv.x.s_s_nxv1i64(<vscale x 1 x i64> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> %0)
  ret i64 %a
}

declare i64 @llvm.riscv.vmv.x.s.nxv2i64(<vscale x 2 x i64>)

define i64 @intrinsic_vmv.x.s_s_nxv2i64(<vscale x 2 x i64> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e64, m2, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vmv.x.s.nxv2i64(<vscale x 2 x i64> %0)
  ret i64 %a
}

declare i64 @llvm.riscv.vmv.x.s.nxv4i64(<vscale x 4 x i64>)

define i64 @intrinsic_vmv.x.s_s_nxv4i64(<vscale x 4 x i64> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e64, m4, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vmv.x.s.nxv4i64(<vscale x 4 x i64> %0)
  ret i64 %a
}

declare i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64>)

define i64 @intrinsic_vmv.x.s_s_nxv8i64(<vscale x 8 x i64> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 0, e64, m8, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64> %0)
  ret i64 %a
}