; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \
; RUN:   -mattr=+d -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
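
; The tests below exercise the unit-stride store intrinsic llvm.riscv.vse and
; its masked variant for each element type (i8/i16/i32/i64, half/float/double)
; at every legal LMUL. Each unmasked call is expected to lower to a single
; vsetvli/vseN.v pair; each masked call additionally carries the v0.t mask
; operand.

; SEW=64 integer (i64) stores, LMUL m1 through m8.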
declare void @llvm.riscv.vse.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>*,
  i32);

define void @intrinsic_vse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>*,
  <vscale x 1 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64>* %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>*,
  i32);

define void @intrinsic_vse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>*,
  <vscale x 2 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64>* %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>*,
  i32);

define void @intrinsic_vse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>*,
  <vscale x 4 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64>* %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>*,
  i32);

define void @intrinsic_vse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>*,
  <vscale x 8 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64>* %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret void
}

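; SEW=64 floating-point (f64) stores, LMUL m1 through m8.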
declare void @llvm.riscv.vse.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>*,
  i32);

define void @intrinsic_vse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>*,
  <vscale x 1 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double>* %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>*,
  i32);

define void @intrinsic_vse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>*,
  <vscale x 2 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double>* %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>*,
  i32);

define void @intrinsic_vse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>*,
  <vscale x 4 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double>* %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>*,
  i32);

define void @intrinsic_vse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>*,
  <vscale x 8 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double>* %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret void
}

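; SEW=32 integer (i32) stores, LMUL mf2 through m8.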
declare void @llvm.riscv.vse.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>*,
  i32);

define void @intrinsic_vse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>*,
  <vscale x 1 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32>* %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>*,
  i32);

define void @intrinsic_vse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>*,
  <vscale x 2 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32>* %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>*,
  i32);

define void @intrinsic_vse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>*,
  <vscale x 4 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32>* %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>*,
  i32);

define void @intrinsic_vse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>*,
  <vscale x 8 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32>* %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>*,
  i32);

define void @intrinsic_vse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>*,
  <vscale x 16 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32>* %1,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret void
}

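; SEW=32 floating-point (f32) stores, LMUL mf2 through m8.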
declare void @llvm.riscv.vse.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>*,
  i32);

define void @intrinsic_vse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>*,
  <vscale x 1 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float>* %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>*,
  i32);

define void @intrinsic_vse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>*,
  <vscale x 2 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float>* %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>*,
  i32);

define void @intrinsic_vse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>*,
  <vscale x 4 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float>* %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>*,
  i32);

define void @intrinsic_vse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>*,
  <vscale x 8 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float>* %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>*,
  i32);

define void @intrinsic_vse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>*,
  <vscale x 16 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float>* %1,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret void
}

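; SEW=16 integer (i16) stores, LMUL mf4 through m8.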
declare void @llvm.riscv.vse.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>*,
  i32);

define void @intrinsic_vse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>*,
  <vscale x 1 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16>* %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>*,
  i32);

define void @intrinsic_vse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>*,
  <vscale x 2 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16>* %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>*,
  i32);

define void @intrinsic_vse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>*,
  <vscale x 4 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16>* %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>*,
  i32);

define void @intrinsic_vse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>*,
  <vscale x 8 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16>* %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>*,
  i32);

define void @intrinsic_vse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>*,
  <vscale x 16 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16>* %1,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>*,
  i32);

define void @intrinsic_vse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>*,
  <vscale x 32 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16>* %1,
    <vscale x 32 x i1> %2,
    i32 %3)

  ret void
}

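; SEW=16 floating-point (f16) stores, LMUL mf4 through m8; these half-type
; tests are presumably why the RUN line enables +experimental-zfh.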
declare void @llvm.riscv.vse.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>*,
  i32);

define void @intrinsic_vse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>*,
  <vscale x 1 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half>* %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>*,
  i32);

define void @intrinsic_vse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>*,
  <vscale x 2 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half>* %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>*,
  i32);

define void @intrinsic_vse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>*,
  <vscale x 4 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half>* %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>*,
  i32);

define void @intrinsic_vse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>*,
  <vscale x 8 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half>* %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>*,
  i32);

define void @intrinsic_vse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>*,
  <vscale x 16 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half>* %1,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>*,
  i32);

define void @intrinsic_vse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>*,
  <vscale x 32 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half>* %1,
    <vscale x 32 x i1> %2,
    i32 %3)

  ret void
}

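; SEW=8 integer (i8) stores, LMUL mf8 through m8.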
declare void @llvm.riscv.vse.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>*,
  i32);

define void @intrinsic_vse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>*,
  <vscale x 1 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8>* %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>*,
  i32);

define void @intrinsic_vse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>*,
  <vscale x 2 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8>* %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>*,
  i32);

define void @intrinsic_vse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>*,
  <vscale x 4 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8>* %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>*,
  i32);

define void @intrinsic_vse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>*,
  <vscale x 8 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8>* %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>*,
  i32);

define void @intrinsic_vse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>*,
  <vscale x 16 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8>* %1,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>*,
  i32);

define void @intrinsic_vse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>*,
  <vscale x 32 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8>* %1,
    <vscale x 32 x i1> %2,
    i32 %3)

  ret void
}

declare void @llvm.riscv.vse.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>*,
  i32);

define void @intrinsic_vse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8>* %1,
    i32 %2)

  ret void
}

declare void @llvm.riscv.vse.mask.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>*,
  <vscale x 64 x i1>,
  i32);

define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vse.mask.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8>* %1,
    <vscale x 64 x i1> %2,
    i32 %3)

  ret void
}
