; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
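;
; The tests below cover the strided store (vsse) intrinsics for each element
; width (8/16/32/64 bits) at every LMUL. The unmasked intrinsic takes
; (vector data, base pointer, stride, vl); the masked variant adds a
; <vscale x N x i1> mask operand before vl and is expected to emit
; vsse{8,16,32,64}.v with the v0.t mask suffix.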
declare void @llvm.riscv.vsse.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>*,
  i64,
  <vscale x 1 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64>* %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>*,
  i64,
  <vscale x 2 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64>* %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>*,
  i64,
  <vscale x 4 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64>* %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>*,
  i64,
  <vscale x 8 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64>* %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>*,
  i64,
  <vscale x 1 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double>* %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>*,
  i64,
  <vscale x 2 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double>* %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>*,
  i64,
  <vscale x 4 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double>* %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>*,
  i64,
  <vscale x 8 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double>* %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>*,
  i64,
  <vscale x 1 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32>* %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>*,
  i64,
  <vscale x 2 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32>* %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>*,
  i64,
  <vscale x 4 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32>* %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>*,
  i64,
  <vscale x 8 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32>* %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>*,
  i64,
  <vscale x 16 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32>* %1,
    i64 %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>*,
  i64,
  <vscale x 1 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float>* %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>*,
  i64,
  <vscale x 2 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float>* %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>*,
  i64,
  <vscale x 4 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float>* %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>*,
  i64,
  <vscale x 8 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float>* %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>*,
  i64,
  <vscale x 16 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float>* %1,
    i64 %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>*,
  i64,
  <vscale x 1 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16>* %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>*,
  i64,
  <vscale x 2 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16>* %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>*,
  i64,
  <vscale x 4 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16>* %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>*,
  i64,
  <vscale x 8 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16>* %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>*,
  i64,
  <vscale x 16 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16>* %1,
    i64 %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>*,
  i64,
  <vscale x 32 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16>* %1,
    i64 %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>*,
  i64,
  <vscale x 1 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half>* %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>*,
  i64,
  <vscale x 2 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half>* %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>*,
  i64,
  <vscale x 4 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half>* %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>*,
  i64,
  <vscale x 8 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half>* %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>*,
  i64,
  <vscale x 16 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half>* %1,
    i64 %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>*,
  i64,
  <vscale x 32 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half>* %1,
    i64 %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>*,
  i64,
  <vscale x 1 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8>* %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>*,
  i64,
  <vscale x 2 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8>* %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>*,
  i64,
  <vscale x 4 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8>* %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>*,
  i64,
  <vscale x 8 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8>* %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>*,
  i64,
  <vscale x 16 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8>* %1,
    i64 %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>*,
  i64,
  <vscale x 32 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8>* %1,
    i64 %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsse.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>*,
  i64,
  <vscale x 64 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8>* %1,
    i64 %2,
    <vscale x 64 x i1> %3,
    i64 %4)

  ret void
}