; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
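; Tests for the llvm.riscv.vwsub intrinsics (widening subtract): unmasked and
; masked vector-vector (vwsub.vv) forms first, then vector-scalar (vwsub.vx).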
declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i16> @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vwsub.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i16> @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i32);

define <vscale x 2 x i16> @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vwsub.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i32 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i16> @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i32);

define <vscale x 4 x i16> @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vwsub.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i32 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i16> @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i32);

define <vscale x 8 x i16> @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vwsub.vv v26, v8, v9
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i32 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i16> @intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i32);

define <vscale x 16 x i16> @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vwsub.vv v28, v8, v10
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i32 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i16> @intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i32);

define <vscale x 32 x i16> @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vwsub.vv v16, v8, v12
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i32 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i16> @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i32);

define <vscale x 1 x i32> @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vwsub.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i32 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i32);

define <vscale x 2 x i32> @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vwsub.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i32 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i32);

define <vscale x 4 x i32> @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vwsub.vv v26, v8, v9
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i32 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i32);

define <vscale x 8 x i32> @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vwsub.vv v28, v8, v10
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i32 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i32);

define <vscale x 16 x i32> @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vwsub.vv v16, v8, v12
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i32 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i32> @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32);

define <vscale x 1 x i64> @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vwsub.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i64> @intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32);

define <vscale x 2 x i64> @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vwsub.vv v26, v8, v9
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i64> @intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32);

define <vscale x 4 x i64> @intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vwsub.vv v28, v8, v10
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i64> @intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32);

define <vscale x 8 x i64> @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vwsub.vv v16, v8, v12
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i64> @intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i64> %a
}

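; Below: vector-scalar (vwsub.vx) variants of the same intrinsics, which
; subtract a scalar GPR operand (passed in a0) from each vector element.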
declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  i32);

define <vscale x 1 x i16> @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vwsub.vx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i16> @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  i32);

define <vscale x 2 x i16> @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vwsub.vx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i16> @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  i32);

define <vscale x 4 x i16> @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vwsub.vx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i16> @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8(
  <vscale x 8 x i8>,
  i8,
  i32);

define <vscale x 8 x i16> @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vwsub.vx v26, v8, a0
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i16> @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8(
  <vscale x 16 x i8>,
  i8,
  i32);

define <vscale x 16 x i16> @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vwsub.vx v28, v8, a0
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i16> @intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8(
  <vscale x 32 x i8>,
  i8,
  i32);

define <vscale x 32 x i16> @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vwsub.vx v16, v8, a0
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i16> @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  i32);

define <vscale x 1 x i32> @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vwsub.vx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  i32);

define <vscale x 2 x i32> @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vwsub.vx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  i32);

define <vscale x 4 x i32> @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vwsub.vx v26, v8, a0
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  i32);

define <vscale x 8 x i32> @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vwsub.vx v28, v8, a0
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  i32);

define <vscale x 16 x i32> @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vwsub.vx v16, v8, a0
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i32> @intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  i32);

define <vscale x 1 x i64> @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vwsub.vx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i64> @intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  i32);

define <vscale x 2 x i64> @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vwsub.vx v26, v8, a0
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i64> @intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  i32);

define <vscale x 4 x i64> @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vwsub.vx v28, v8, a0
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i64> @intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  i32);

define <vscale x 8 x i64> @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vwsub.vx v16, v8, a0
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i64> @intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i64> %a
}