1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
3; RUN:   < %s | FileCheck %s
; vnmsub.vv tests for the i8 element types (nxv1i8 .. nxv32i8), in unmasked
; and masked forms.  Each CHECK block pins the vsetvli emitted by llc
; (SEW/LMUL and the "tu, mu" tail-undisturbed/mask-undisturbed policy) and
; the register assignment for the three vector operands.

; Unmasked vnmsub.vv, <vscale x 1 x i8> (e8, mf8).
declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i64);

define <vscale x 1 x i8>  @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    i64 %3)

  ret <vscale x 1 x i8> %a
}

; Masked vnmsub.vv, <vscale x 1 x i8>; the <vscale x 1 x i1> mask is
; carried in v0 and shows up as the v0.t suffix on the instruction.
declare <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i8>  @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i8> %a
}

; Unmasked vnmsub.vv, <vscale x 2 x i8> (e8, mf4).
declare <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i64);

define <vscale x 2 x i8>  @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    i64 %3)

  ret <vscale x 2 x i8> %a
}

; Masked vnmsub.vv, <vscale x 2 x i8>.
declare <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i8>  @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i8> %a
}

; Unmasked vnmsub.vv, <vscale x 4 x i8> (e8, mf2).
declare <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i64);

define <vscale x 4 x i8>  @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    i64 %3)

  ret <vscale x 4 x i8> %a
}

; Masked vnmsub.vv, <vscale x 4 x i8>.
declare <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i8>  @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i8> %a
}

; Unmasked vnmsub.vv, <vscale x 8 x i8> (e8, m1).
declare <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i64);

define <vscale x 8 x i8>  @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    i64 %3)

  ret <vscale x 8 x i8> %a
}

; Masked vnmsub.vv, <vscale x 8 x i8>.
declare <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i8>  @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i8> %a
}

; Unmasked vnmsub.vv, <vscale x 16 x i8> (e8, m2).  At LMUL=2 the operand
; registers step by 2 (v8, v10, v12).
declare <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i64);

define <vscale x 16 x i8>  @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    i64 %3)

  ret <vscale x 16 x i8> %a
}

; Masked vnmsub.vv, <vscale x 16 x i8>.
declare <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i8>  @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i8> %a
}

; Unmasked vnmsub.vv, <vscale x 32 x i8> (e8, m4).  At LMUL=4 the operand
; registers step by 4 (v8, v12, v16).
declare <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i64);

define <vscale x 32 x i8>  @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    i64 %3)

  ret <vscale x 32 x i8> %a
}

; Masked vnmsub.vv, <vscale x 32 x i8>.
declare <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x i8>  @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x i8> %a
}
279
; vnmsub.vv tests for the i16 element types (nxv1i16 .. nxv16i16),
; unmasked and masked.  Same structure as the i8 tests above this span:
; CHECK lines pin the e16 vsetvli (LMUL varies with the type) and the
; operand registers.

; Unmasked vnmsub.vv, <vscale x 1 x i16> (e16, mf4).
declare <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i64);

define <vscale x 1 x i16>  @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    i64 %3)

  ret <vscale x 1 x i16> %a
}

; Masked vnmsub.vv, <vscale x 1 x i16>; mask in v0 (v0.t).
declare <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i16>  @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i16> %a
}

; Unmasked vnmsub.vv, <vscale x 2 x i16> (e16, mf2).
declare <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i64);

define <vscale x 2 x i16>  @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    i64 %3)

  ret <vscale x 2 x i16> %a
}

; Masked vnmsub.vv, <vscale x 2 x i16>.
declare <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i16>  @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i16> %a
}

; Unmasked vnmsub.vv, <vscale x 4 x i16> (e16, m1).
declare <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i64);

define <vscale x 4 x i16>  @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    i64 %3)

  ret <vscale x 4 x i16> %a
}

; Masked vnmsub.vv, <vscale x 4 x i16>.
declare <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i16>  @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i16> %a
}

; Unmasked vnmsub.vv, <vscale x 8 x i16> (e16, m2); registers step by 2.
declare <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i64);

define <vscale x 8 x i16>  @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    i64 %3)

  ret <vscale x 8 x i16> %a
}

; Masked vnmsub.vv, <vscale x 8 x i16>.
declare <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i16>  @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i16> %a
}

; Unmasked vnmsub.vv, <vscale x 16 x i16> (e16, m4); registers step by 4.
declare <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i64);

define <vscale x 16 x i16>  @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    i64 %3)

  ret <vscale x 16 x i16> %a
}

; Masked vnmsub.vv, <vscale x 16 x i16>.
declare <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i16>  @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i16> %a
}
509
; vnmsub.vv tests for the i32 element types (nxv1i32 .. nxv8i32),
; unmasked and masked.  CHECK lines pin the e32 vsetvli and register
; assignment for each LMUL.

; Unmasked vnmsub.vv, <vscale x 1 x i32> (e32, mf2).
declare <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i64);

define <vscale x 1 x i32>  @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    i64 %3)

  ret <vscale x 1 x i32> %a
}

; Masked vnmsub.vv, <vscale x 1 x i32>; mask in v0 (v0.t).
declare <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i32>  @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i32> %a
}

; Unmasked vnmsub.vv, <vscale x 2 x i32> (e32, m1).
declare <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i64);

define <vscale x 2 x i32>  @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    i64 %3)

  ret <vscale x 2 x i32> %a
}

; Masked vnmsub.vv, <vscale x 2 x i32>.
declare <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i32>  @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i32> %a
}

; Unmasked vnmsub.vv, <vscale x 4 x i32> (e32, m2); registers step by 2.
declare <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i64);

define <vscale x 4 x i32>  @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    i64 %3)

  ret <vscale x 4 x i32> %a
}

; Masked vnmsub.vv, <vscale x 4 x i32>.
declare <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i32>  @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i32> %a
}

; Unmasked vnmsub.vv, <vscale x 8 x i32> (e32, m4); registers step by 4.
declare <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i64);

define <vscale x 8 x i32>  @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    i64 %3)

  ret <vscale x 8 x i32> %a
}

; Masked vnmsub.vv, <vscale x 8 x i32>.
declare <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i32>  @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i32> %a
}
693
; vnmsub.vv tests for the i64 element types (nxv1i64 .. nxv4i64),
; unmasked and masked.  These require the e64 vsetvli pinned below.

; Unmasked vnmsub.vv, <vscale x 1 x i64> (e64, m1).
declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64);

define <vscale x 1 x i64>  @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    i64 %3)

  ret <vscale x 1 x i64> %a
}

; Masked vnmsub.vv, <vscale x 1 x i64>; mask in v0 (v0.t).
declare <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i64>  @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i64> %a
}

; Unmasked vnmsub.vv, <vscale x 2 x i64> (e64, m2); registers step by 2.
declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64);

define <vscale x 2 x i64>  @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    i64 %3)

  ret <vscale x 2 x i64> %a
}

; Masked vnmsub.vv, <vscale x 2 x i64>.
declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i64>  @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i64> %a
}

; Unmasked vnmsub.vv, <vscale x 4 x i64> (e64, m4); registers step by 4.
declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64);

define <vscale x 4 x i64>  @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    i64 %3)

  ret <vscale x 4 x i64> %a
}

; Masked vnmsub.vv, <vscale x 4 x i64>.
declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i64>  @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i64> %a
}
831
; vnmsub.vx tests: the second operand is a scalar i8 (passed in a0), so the
; vector length argument moves to a1 in the pinned vsetvli.  Covers
; nxv1i8 .. nxv4i8, unmasked and masked.

; Unmasked vnmsub.vx, <vscale x 1 x i8> (e8, mf8); scalar in a0, vl in a1.
declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i8>,
  i64);

define <vscale x 1 x i8>  @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    <vscale x 1 x i8> %2,
    i64 %3)

  ret <vscale x 1 x i8> %a
}

; Masked vnmsub.vx, <vscale x 1 x i8>; mask in v0 (v0.t).
declare <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i8> @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i8> %a
}

; Unmasked vnmsub.vx, <vscale x 2 x i8> (e8, mf4).
declare <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i8>,
  i64);

define <vscale x 2 x i8>  @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    <vscale x 2 x i8> %2,
    i64 %3)

  ret <vscale x 2 x i8> %a
}

; Masked vnmsub.vx, <vscale x 2 x i8>.
declare <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i8> @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i8> %a
}

; Unmasked vnmsub.vx, <vscale x 4 x i8> (e8, mf2).
declare <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i8>,
  i64);

define <vscale x 4 x i8>  @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    <vscale x 4 x i8> %2,
    i64 %3)

  ret <vscale x 4 x i8> %a
}

; Masked vnmsub.vx, <vscale x 4 x i8>.
declare <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i8> @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i8> %a
}
969
970declare <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
971  <vscale x 8 x i8>,
972  i8,
973  <vscale x 8 x i8>,
974  i64);
975
976define <vscale x 8 x i8>  @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
977; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8:
978; CHECK:       # %bb.0: # %entry
979; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
980; CHECK-NEXT:    vnmsub.vx v8, a0, v9
981; CHECK-NEXT:    ret
982entry:
983  %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
984    <vscale x 8 x i8> %0,
985    i8 %1,
986    <vscale x 8 x i8> %2,
987    i64 %3)
988
989  ret <vscale x 8 x i8> %a
990}
991
992declare <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
993  <vscale x 8 x i8>,
994  i8,
995  <vscale x 8 x i8>,
996  <vscale x 8 x i1>,
997  i64);
998
999define <vscale x 8 x i8> @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
1000; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8:
1001; CHECK:       # %bb.0: # %entry
1002; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
1003; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
1004; CHECK-NEXT:    ret
1005entry:
1006  %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
1007    <vscale x 8 x i8> %0,
1008    i8 %1,
1009    <vscale x 8 x i8> %2,
1010    <vscale x 8 x i1> %3,
1011    i64 %4)
1012
1013  ret <vscale x 8 x i8> %a
1014}
1015
1016declare <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
1017  <vscale x 16 x i8>,
1018  i8,
1019  <vscale x 16 x i8>,
1020  i64);
1021
1022define <vscale x 16 x i8>  @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
1023; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8:
1024; CHECK:       # %bb.0: # %entry
1025; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
1026; CHECK-NEXT:    vnmsub.vx v8, a0, v10
1027; CHECK-NEXT:    ret
1028entry:
1029  %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
1030    <vscale x 16 x i8> %0,
1031    i8 %1,
1032    <vscale x 16 x i8> %2,
1033    i64 %3)
1034
1035  ret <vscale x 16 x i8> %a
1036}
1037
1038declare <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
1039  <vscale x 16 x i8>,
1040  i8,
1041  <vscale x 16 x i8>,
1042  <vscale x 16 x i1>,
1043  i64);
1044
1045define <vscale x 16 x i8> @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
1046; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8:
1047; CHECK:       # %bb.0: # %entry
1048; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
1049; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
1050; CHECK-NEXT:    ret
1051entry:
1052  %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
1053    <vscale x 16 x i8> %0,
1054    i8 %1,
1055    <vscale x 16 x i8> %2,
1056    <vscale x 16 x i1> %3,
1057    i64 %4)
1058
1059  ret <vscale x 16 x i8> %a
1060}
1061
1062declare <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
1063  <vscale x 32 x i8>,
1064  i8,
1065  <vscale x 32 x i8>,
1066  i64);
1067
1068define <vscale x 32 x i8>  @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
1069; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8:
1070; CHECK:       # %bb.0: # %entry
1071; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
1072; CHECK-NEXT:    vnmsub.vx v8, a0, v12
1073; CHECK-NEXT:    ret
1074entry:
1075  %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
1076    <vscale x 32 x i8> %0,
1077    i8 %1,
1078    <vscale x 32 x i8> %2,
1079    i64 %3)
1080
1081  ret <vscale x 32 x i8> %a
1082}
1083
1084declare <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
1085  <vscale x 32 x i8>,
1086  i8,
1087  <vscale x 32 x i8>,
1088  <vscale x 32 x i1>,
1089  i64);
1090
1091define <vscale x 32 x i8> @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
1092; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8:
1093; CHECK:       # %bb.0: # %entry
1094; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
1095; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
1096; CHECK-NEXT:    ret
1097entry:
1098  %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
1099    <vscale x 32 x i8> %0,
1100    i8 %1,
1101    <vscale x 32 x i8> %2,
1102    <vscale x 32 x i1> %3,
1103    i64 %4)
1104
1105  ret <vscale x 32 x i8> %a
1106}
1107
; vnmsub.vx tests, SEW=16 (nxv1i16 .. nxv16i16): same pattern as the SEW=8
; group -- each unmasked/masked pair checks selection of a single vnmsub.vx
; under a vsetvli with e16, the matching LMUL, and the tu, mu policy shown in
; the CHECK lines.
declare <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i16>,
  i64);

define <vscale x 1 x i16>  @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    <vscale x 1 x i16> %2,
    i64 %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i16> @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i16>,
  i64);

define <vscale x 2 x i16>  @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    <vscale x 2 x i16> %2,
    i64 %3)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i16> @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i16>,
  i64);

define <vscale x 4 x i16>  @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    <vscale x 4 x i16> %2,
    i64 %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i16> @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i16>,
  i64);

define <vscale x 8 x i16>  @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    <vscale x 8 x i16> %2,
    i64 %3)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i16> @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i16>,
  i64);

define <vscale x 16 x i16>  @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    <vscale x 16 x i16> %2,
    i64 %3)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i16> @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i16> %a
}
1337
; vnmsub.vx tests, SEW=32 (nxv1i32 .. nxv8i32): same pattern -- each
; unmasked/masked pair checks selection of a single vnmsub.vx under a vsetvli
; with e32, the matching LMUL, and the tu, mu policy shown in the CHECK lines.
declare <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i32>,
  i64);

define <vscale x 1 x i32>  @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    <vscale x 1 x i32> %2,
    i64 %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i32> @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i32>,
  i64);

define <vscale x 2 x i32>  @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    <vscale x 2 x i32> %2,
    i64 %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i32> @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i32>,
  i64);

define <vscale x 4 x i32>  @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    <vscale x 4 x i32> %2,
    i64 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i32> @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i32>,
  i64);

define <vscale x 8 x i32>  @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    <vscale x 8 x i32> %2,
    i64 %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i32> @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i32> %a
}
1521
; vnmsub.vx tests, SEW=64 (nxv1i64 .. nxv4i64): same pattern -- each
; unmasked/masked pair checks selection of a single vnmsub.vx under a vsetvli
; with e64, the matching LMUL, and the tu, mu policy shown in the CHECK lines.
; (Target is riscv64, so the i64 scalar operand is passed in a single GPR, a0.)
declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i64>,
  i64);

define <vscale x 1 x i64>  @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i64> %2,
    i64 %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i64>,
  i64);

define <vscale x 2 x i64>  @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    <vscale x 2 x i64> %2,
    i64 %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i64>,
  i64);

define <vscale x 4 x i64>  @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    <vscale x 4 x i64> %2,
    i64 %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i64> %a
}
1659