; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
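; This file covers the llvm.riscv.vmfne intrinsics on rv32: the vector-vector
; (vmfne.vv) form for each legal f16/f32/f64 element type and LMUL, followed
; by the vector-scalar (vmfne.vf) form, each in unmasked and masked variants.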
declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16(
  <vscale x 1 x i1>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmfne.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    i32 %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x half> %2,
    <vscale x 1 x half> %3,
    <vscale x 1 x i1> %mask,
    i32 %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16(
  <vscale x 2 x i1>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmfne.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    i32 %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x half> %2,
    <vscale x 2 x half> %3,
    <vscale x 2 x i1> %mask,
    i32 %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16(
  <vscale x 4 x i1>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmfne.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    i32 %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x half> %2,
    <vscale x 4 x half> %3,
    <vscale x 4 x i1> %mask,
    i32 %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmfne_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16(
  <vscale x 8 x i1>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmfne.vv v25, v8, v10
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmfne.vv v26, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    i32 %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x half> %2,
    <vscale x 8 x half> %3,
    <vscale x 8 x i1> %mask,
    i32 %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  i32);

define <vscale x 16 x i1> @intrinsic_vmfne_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    i32 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16(
  <vscale x 16 x i1>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmfne.vv v25, v8, v12
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmfne.vv v26, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    i32 %4)
  %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x half> %2,
    <vscale x 16 x half> %3,
    <vscale x 16 x i1> %mask,
    i32 %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32(
  <vscale x 1 x i1>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmfne.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    i32 %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x float> %2,
    <vscale x 1 x float> %3,
    <vscale x 1 x i1> %mask,
    i32 %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32(
  <vscale x 2 x i1>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmfne.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    i32 %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x float> %2,
    <vscale x 2 x float> %3,
    <vscale x 2 x i1> %mask,
    i32 %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32(
  <vscale x 4 x i1>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmfne.vv v25, v8, v10
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmfne.vv v26, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    i32 %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x float> %2,
    <vscale x 4 x float> %3,
    <vscale x 4 x i1> %mask,
    i32 %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmfne_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32(
  <vscale x 8 x i1>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmfne.vv v25, v8, v12
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmfne.vv v26, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    i32 %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x float> %2,
    <vscale x 8 x float> %3,
    <vscale x 8 x i1> %mask,
    i32 %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
  <vscale x 1 x i1>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmfne.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    i32 %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x double> %2,
    <vscale x 1 x double> %3,
    <vscale x 1 x i1> %mask,
    i32 %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
  <vscale x 2 x i1>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmfne.vv v25, v8, v10
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmfne.vv v26, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    i32 %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x double> %2,
    <vscale x 2 x double> %3,
    <vscale x 2 x i1> %mask,
    i32 %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmfne.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
  <vscale x 4 x i1>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmfne.vv v25, v8, v12
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmfne.vv v26, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    i32 %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x double> %2,
    <vscale x 4 x double> %3,
    <vscale x 4 x i1> %mask,
    i32 %4)

  ret <vscale x 4 x i1> %a
}

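; The tests below exercise the vector-scalar (vmfne.vf) form. For f16 and f32
; the scalar operand arrives in a0 and is moved into ft0 with fmv.h.x /
; fmv.w.x before the compare; the masked variants first save v0 and then
; install the mask argument as the new v0.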
declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  i32);

define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vmfne.vf v0, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
  <vscale x 1 x i1>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  i32);

define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vmfne.vf v0, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
  <vscale x 2 x i1>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  i32);

define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vmfne.vf v0, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
  <vscale x 4 x i1>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  i32);

define <vscale x 8 x i1> @intrinsic_vmfne_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vmfne.vf v0, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
  <vscale x 8 x i1>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  i32);

define <vscale x 16 x i1> @intrinsic_vmfne_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vmfne.vf v0, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
  <vscale x 16 x i1>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  i32);

define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vmfne.vf v0, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
  <vscale x 1 x i1>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  i32);

define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vmfne.vf v0, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
  <vscale x 2 x i1>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  i32);

define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vmfne.vf v0, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
  <vscale x 4 x i1>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  i32);

define <vscale x 8 x i1> @intrinsic_vmfne_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vmfne.vf v0, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
  <vscale x 8 x i1>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i1> %a
}

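; For f64 on rv32 the scalar operand spans two GPRs (a0/a1) and there is no
; direct pair-to-FPR move, so it is materialized through the stack with two
; sw stores and an fld reload, as the checks below expect.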
declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  i32);

define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vmfne.vf v0, v8, ft0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
  <vscale x 1 x i1>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  i32);

define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    vmfne.vf v0, v8, ft0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
  <vscale x 2 x i1>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  i32);

define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    vmfne.vf v0, v8, ft0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
  <vscale x 4 x i1>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i1> %a
}