; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -target-abi lp64d -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
  half,
  i64);

; Splat a scalar half into all elements of a nxv1f16 vector (SEW=16, LMUL=1/4).
define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
    half %0,
    i64 %1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
  half,
  i64);

; Splat a scalar half into all elements of a nxv2f16 vector (SEW=16, LMUL=1/2).
define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
    half %0,
    i64 %1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
  half,
  i64);

; Splat a scalar half into all elements of a nxv4f16 vector (SEW=16, LMUL=1).
define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
    half %0,
    i64 %1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
  half,
  i64);

; Splat a scalar half into all elements of a nxv8f16 vector (SEW=16, LMUL=2).
define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
    half %0,
    i64 %1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
  half,
  i64);

; Splat a scalar half into all elements of a nxv16f16 vector (SEW=16, LMUL=4).
define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
    half %0,
    i64 %1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
  half,
  i64);

; Splat a scalar half into all elements of a nxv32f16 vector (SEW=16, LMUL=8).
define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
    half %0,
    i64 %1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
  float,
  i64);

; Splat a scalar float into all elements of a nxv1f32 vector (SEW=32, LMUL=1/2).
define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32(float %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
    float %0,
    i64 %1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
  float,
  i64);

; Splat a scalar float into all elements of a nxv2f32 vector (SEW=32, LMUL=1).
define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32(float %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
    float %0,
    i64 %1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
  float,
  i64);

; Splat a scalar float into all elements of a nxv4f32 vector (SEW=32, LMUL=2).
define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32(float %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
    float %0,
    i64 %1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
  float,
  i64);

; Splat a scalar float into all elements of a nxv8f32 vector (SEW=32, LMUL=4).
define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32(float %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
    float %0,
    i64 %1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
  float,
  i64);

; Splat a scalar float into all elements of a nxv16f32 vector (SEW=32, LMUL=8).
define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32(float %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
    float %0,
    i64 %1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
  double,
  i64);

; Splat a scalar double into all elements of a nxv1f64 vector (SEW=64, LMUL=1).
define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64(double %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
    double %0,
    i64 %1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
  double,
  i64);

; Splat a scalar double into all elements of a nxv2f64 vector (SEW=64, LMUL=2).
define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64(double %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
    double %0,
    i64 %1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
  double,
  i64);

; Splat a scalar double into all elements of a nxv4f64 vector (SEW=64, LMUL=4).
define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64(double %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
    double %0,
    i64 %1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
  double,
  i64);

; Splat a scalar double into all elements of a nxv8f64 vector (SEW=64, LMUL=8).
define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64(double %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
    double %0,
    i64 %1)

  ret <vscale x 8 x double> %a
}

; Splatting +0.0 should be selected as the integer splat vmv.v.i v8, 0.
define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
    half 0.0,
    i64 %0)

  ret <vscale x 1 x half> %a
}

; Splatting +0.0 should be selected as the integer splat vmv.v.i v8, 0.
define <vscale x 2 x half> @intrinsic_vmv.v.i_zero_nxv2f16(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
    half 0.0,
    i64 %0)

  ret <vscale x 2 x half> %a
}

; Splatting +0.0 should be selected as the integer splat vmv.v.i v8, 0.
define <vscale x 4 x half> @intrinsic_vmv.v.i_zero_nxv4f16(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
    half 0.0,
    i64 %0)

  ret <vscale x 4 x half> %a
}

; Splatting +0.0 should be selected as the integer splat vmv.v.i v8, 0.
define <vscale x 8 x half> @intrinsic_vmv.v.i_zero_nxv8f16(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
    half 0.0,
    i64 %0)

  ret <vscale x 8 x half> %a
}

; Splatting +0.0 should be selected as the integer splat vmv.v.i v8, 0.
define <vscale x 16 x half> @intrinsic_vmv.v.i_zero_nxv16f16(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
    half 0.0,
    i64 %0)

  ret <vscale x 16 x half> %a
}

; Splatting +0.0 should be selected as the integer splat vmv.v.i v8, 0.
define <vscale x 32 x half> @intrinsic_vmv.v.i_zero_nxv32f16(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
    half 0.0,
    i64 %0)

  ret <vscale x 32 x half> %a
}

; Splatting +0.0 should be selected as the integer splat vmv.v.i v8, 0.
define <vscale x 1 x float> @intrinsic_vmv.v.i_zero_nxv1f32(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
    float 0.0,
    i64 %0)

  ret <vscale x 1 x float> %a
}

; Splatting +0.0 should be selected as the integer splat vmv.v.i v8, 0.
define <vscale x 2 x float> @intrinsic_vmv.v.i_zero_nxv2f32(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
    float 0.0,
    i64 %0)

  ret <vscale x 2 x float> %a
}

; Splatting +0.0 should be selected as the integer splat vmv.v.i v8, 0.
define <vscale x 4 x float> @intrinsic_vmv.v.i_zero_nxv4f32(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
    float 0.0,
    i64 %0)

  ret <vscale x 4 x float> %a
}

; Splatting +0.0 should be selected as the integer splat vmv.v.i v8, 0.
define <vscale x 8 x float> @intrinsic_vmv.v.i_zero_nxv8f32(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
    float 0.0,
    i64 %0)

  ret <vscale x 8 x float> %a
}

; Splatting +0.0 should be selected as the integer splat vmv.v.i v8, 0.
define <vscale x 16 x float> @intrinsic_vmv.v.i_zero_nxv16f32(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
    float 0.0,
    i64 %0)

  ret <vscale x 16 x float> %a
}

; Splatting +0.0 should be selected as the integer splat vmv.v.i v8, 0.
define <vscale x 1 x double> @intrinsic_vmv.v.i_zero_nxv1f64(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
    double 0.0,
    i64 %0)

  ret <vscale x 1 x double> %a
}

; Splatting +0.0 should be selected as the integer splat vmv.v.i v8, 0.
define <vscale x 2 x double> @intrinsic_vmv.v.i_zero_nxv2f64(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
    double 0.0,
    i64 %0)

  ret <vscale x 2 x double> %a
}

; Splatting +0.0 should be selected as the integer splat vmv.v.i v8, 0.
define <vscale x 4 x double> @intrinsic_vmv.v.i_zero_nxv4f64(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
    double 0.0,
    i64 %0)

  ret <vscale x 4 x double> %a
}

; Splatting +0.0 should be selected as the integer splat vmv.v.i v8, 0.
define <vscale x 8 x double> @intrinsic_vmv.v.i_zero_nxv8f64(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
    double 0.0,
    i64 %0)

  ret <vscale x 8 x double> %a
}
