; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
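; Tests for insertelement into scalable FP vectors. Index 0 is lowered to a
; vfmv.s.f into the source register under a tail-undisturbed (tu) policy; a
; constant index uses vfmv.s.f into a temporary followed by vslideup.vi with
; VL = idx + 1; a variable index computes VL = idx + 1 in a scalar register
; and uses vslideup.vx.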

define <vscale x 1 x half> @insertelt_nxv1f16_0(<vscale x 1 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv1f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, tu, mu
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x half> %v, half %elt, i32 0
  ret <vscale x 1 x half> %r
}

define <vscale x 1 x half> @insertelt_nxv1f16_imm(<vscale x 1 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv1f16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vfmv.s.f v25, fa0
; CHECK-NEXT:    vsetivli zero, 4, e16, mf4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x half> %v, half %elt, i32 3
  ret <vscale x 1 x half> %r
}

define <vscale x 1 x half> @insertelt_nxv1f16_idx(<vscale x 1 x half> %v, half %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv1f16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vfmv.s.f v25, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v25, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x half> %v, half %elt, i32 %idx
  ret <vscale x 1 x half> %r
}

define <vscale x 2 x half> @insertelt_nxv2f16_0(<vscale x 2 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv2f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, tu, mu
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x half> %v, half %elt, i32 0
  ret <vscale x 2 x half> %r
}

define <vscale x 2 x half> @insertelt_nxv2f16_imm(<vscale x 2 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv2f16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vfmv.s.f v25, fa0
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x half> %v, half %elt, i32 3
  ret <vscale x 2 x half> %r
}

define <vscale x 2 x half> @insertelt_nxv2f16_idx(<vscale x 2 x half> %v, half %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv2f16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vfmv.s.f v25, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v25, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x half> %v, half %elt, i32 %idx
  ret <vscale x 2 x half> %r
}

define <vscale x 4 x half> @insertelt_nxv4f16_0(<vscale x 4 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv4f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, mu
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x half> %v, half %elt, i32 0
  ret <vscale x 4 x half> %r
}

define <vscale x 4 x half> @insertelt_nxv4f16_imm(<vscale x 4 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv4f16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT:    vfmv.s.f v25, fa0
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x half> %v, half %elt, i32 3
  ret <vscale x 4 x half> %r
}

define <vscale x 4 x half> @insertelt_nxv4f16_idx(<vscale x 4 x half> %v, half %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv4f16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vfmv.s.f v25, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v25, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x half> %v, half %elt, i32 %idx
  ret <vscale x 4 x half> %r
}

define <vscale x 8 x half> @insertelt_nxv8f16_0(<vscale x 8 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv8f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, tu, mu
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x half> %v, half %elt, i32 0
  ret <vscale x 8 x half> %r
}

define <vscale x 8 x half> @insertelt_nxv8f16_imm(<vscale x 8 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv8f16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT:    vfmv.s.f v26, fa0
; CHECK-NEXT:    vsetivli zero, 4, e16, m2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v26, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x half> %v, half %elt, i32 3
  ret <vscale x 8 x half> %r
}

define <vscale x 8 x half> @insertelt_nxv8f16_idx(<vscale x 8 x half> %v, half %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv8f16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT:    vfmv.s.f v26, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v26, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x half> %v, half %elt, i32 %idx
  ret <vscale x 8 x half> %r
}

define <vscale x 16 x half> @insertelt_nxv16f16_0(<vscale x 16 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv16f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, tu, mu
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x half> %v, half %elt, i32 0
  ret <vscale x 16 x half> %r
}

define <vscale x 16 x half> @insertelt_nxv16f16_imm(<vscale x 16 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv16f16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT:    vfmv.s.f v28, fa0
; CHECK-NEXT:    vsetivli zero, 4, e16, m4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v28, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x half> %v, half %elt, i32 3
  ret <vscale x 16 x half> %r
}

define <vscale x 16 x half> @insertelt_nxv16f16_idx(<vscale x 16 x half> %v, half %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv16f16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT:    vfmv.s.f v28, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v28, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x half> %v, half %elt, i32 %idx
  ret <vscale x 16 x half> %r
}

define <vscale x 32 x half> @insertelt_nxv32f16_0(<vscale x 32 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv32f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, tu, mu
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x half> %v, half %elt, i32 0
  ret <vscale x 32 x half> %r
}

define <vscale x 32 x half> @insertelt_nxv32f16_imm(<vscale x 32 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv32f16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vsetivli zero, 4, e16, m8, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x half> %v, half %elt, i32 3
  ret <vscale x 32 x half> %r
}

define <vscale x 32 x half> @insertelt_nxv32f16_idx(<vscale x 32 x half> %v, half %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv32f16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v16, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x half> %v, half %elt, i32 %idx
  ret <vscale x 32 x half> %r
}

define <vscale x 1 x float> @insertelt_nxv1f32_0(<vscale x 1 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv1f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, tu, mu
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x float> %v, float %elt, i32 0
  ret <vscale x 1 x float> %r
}

define <vscale x 1 x float> @insertelt_nxv1f32_imm(<vscale x 1 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv1f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vfmv.s.f v25, fa0
; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x float> %v, float %elt, i32 3
  ret <vscale x 1 x float> %r
}

define <vscale x 1 x float> @insertelt_nxv1f32_idx(<vscale x 1 x float> %v, float %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv1f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vfmv.s.f v25, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v25, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x float> %v, float %elt, i32 %idx
  ret <vscale x 1 x float> %r
}

define <vscale x 2 x float> @insertelt_nxv2f32_0(<vscale x 2 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv2f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, mu
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x float> %v, float %elt, i32 0
  ret <vscale x 2 x float> %r
}

define <vscale x 2 x float> @insertelt_nxv2f32_imm(<vscale x 2 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv2f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT:    vfmv.s.f v25, fa0
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x float> %v, float %elt, i32 3
  ret <vscale x 2 x float> %r
}

define <vscale x 2 x float> @insertelt_nxv2f32_idx(<vscale x 2 x float> %v, float %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv2f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT:    vfmv.s.f v25, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v25, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x float> %v, float %elt, i32 %idx
  ret <vscale x 2 x float> %r
}

define <vscale x 4 x float> @insertelt_nxv4f32_0(<vscale x 4 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv4f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, tu, mu
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x float> %v, float %elt, i32 0
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @insertelt_nxv4f32_imm(<vscale x 4 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv4f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT:    vfmv.s.f v26, fa0
; CHECK-NEXT:    vsetivli zero, 4, e32, m2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v26, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x float> %v, float %elt, i32 3
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @insertelt_nxv4f32_idx(<vscale x 4 x float> %v, float %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv4f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT:    vfmv.s.f v26, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v26, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x float> %v, float %elt, i32 %idx
  ret <vscale x 4 x float> %r
}

define <vscale x 8 x float> @insertelt_nxv8f32_0(<vscale x 8 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv8f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, tu, mu
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x float> %v, float %elt, i32 0
  ret <vscale x 8 x float> %r
}

define <vscale x 8 x float> @insertelt_nxv8f32_imm(<vscale x 8 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv8f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vfmv.s.f v28, fa0
; CHECK-NEXT:    vsetivli zero, 4, e32, m4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v28, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x float> %v, float %elt, i32 3
  ret <vscale x 8 x float> %r
}

define <vscale x 8 x float> @insertelt_nxv8f32_idx(<vscale x 8 x float> %v, float %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv8f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vfmv.s.f v28, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v28, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x float> %v, float %elt, i32 %idx
  ret <vscale x 8 x float> %r
}

define <vscale x 16 x float> @insertelt_nxv16f32_0(<vscale x 16 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv16f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, tu, mu
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x float> %v, float %elt, i32 0
  ret <vscale x 16 x float> %r
}

define <vscale x 16 x float> @insertelt_nxv16f32_imm(<vscale x 16 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv16f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vsetivli zero, 4, e32, m8, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x float> %v, float %elt, i32 3
  ret <vscale x 16 x float> %r
}

define <vscale x 16 x float> @insertelt_nxv16f32_idx(<vscale x 16 x float> %v, float %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv16f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v16, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x float> %v, float %elt, i32 %idx
  ret <vscale x 16 x float> %r
}

define <vscale x 1 x double> @insertelt_nxv1f64_0(<vscale x 1 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv1f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, mu
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x double> %v, double %elt, i32 0
  ret <vscale x 1 x double> %r
}

define <vscale x 1 x double> @insertelt_nxv1f64_imm(<vscale x 1 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv1f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT:    vfmv.s.f v25, fa0
; CHECK-NEXT:    vsetivli zero, 4, e64, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x double> %v, double %elt, i32 3
  ret <vscale x 1 x double> %r
}

define <vscale x 1 x double> @insertelt_nxv1f64_idx(<vscale x 1 x double> %v, double %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv1f64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT:    vfmv.s.f v25, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v25, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x double> %v, double %elt, i32 %idx
  ret <vscale x 1 x double> %r
}

define <vscale x 2 x double> @insertelt_nxv2f64_0(<vscale x 2 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv2f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, tu, mu
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x double> %v, double %elt, i32 0
  ret <vscale x 2 x double> %r
}

define <vscale x 2 x double> @insertelt_nxv2f64_imm(<vscale x 2 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv2f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT:    vfmv.s.f v26, fa0
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v26, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x double> %v, double %elt, i32 3
  ret <vscale x 2 x double> %r
}

define <vscale x 2 x double> @insertelt_nxv2f64_idx(<vscale x 2 x double> %v, double %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv2f64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT:    vfmv.s.f v26, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v26, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x double> %v, double %elt, i32 %idx
  ret <vscale x 2 x double> %r
}

define <vscale x 4 x double> @insertelt_nxv4f64_0(<vscale x 4 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv4f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, tu, mu
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x double> %v, double %elt, i32 0
  ret <vscale x 4 x double> %r
}

define <vscale x 4 x double> @insertelt_nxv4f64_imm(<vscale x 4 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv4f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT:    vfmv.s.f v28, fa0
; CHECK-NEXT:    vsetivli zero, 4, e64, m4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v28, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x double> %v, double %elt, i32 3
  ret <vscale x 4 x double> %r
}

define <vscale x 4 x double> @insertelt_nxv4f64_idx(<vscale x 4 x double> %v, double %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv4f64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT:    vfmv.s.f v28, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v28, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x double> %v, double %elt, i32 %idx
  ret <vscale x 4 x double> %r
}

define <vscale x 8 x double> @insertelt_nxv8f64_0(<vscale x 8 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv8f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, tu, mu
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x double> %v, double %elt, i32 0
  ret <vscale x 8 x double> %r
}

define <vscale x 8 x double> @insertelt_nxv8f64_imm(<vscale x 8 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv8f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vsetivli zero, 4, e64, m8, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x double> %v, double %elt, i32 3
  ret <vscale x 8 x double> %r
}

define <vscale x 8 x double> @insertelt_nxv8f64_idx(<vscale x 8 x double> %v, double %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv8f64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v16, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x double> %v, double %elt, i32 %idx
  ret <vscale x 8 x double> %r
}