; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s

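; Inserting at index 0 lowers to a single vmv.s.x into the destination
; register with a tail-undisturbed (tu) policy, so the remaining lanes of
; %v are preserved and no slide is needed.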
define <vscale x 1 x i8> @insertelt_nxv1i8_0(<vscale x 1 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 0
  ret <vscale x 1 x i8> %r
}

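; For a constant nonzero index, the scalar is first moved into a scratch
; vector register, then merged into %v with vslideup.vi under VL = idx + 1
; and a tail-undisturbed policy.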
define <vscale x 1 x i8> @insertelt_nxv1i8_imm(<vscale x 1 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    vsetivli zero, 4, e8, mf8, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 3
  ret <vscale x 1 x i8> %r
}

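; A variable index uses the same slide-up pattern, computing VL = idx + 1
; with an addi and sliding by the index held in a register (vslideup.vx).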
define <vscale x 1 x i8> @insertelt_nxv1i8_idx(<vscale x 1 x i8> %v, i8 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv1i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v25, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 1 x i8> %r
}

define <vscale x 2 x i8> @insertelt_nxv2i8_0(<vscale x 2 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 0
  ret <vscale x 2 x i8> %r
}

define <vscale x 2 x i8> @insertelt_nxv2i8_imm(<vscale x 2 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 3
  ret <vscale x 2 x i8> %r
}

define <vscale x 2 x i8> @insertelt_nxv2i8_idx(<vscale x 2 x i8> %v, i8 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv2i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v25, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 2 x i8> %r
}

define <vscale x 4 x i8> @insertelt_nxv4i8_0(<vscale x 4 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 0
  ret <vscale x 4 x i8> %r
}

define <vscale x 4 x i8> @insertelt_nxv4i8_imm(<vscale x 4 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 3
  ret <vscale x 4 x i8> %r
}

define <vscale x 4 x i8> @insertelt_nxv4i8_idx(<vscale x 4 x i8> %v, i8 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv4i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v25, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 4 x i8> %r
}

define <vscale x 8 x i8> @insertelt_nxv8i8_0(<vscale x 8 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 0
  ret <vscale x 8 x i8> %r
}

define <vscale x 8 x i8> @insertelt_nxv8i8_imm(<vscale x 8 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 3
  ret <vscale x 8 x i8> %r
}

define <vscale x 8 x i8> @insertelt_nxv8i8_idx(<vscale x 8 x i8> %v, i8 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv8i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v25, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 8 x i8> %r
}

define <vscale x 16 x i8> @insertelt_nxv16i8_0(<vscale x 16 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 0
  ret <vscale x 16 x i8> %r
}

define <vscale x 16 x i8> @insertelt_nxv16i8_imm(<vscale x 16 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT:    vmv.s.x v26, a0
; CHECK-NEXT:    vsetivli zero, 4, e8, m2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v26, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 3
  ret <vscale x 16 x i8> %r
}

define <vscale x 16 x i8> @insertelt_nxv16i8_idx(<vscale x 16 x i8> %v, i8 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv16i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m2, ta, mu
; CHECK-NEXT:    vmv.s.x v26, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v26, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 16 x i8> %r
}

define <vscale x 32 x i8> @insertelt_nxv32i8_0(<vscale x 32 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 0
  ret <vscale x 32 x i8> %r
}

define <vscale x 32 x i8> @insertelt_nxv32i8_imm(<vscale x 32 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT:    vmv.s.x v28, a0
; CHECK-NEXT:    vsetivli zero, 4, e8, m4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v28, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 3
  ret <vscale x 32 x i8> %r
}

define <vscale x 32 x i8> @insertelt_nxv32i8_idx(<vscale x 32 x i8> %v, i8 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv32i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m4, ta, mu
; CHECK-NEXT:    vmv.s.x v28, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v28, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 32 x i8> %r
}

define <vscale x 64 x i8> @insertelt_nxv64i8_0(<vscale x 64 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv64i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 0
  ret <vscale x 64 x i8> %r
}

define <vscale x 64 x i8> @insertelt_nxv64i8_imm(<vscale x 64 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv64i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vsetivli zero, 4, e8, m8, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 3
  ret <vscale x 64 x i8> %r
}

define <vscale x 64 x i8> @insertelt_nxv64i8_idx(<vscale x 64 x i8> %v, i8 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv64i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m8, ta, mu
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v16, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 64 x i8> %r
}

define <vscale x 1 x i16> @insertelt_nxv1i16_0(<vscale x 1 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 0
  ret <vscale x 1 x i16> %r
}

define <vscale x 1 x i16> @insertelt_nxv1i16_imm(<vscale x 1 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    vsetivli zero, 4, e16, mf4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 3
  ret <vscale x 1 x i16> %r
}

define <vscale x 1 x i16> @insertelt_nxv1i16_idx(<vscale x 1 x i16> %v, i16 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv1i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v25, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 1 x i16> %r
}

define <vscale x 2 x i16> @insertelt_nxv2i16_0(<vscale x 2 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 0
  ret <vscale x 2 x i16> %r
}

define <vscale x 2 x i16> @insertelt_nxv2i16_imm(<vscale x 2 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 3
  ret <vscale x 2 x i16> %r
}

define <vscale x 2 x i16> @insertelt_nxv2i16_idx(<vscale x 2 x i16> %v, i16 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv2i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v25, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 2 x i16> %r
}

define <vscale x 4 x i16> @insertelt_nxv4i16_0(<vscale x 4 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 0
  ret <vscale x 4 x i16> %r
}

define <vscale x 4 x i16> @insertelt_nxv4i16_imm(<vscale x 4 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 3
  ret <vscale x 4 x i16> %r
}

define <vscale x 4 x i16> @insertelt_nxv4i16_idx(<vscale x 4 x i16> %v, i16 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv4i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v25, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 4 x i16> %r
}

define <vscale x 8 x i16> @insertelt_nxv8i16_0(<vscale x 8 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 0
  ret <vscale x 8 x i16> %r
}

define <vscale x 8 x i16> @insertelt_nxv8i16_imm(<vscale x 8 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT:    vmv.s.x v26, a0
; CHECK-NEXT:    vsetivli zero, 4, e16, m2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v26, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 3
  ret <vscale x 8 x i16> %r
}

define <vscale x 8 x i16> @insertelt_nxv8i16_idx(<vscale x 8 x i16> %v, i16 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv8i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e16, m2, ta, mu
; CHECK-NEXT:    vmv.s.x v26, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v26, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 8 x i16> %r
}

define <vscale x 16 x i16> @insertelt_nxv16i16_0(<vscale x 16 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 0
  ret <vscale x 16 x i16> %r
}

define <vscale x 16 x i16> @insertelt_nxv16i16_imm(<vscale x 16 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT:    vmv.s.x v28, a0
; CHECK-NEXT:    vsetivli zero, 4, e16, m4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v28, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 3
  ret <vscale x 16 x i16> %r
}

define <vscale x 16 x i16> @insertelt_nxv16i16_idx(<vscale x 16 x i16> %v, i16 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv16i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e16, m4, ta, mu
; CHECK-NEXT:    vmv.s.x v28, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v28, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 16 x i16> %r
}

define <vscale x 32 x i16> @insertelt_nxv32i16_0(<vscale x 32 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 0
  ret <vscale x 32 x i16> %r
}

define <vscale x 32 x i16> @insertelt_nxv32i16_imm(<vscale x 32 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vsetivli zero, 4, e16, m8, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 3
  ret <vscale x 32 x i16> %r
}

define <vscale x 32 x i16> @insertelt_nxv32i16_idx(<vscale x 32 x i16> %v, i16 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv32i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e16, m8, ta, mu
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v16, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 32 x i16> %r
}

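; The i32 element arguments carry no signext attribute: i32 is already
; XLEN-sized on RV32, so the ABI requires no extension.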
define <vscale x 1 x i32> @insertelt_nxv1i32_0(<vscale x 1 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 0
  ret <vscale x 1 x i32> %r
}

define <vscale x 1 x i32> @insertelt_nxv1i32_imm(<vscale x 1 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv1i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 3
  ret <vscale x 1 x i32> %r
}

define <vscale x 1 x i32> @insertelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv1i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v25, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 1 x i32> %r
}

define <vscale x 2 x i32> @insertelt_nxv2i32_0(<vscale x 2 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 0
  ret <vscale x 2 x i32> %r
}

define <vscale x 2 x i32> @insertelt_nxv2i32_imm(<vscale x 2 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv2i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 3
  ret <vscale x 2 x i32> %r
}

define <vscale x 2 x i32> @insertelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv2i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v25, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 2 x i32> %r
}

define <vscale x 4 x i32> @insertelt_nxv4i32_0(<vscale x 4 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 0
  ret <vscale x 4 x i32> %r
}

define <vscale x 4 x i32> @insertelt_nxv4i32_imm(<vscale x 4 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv4i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT:    vmv.s.x v26, a0
; CHECK-NEXT:    vsetivli zero, 4, e32, m2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v26, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 3
  ret <vscale x 4 x i32> %r
}

define <vscale x 4 x i32> @insertelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv4i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
; CHECK-NEXT:    vmv.s.x v26, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v26, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 4 x i32> %r
}

define <vscale x 8 x i32> @insertelt_nxv8i32_0(<vscale x 8 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 0
  ret <vscale x 8 x i32> %r
}

define <vscale x 8 x i32> @insertelt_nxv8i32_imm(<vscale x 8 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv8i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmv.s.x v28, a0
; CHECK-NEXT:    vsetivli zero, 4, e32, m4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v28, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 3
  ret <vscale x 8 x i32> %r
}

define <vscale x 8 x i32> @insertelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv8i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmv.s.x v28, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v28, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 8 x i32> %r
}

define <vscale x 16 x i32> @insertelt_nxv16i32_0(<vscale x 16 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv16i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 0
  ret <vscale x 16 x i32> %r
}

define <vscale x 16 x i32> @insertelt_nxv16i32_imm(<vscale x 16 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv16i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vsetivli zero, 4, e32, m8, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 3
  ret <vscale x 16 x i32> %r
}

define <vscale x 16 x i32> @insertelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv16i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e32, m8, ta, mu
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v16, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 16 x i32> %r
}

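; RV32 has no 64-bit GPRs, so an i64 element arrives split across a0 (low
; half) and a1 (high half). It is assembled in a vector register with two
; vslide1up.vx steps at SEW=32, then slid into position at SEW=64.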
define <vscale x 1 x i64> @insertelt_nxv1i64_0(<vscale x 1 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv1i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v25, 0
; CHECK-NEXT:    vslide1up.vx v26, v25, a1
; CHECK-NEXT:    vslide1up.vx v25, v26, a0
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 0
  ret <vscale x 1 x i64> %r
}

define <vscale x 1 x i64> @insertelt_nxv1i64_imm(<vscale x 1 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv1i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v25, 0
; CHECK-NEXT:    vslide1up.vx v26, v25, a1
; CHECK-NEXT:    vslide1up.vx v25, v26, a0
; CHECK-NEXT:    vsetivli zero, 4, e64, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v25, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 3
  ret <vscale x 1 x i64> %r
}

define <vscale x 1 x i64> @insertelt_nxv1i64_idx(<vscale x 1 x i64> %v, i64 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv1i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v25, 0
; CHECK-NEXT:    vslide1up.vx v26, v25, a1
; CHECK-NEXT:    vslide1up.vx v25, v26, a0
; CHECK-NEXT:    addi a0, a2, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v25, a2
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 %idx
  ret <vscale x 1 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_0(<vscale x 2 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv2i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m2, ta, mu
; CHECK-NEXT:    vmv.v.i v26, 0
; CHECK-NEXT:    vslide1up.vx v28, v26, a1
; CHECK-NEXT:    vslide1up.vx v26, v28, a0
; CHECK-NEXT:    vsetivli zero, 1, e64, m2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v26, 0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 0
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_imm(<vscale x 2 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv2i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m2, ta, mu
; CHECK-NEXT:    vmv.v.i v26, 0
; CHECK-NEXT:    vslide1up.vx v28, v26, a1
; CHECK-NEXT:    vslide1up.vx v26, v28, a0
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v26, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 3
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_idx(<vscale x 2 x i64> %v, i64 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv2i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m2, ta, mu
; CHECK-NEXT:    vmv.v.i v26, 0
; CHECK-NEXT:    vslide1up.vx v28, v26, a1
; CHECK-NEXT:    vslide1up.vx v26, v28, a0
; CHECK-NEXT:    addi a0, a2, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v26, a2
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 %idx
  ret <vscale x 2 x i64> %r
}

define <vscale x 4 x i64> @insertelt_nxv4i64_0(<vscale x 4 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv4i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m4, ta, mu
; CHECK-NEXT:    vmv.v.i v28, 0
; CHECK-NEXT:    vslide1up.vx v12, v28, a1
; CHECK-NEXT:    vslide1up.vx v28, v12, a0
; CHECK-NEXT:    vsetivli zero, 1, e64, m4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v28, 0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 0
  ret <vscale x 4 x i64> %r
}

define <vscale x 4 x i64> @insertelt_nxv4i64_imm(<vscale x 4 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv4i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m4, ta, mu
; CHECK-NEXT:    vmv.v.i v28, 0
; CHECK-NEXT:    vslide1up.vx v12, v28, a1
; CHECK-NEXT:    vslide1up.vx v28, v12, a0
; CHECK-NEXT:    vsetivli zero, 4, e64, m4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v28, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 3
  ret <vscale x 4 x i64> %r
}

define <vscale x 4 x i64> @insertelt_nxv4i64_idx(<vscale x 4 x i64> %v, i64 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv4i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m4, ta, mu
; CHECK-NEXT:    vmv.v.i v28, 0
; CHECK-NEXT:    vslide1up.vx v12, v28, a1
; CHECK-NEXT:    vslide1up.vx v28, v12, a0
; CHECK-NEXT:    addi a0, a2, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v28, a2
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 %idx
  ret <vscale x 4 x i64> %r
}

define <vscale x 8 x i64> @insertelt_nxv8i64_0(<vscale x 8 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m8, ta, mu
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vslide1up.vx v24, v16, a1
; CHECK-NEXT:    vslide1up.vx v16, v24, a0
; CHECK-NEXT:    vsetivli zero, 1, e64, m8, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v16, 0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 0
  ret <vscale x 8 x i64> %r
}

define <vscale x 8 x i64> @insertelt_nxv8i64_imm(<vscale x 8 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv8i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m8, ta, mu
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vslide1up.vx v24, v16, a1
; CHECK-NEXT:    vslide1up.vx v16, v24, a0
; CHECK-NEXT:    vsetivli zero, 4, e64, m8, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 3
  ret <vscale x 8 x i64> %r
}

define <vscale x 8 x i64> @insertelt_nxv8i64_idx(<vscale x 8 x i64> %v, i64 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv8i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m8, ta, mu
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vslide1up.vx v24, v16, a1
; CHECK-NEXT:    vslide1up.vx v16, v24, a0
; CHECK-NEXT:    addi a0, a2, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v16, a2
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 %idx
  ret <vscale x 8 x i64> %r
}

; Extra tests to check lowering of constant values
define <vscale x 2 x i64> @insertelt_nxv2i64_0_c10(<vscale x 2 x i64> %v) {
; CHECK-LABEL: insertelt_nxv2i64_0_c10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, 10
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 10, i32 0
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_imm_c10(<vscale x 2 x i64> %v) {
; CHECK-LABEL: insertelt_nxv2i64_imm_c10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, 10
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT:    vmv.s.x v26, a0
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v26, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 10, i32 3
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_idx_c10(<vscale x 2 x i64> %v, i32 %idx) {
; CHECK-LABEL: insertelt_nxv2i64_idx_c10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, zero, 10
; CHECK-NEXT:    vsetvli a2, zero, e64, m2, ta, mu
; CHECK-NEXT:    vmv.s.x v26, a1
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v26, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 10, i32 %idx
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_0_cn1(<vscale x 2 x i64> %v) {
; CHECK-LABEL: insertelt_nxv2i64_0_cn1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -1
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, tu, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 -1, i32 0
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_imm_cn1(<vscale x 2 x i64> %v) {
; CHECK-LABEL: insertelt_nxv2i64_imm_cn1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -1
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT:    vmv.s.x v26, a0
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v26, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 -1, i32 3
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_idx_cn1(<vscale x 2 x i64> %v, i32 %idx) {
; CHECK-LABEL: insertelt_nxv2i64_idx_cn1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, zero, -1
; CHECK-NEXT:    vsetvli a2, zero, e64, m2, ta, mu
; CHECK-NEXT:    vmv.s.x v26, a1
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vslideup.vx v8, v26, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 -1, i32 %idx
  ret <vscale x 2 x i64> %r
}
