; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
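; Check codegen for inserting a scalar i1 into fixed-length mask vectors
; (<1 x i1> through <64 x i1>) at both constant and variable indices. The
; mask is first expanded to an i8 vector of 0/1 values (vmv.v.i plus
; vmerge.vim), the new element is placed with vmv.s.x (and vslideup for a
; nonzero index), and the result is converted back to a mask with
; vand.vi/vmsne.vi.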

define <1 x i1> @insertelt_v1i1(<1 x i1> %x, i1 %elt) nounwind {
; CHECK-LABEL: insertelt_v1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT:    vmv.v.i v25, 0
; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vand.vi v25, v25, 1
; CHECK-NEXT:    vmsne.vi v0, v25, 0
; CHECK-NEXT:    ret
  %y = insertelement <1 x i1> %x, i1 %elt, i64 0
  ret <1 x i1> %y
}

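; For a variable index, the element is slid into place with vslideup.vx:
; VL is set to idx+1 with a tail-undisturbed (tu) policy so only the
; destination slot is written, and the slide amount is idx. On RV64 the
; i32 index is first sign-extended with sext.w before the address math.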
define <1 x i1> @insertelt_idx_v1i1(<1 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind {
; RV32-LABEL: insertelt_idx_v1i1:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
; RV32-NEXT:    vmv.s.x v25, a0
; RV32-NEXT:    vmv.v.i v26, 0
; RV32-NEXT:    vmerge.vim v26, v26, 1, v0
; RV32-NEXT:    addi a0, a1, 1
; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; RV32-NEXT:    vslideup.vx v26, v25, a1
; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
; RV32-NEXT:    vand.vi v25, v26, 1
; RV32-NEXT:    vmsne.vi v0, v25, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_idx_v1i1:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
; RV64-NEXT:    vmv.s.x v25, a0
; RV64-NEXT:    vmv.v.i v26, 0
; RV64-NEXT:    vmerge.vim v26, v26, 1, v0
; RV64-NEXT:    sext.w a0, a1
; RV64-NEXT:    addi a1, a0, 1
; RV64-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; RV64-NEXT:    vslideup.vx v26, v25, a0
; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
; RV64-NEXT:    vand.vi v25, v26, 1
; RV64-NEXT:    vmsne.vi v0, v25, 0
; RV64-NEXT:    ret
  %y = insertelement <1 x i1> %x, i1 %elt, i32 %idx
  ret <1 x i1> %y
}

define <2 x i1> @insertelt_v2i1(<2 x i1> %x, i1 %elt) nounwind {
; CHECK-LABEL: insertelt_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    vmv.v.i v26, 0
; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, mu
; CHECK-NEXT:    vslideup.vi v26, v25, 1
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vand.vi v25, v26, 1
; CHECK-NEXT:    vmsne.vi v0, v25, 0
; CHECK-NEXT:    ret
  %y = insertelement <2 x i1> %x, i1 %elt, i64 1
  ret <2 x i1> %y
}

define <2 x i1> @insertelt_idx_v2i1(<2 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind {
; RV32-LABEL: insertelt_idx_v2i1:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; RV32-NEXT:    vmv.s.x v25, a0
; RV32-NEXT:    vmv.v.i v26, 0
; RV32-NEXT:    vmerge.vim v26, v26, 1, v0
; RV32-NEXT:    addi a0, a1, 1
; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; RV32-NEXT:    vslideup.vx v26, v25, a1
; RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; RV32-NEXT:    vand.vi v25, v26, 1
; RV32-NEXT:    vmsne.vi v0, v25, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_idx_v2i1:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; RV64-NEXT:    vmv.s.x v25, a0
; RV64-NEXT:    vmv.v.i v26, 0
; RV64-NEXT:    vmerge.vim v26, v26, 1, v0
; RV64-NEXT:    sext.w a0, a1
; RV64-NEXT:    addi a1, a0, 1
; RV64-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; RV64-NEXT:    vslideup.vx v26, v25, a0
; RV64-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; RV64-NEXT:    vand.vi v25, v26, 1
; RV64-NEXT:    vmsne.vi v0, v25, 0
; RV64-NEXT:    ret
  %y = insertelement <2 x i1> %x, i1 %elt, i32 %idx
  ret <2 x i1> %y
}

define <8 x i1> @insertelt_v8i1(<8 x i1> %x, i1 %elt) nounwind {
; CHECK-LABEL: insertelt_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    vmv.v.i v26, 0
; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
; CHECK-NEXT:    vslideup.vi v26, v25, 1
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT:    vand.vi v25, v26, 1
; CHECK-NEXT:    vmsne.vi v0, v25, 0
; CHECK-NEXT:    ret
  %y = insertelement <8 x i1> %x, i1 %elt, i64 1
  ret <8 x i1> %y
}

define <8 x i1> @insertelt_idx_v8i1(<8 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind {
; RV32-LABEL: insertelt_idx_v8i1:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT:    vmv.s.x v25, a0
; RV32-NEXT:    vmv.v.i v26, 0
; RV32-NEXT:    vmerge.vim v26, v26, 1, v0
; RV32-NEXT:    addi a0, a1, 1
; RV32-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; RV32-NEXT:    vslideup.vx v26, v25, a1
; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT:    vand.vi v25, v26, 1
; RV32-NEXT:    vmsne.vi v0, v25, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_idx_v8i1:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT:    vmv.s.x v25, a0
; RV64-NEXT:    vmv.v.i v26, 0
; RV64-NEXT:    vmerge.vim v26, v26, 1, v0
; RV64-NEXT:    sext.w a0, a1
; RV64-NEXT:    addi a1, a0, 1
; RV64-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
; RV64-NEXT:    vslideup.vx v26, v25, a0
; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT:    vand.vi v25, v26, 1
; RV64-NEXT:    vmsne.vi v0, v25, 0
; RV64-NEXT:    ret
  %y = insertelement <8 x i1> %x, i1 %elt, i32 %idx
  ret <8 x i1> %y
}

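; At 64 elements the AVL no longer fits vsetivli's 5-bit immediate, so it
; is materialized in a register (addi a2, zero, 64) and vsetvli is used
; instead, with the i8 vector spanning four registers (LMUL=4).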
define <64 x i1> @insertelt_v64i1(<64 x i1> %x, i1 %elt) nounwind {
; CHECK-LABEL: insertelt_v64i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, zero, 64
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vmv.s.x v28, a0
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 2, e8, m4, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v28, 1
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vand.vi v28, v8, 1
; CHECK-NEXT:    vmsne.vi v0, v28, 0
; CHECK-NEXT:    ret
  %y = insertelement <64 x i1> %x, i1 %elt, i64 1
  ret <64 x i1> %y
}

define <64 x i1> @insertelt_idx_v64i1(<64 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind {
; RV32-LABEL: insertelt_idx_v64i1:
; RV32:       # %bb.0:
; RV32-NEXT:    addi a2, zero, 64
; RV32-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
; RV32-NEXT:    vmv.s.x v28, a0
; RV32-NEXT:    vmv.v.i v8, 0
; RV32-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-NEXT:    addi a0, a1, 1
; RV32-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; RV32-NEXT:    vslideup.vx v8, v28, a1
; RV32-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
; RV32-NEXT:    vand.vi v28, v8, 1
; RV32-NEXT:    vmsne.vi v0, v28, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_idx_v64i1:
; RV64:       # %bb.0:
; RV64-NEXT:    addi a2, zero, 64
; RV64-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
; RV64-NEXT:    vmv.s.x v28, a0
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-NEXT:    sext.w a0, a1
; RV64-NEXT:    addi a1, a0, 1
; RV64-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
; RV64-NEXT:    vslideup.vx v8, v28, a0
; RV64-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
; RV64-NEXT:    vand.vi v28, v8, 1
; RV64-NEXT:    vmsne.vi v0, v28, 0
; RV64-NEXT:    ret
  %y = insertelement <64 x i1> %x, i1 %elt, i32 %idx
  ret <64 x i1> %y
}