1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s \
3; RUN:   | FileCheck %s --check-prefix=RV32V
4; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
5; RUN:   | FileCheck %s --check-prefix=RV64V
6
; Splat of -1: the constant fits in the 5-bit immediate of vmv.v.i, so both
; RV32 and RV64 lower the insertelement+shufflevector splat to a single
; vector-immediate move.
define <vscale x 8 x i64> @vsplat_nxv8i64_1() {
; RV32V-LABEL: vsplat_nxv8i64_1:
; RV32V:       # %bb.0:
; RV32V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
; RV32V-NEXT:    vmv.v.i v8, -1
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vsplat_nxv8i64_1:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
; RV64V-NEXT:    vmv.v.i v8, -1
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 -1, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i64> %splat
}
23
; Splat of 4: a small positive constant that also fits vmv.v.i's 5-bit
; immediate, so no scalar register is needed on either target.
define <vscale x 8 x i64> @vsplat_nxv8i64_2() {
; RV32V-LABEL: vsplat_nxv8i64_2:
; RV32V:       # %bb.0:
; RV32V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
; RV32V-NEXT:    vmv.v.i v8, 4
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vsplat_nxv8i64_2:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
; RV64V-NEXT:    vmv.v.i v8, 4
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 4, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i64> %splat
}
40
; Splat of 255: too large for vmv.v.i's immediate, so the constant is
; materialized in a scalar register (addi) and splatted with vmv.v.x on
; both targets.
define <vscale x 8 x i64> @vsplat_nxv8i64_3() {
; RV32V-LABEL: vsplat_nxv8i64_3:
; RV32V:       # %bb.0:
; RV32V-NEXT:    addi a0, zero, 255
; RV32V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
; RV32V-NEXT:    vmv.v.x v8, a0
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vsplat_nxv8i64_3:
; RV64V:       # %bb.0:
; RV64V-NEXT:    addi a0, zero, 255
; RV64V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
; RV64V-NEXT:    vmv.v.x v8, a0
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 255, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i64> %splat
}
59
; Splat of 4211079935, a constant that fits in 32 bits but has bit 31 set.
; On RV32 the 32-bit value is splatted (vmv.v.x sign-extends it to each
; 64-bit element) and then zero-extended with a vsll.vx/vsrl.vx pair by 32.
; On RV64 the full 64-bit constant is built in a scalar (addi/slli/addi)
; and splatted directly.
define <vscale x 8 x i64> @vsplat_nxv8i64_4() {
; RV32V-LABEL: vsplat_nxv8i64_4:
; RV32V:       # %bb.0:
; RV32V-NEXT:    lui a0, 1028096
; RV32V-NEXT:    addi a0, a0, -1281
; RV32V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
; RV32V-NEXT:    vmv.v.x v8, a0
; RV32V-NEXT:    addi a0, zero, 32
; RV32V-NEXT:    vsll.vx v8, v8, a0
; RV32V-NEXT:    vsrl.vx v8, v8, a0
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vsplat_nxv8i64_4:
; RV64V:       # %bb.0:
; RV64V-NEXT:    addi a0, zero, 251
; RV64V-NEXT:    slli a0, a0, 24
; RV64V-NEXT:    addi a0, a0, -1281
; RV64V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
; RV64V-NEXT:    vmv.v.x v8, a0
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 4211079935, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i64> %splat
}
84
; Splat of a runtime i64 value. On RV32 the i64 argument arrives as a GPR
; pair (a0 = low half, a1 = high half): the high half is splatted and
; shifted into position, the low half is splatted and zero-extended, then
; the two are combined with vor.vv. On RV64 a single vmv.v.x suffices.
define <vscale x 8 x i64> @vsplat_nxv8i64_5(i64 %a) {
; RV32V-LABEL: vsplat_nxv8i64_5:
; RV32V:       # %bb.0:
; RV32V-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
; RV32V-NEXT:    vmv.v.x v8, a1
; RV32V-NEXT:    addi a1, zero, 32
; RV32V-NEXT:    vsll.vx v8, v8, a1
; RV32V-NEXT:    vmv.v.x v16, a0
; RV32V-NEXT:    vsll.vx v16, v16, a1
; RV32V-NEXT:    vsrl.vx v16, v16, a1
; RV32V-NEXT:    vor.vv v8, v16, v8
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vsplat_nxv8i64_5:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
; RV64V-NEXT:    vmv.v.x v8, a0
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 %a, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i64> %splat
}
107
; Add of a splatted constant 2: the splat is folded into the add's
; immediate form (vadd.vi) on both targets.
define <vscale x 8 x i64> @vadd_vx_nxv8i64_6(<vscale x 8 x i64> %v) {
; RV32V-LABEL: vadd_vx_nxv8i64_6:
; RV32V:       # %bb.0:
; RV32V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
; RV32V-NEXT:    vadd.vi v8, v8, 2
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vadd_vx_nxv8i64_6:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
; RV64V-NEXT:    vadd.vi v8, v8, 2
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 2, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vret = add <vscale x 8 x i64> %v, %splat
  ret <vscale x 8 x i64> %vret
}
125
; Add of a splatted -1: the negative immediate also fits vadd.vi's 5-bit
; signed immediate on both targets.
define <vscale x 8 x i64> @vadd_vx_nxv8i64_7(<vscale x 8 x i64> %v) {
; RV32V-LABEL: vadd_vx_nxv8i64_7:
; RV32V:       # %bb.0:
; RV32V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
; RV32V-NEXT:    vadd.vi v8, v8, -1
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vadd_vx_nxv8i64_7:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
; RV64V-NEXT:    vadd.vi v8, v8, -1
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 -1, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vret = add <vscale x 8 x i64> %v, %splat
  ret <vscale x 8 x i64> %vret
}
143
; Add of a splatted 255: too large for vadd.vi, so the constant is
; materialized in a GPR and the add uses the vector-scalar form vadd.vx.
define <vscale x 8 x i64> @vadd_vx_nxv8i64_8(<vscale x 8 x i64> %v) {
; RV32V-LABEL: vadd_vx_nxv8i64_8:
; RV32V:       # %bb.0:
; RV32V-NEXT:    addi a0, zero, 255
; RV32V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
; RV32V-NEXT:    vadd.vx v8, v8, a0
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vadd_vx_nxv8i64_8:
; RV64V:       # %bb.0:
; RV64V-NEXT:    addi a0, zero, 255
; RV64V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
; RV64V-NEXT:    vadd.vx v8, v8, a0
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 255, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vret = add <vscale x 8 x i64> %v, %splat
  ret <vscale x 8 x i64> %vret
}
163
; Add of splatted 2063596287, which fits in a signed 32-bit immediate
; sequence (lui + addi/addiw), so both targets can use a single vadd.vx;
; on RV32 the sign-extension of the scalar matches the 64-bit value.
define <vscale x 8 x i64> @vadd_vx_nxv8i64_9(<vscale x 8 x i64> %v) {
; RV32V-LABEL: vadd_vx_nxv8i64_9:
; RV32V:       # %bb.0:
; RV32V-NEXT:    lui a0, 503808
; RV32V-NEXT:    addi a0, a0, -1281
; RV32V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
; RV32V-NEXT:    vadd.vx v8, v8, a0
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vadd_vx_nxv8i64_9:
; RV64V:       # %bb.0:
; RV64V-NEXT:    lui a0, 503808
; RV64V-NEXT:    addiw a0, a0, -1281
; RV64V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
; RV64V-NEXT:    vadd.vx v8, v8, a0
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 2063596287, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vret = add <vscale x 8 x i64> %v, %splat
  ret <vscale x 8 x i64> %vret
}
185
; Add of splatted 4211079935 (same constant as vsplat_nxv8i64_4, bit 31
; set). RV64 builds the 64-bit constant in a scalar and folds the add into
; vadd.vx; RV32 must first build the zero-extended splat in a vector
; (vmv.v.x + vsll/vsrl by 32) and then use the vector-vector add.
define <vscale x 8 x i64> @vadd_vx_nxv8i64_10(<vscale x 8 x i64> %v) {
; RV32V-LABEL: vadd_vx_nxv8i64_10:
; RV32V:       # %bb.0:
; RV32V-NEXT:    lui a0, 1028096
; RV32V-NEXT:    addi a0, a0, -1281
; RV32V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
; RV32V-NEXT:    vmv.v.x v16, a0
; RV32V-NEXT:    addi a0, zero, 32
; RV32V-NEXT:    vsll.vx v16, v16, a0
; RV32V-NEXT:    vsrl.vx v16, v16, a0
; RV32V-NEXT:    vadd.vv v8, v8, v16
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vadd_vx_nxv8i64_10:
; RV64V:       # %bb.0:
; RV64V-NEXT:    addi a0, zero, 251
; RV64V-NEXT:    slli a0, a0, 24
; RV64V-NEXT:    addi a0, a0, -1281
; RV64V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
; RV64V-NEXT:    vadd.vx v8, v8, a0
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 4211079935, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vret = add <vscale x 8 x i64> %v, %splat
  ret <vscale x 8 x i64> %vret
}
212
; Add of splatted 8506047231, a constant wider than 32 bits. RV32 assembles
; it entirely in vector registers: the high half (1) via vmv.v.i shifted
; left 32, the low half zero-extended with a vsll/vsrl pair, combined with
; vor.vv, then added with vadd.vv. RV64 builds the scalar and uses vadd.vx.
define <vscale x 8 x i64> @vadd_vx_nxv8i64_11(<vscale x 8 x i64> %v) {
; RV32V-LABEL: vadd_vx_nxv8i64_11:
; RV32V:       # %bb.0:
; RV32V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
; RV32V-NEXT:    vmv.v.i v16, 1
; RV32V-NEXT:    addi a0, zero, 32
; RV32V-NEXT:    vsll.vx v16, v16, a0
; RV32V-NEXT:    lui a1, 1028096
; RV32V-NEXT:    addi a1, a1, -1281
; RV32V-NEXT:    vmv.v.x v24, a1
; RV32V-NEXT:    vsll.vx v24, v24, a0
; RV32V-NEXT:    vsrl.vx v24, v24, a0
; RV32V-NEXT:    vor.vv v16, v24, v16
; RV32V-NEXT:    vadd.vv v8, v8, v16
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vadd_vx_nxv8i64_11:
; RV64V:       # %bb.0:
; RV64V-NEXT:    addi a0, zero, 507
; RV64V-NEXT:    slli a0, a0, 24
; RV64V-NEXT:    addi a0, a0, -1281
; RV64V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
; RV64V-NEXT:    vadd.vx v8, v8, a0
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 8506047231, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vret = add <vscale x 8 x i64> %v, %splat
  ret <vscale x 8 x i64> %vret
}
242
; Add of a runtime i64 splat. On RV32 the i64 argument is a GPR pair
; (a0 low, a1 high): the splat is built from the two halves with
; vmv.v.x/vsll/vsrl/vor (as in vsplat_nxv8i64_5) and then added with
; vadd.vv. On RV64 the whole value is in a0, so the add folds to vadd.vx.
define <vscale x 8 x i64> @vadd_vx_nxv8i64_12(<vscale x 8 x i64> %v, i64 %a) {
; RV32V-LABEL: vadd_vx_nxv8i64_12:
; RV32V:       # %bb.0:
; RV32V-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
; RV32V-NEXT:    vmv.v.x v16, a1
; RV32V-NEXT:    addi a1, zero, 32
; RV32V-NEXT:    vsll.vx v16, v16, a1
; RV32V-NEXT:    vmv.v.x v24, a0
; RV32V-NEXT:    vsll.vx v24, v24, a1
; RV32V-NEXT:    vsrl.vx v24, v24, a1
; RV32V-NEXT:    vor.vv v16, v24, v16
; RV32V-NEXT:    vadd.vv v8, v8, v16
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vadd_vx_nxv8i64_12:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
; RV64V-NEXT:    vadd.vx v8, v8, a0
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 %a, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vret = add <vscale x 8 x i64> %v, %splat
  ret <vscale x 8 x i64> %vret
}
267