; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
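
; vmsbf.m ("set-before-first mask bit") writes a 1 to each result element
; that precedes the first set bit of the source mask and a 0 to that element
; and all that follow; the .mask variants update only elements that are
; active under the mask operand, hence the tail-undisturbed (tu) vsetvli.
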
declare <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmsbf_m_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_m_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmsbf.m v25, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
    <vscale x 1 x i1> %0,
    i32 %1)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i1> %1,
    <vscale x 1 x i1> %2,
    i32 %3)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmsbf_m_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_m_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmsbf.m v25, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
    <vscale x 2 x i1> %0,
    i32 %1)
  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i1> %1,
    <vscale x 2 x i1> %2,
    i32 %3)
  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmsbf_m_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_m_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmsbf.m v25, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
    <vscale x 4 x i1> %0,
    i32 %1)
  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i1> %1,
    <vscale x 4 x i1> %2,
    i32 %3)
  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmsbf_m_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_m_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmsbf.m v25, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
    <vscale x 8 x i1> %0,
    i32 %1)
  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i1> %1,
    <vscale x 8 x i1> %2,
    i32 %3)
  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1(
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i1> @intrinsic_vmsbf_m_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_m_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmsbf.m v25, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1(
    <vscale x 16 x i1> %0,
    i32 %1)
  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i1> %1,
    <vscale x 16 x i1> %2,
    i32 %3)
  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1(
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i1> @intrinsic_vmsbf_m_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_m_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmsbf.m v25, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1(
    <vscale x 32 x i1> %0,
    i32 %1)
  ret <vscale x 32 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1(
  <vscale x 32 x i1>,
  <vscale x 32 x i1>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i1> @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i1> %1,
    <vscale x 32 x i1> %2,
    i32 %3)
  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1(
  <vscale x 64 x i1>,
  i32);

define <vscale x 64 x i1> @intrinsic_vmsbf_m_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_m_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vmsbf.m v25, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1(
    <vscale x 64 x i1> %0,
    i32 %1)
  ret <vscale x 64 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  i32);

define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
    <vscale x 64 x i1> %0,
    <vscale x 64 x i1> %1,
    <vscale x 64 x i1> %2,
    i32 %3)
  ret <vscale x 64 x i1> %a
}