; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

; If this check fails, please read test/CodeGen/AArch64/README for instructions on how to resolve it.
; WARN-NOT: warning

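; This file checks that the SVE shift and insert intrinsics (asr, asrd, insr,
; lsl, lsr, and the wide shift variants) lower to the expected predicated
; instructions. These intrinsics roughly correspond to the ACLE C intrinsic
; families svasr_m, svasrd_n_m, svinsr, svlsl_m and svlsr_m.
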
;
; ASR
;

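; Predicated arithmetic shift right: each active lane of %a is shifted right
; by the corresponding lane of %b with the sign bit replicated; inactive
; lanes pass through from %a unchanged (merging predication, p0/m).
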
define <vscale x 16 x i8> @asr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: asr_i8:
; CHECK: asr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @asr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: asr_i16:
; CHECK: asr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @asr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: asr_i32:
; CHECK: asr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @asr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: asr_i64:
; CHECK: asr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

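; The wide variants take their shift amounts as a vector of 64-bit elements:
; every narrower element is shifted by the 64-bit amount occupying its
; 64-bit segment of z1.
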
define <vscale x 16 x i8> @asr_wide_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: asr_wide_i8:
; CHECK: asr z0.b, p0/m, z0.b, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                    <vscale x 16 x i8> %a,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @asr_wide_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: asr_wide_i16:
; CHECK: asr z0.h, p0/m, z0.h, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x i16> %a,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @asr_wide_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: asr_wide_i32:
; CHECK: asr z0.s, p0/m, z0.s, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x i32> %a,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 4 x i32> %out
}

;
; ASRD
;

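; ASRD is an arithmetic shift right for signed divide by a power of two,
; rounding towards zero rather than towards minus infinity. For example, at
; a shift of #1 a plain asr maps -7 to -4, whereas asrd yields -3 (-7 / 2
; truncated towards zero). The immediate ranges from 1 up to the element
; size in bits, hence #64 is the maximum for .d elements.
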
define <vscale x 16 x i8> @asrd_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: asrd_i8:
; CHECK: asrd z0.b, p0/m, z0.b, #1
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                i32 1)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @asrd_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: asrd_i16:
; CHECK: asrd z0.h, p0/m, z0.h, #2
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asrd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                i32 2)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @asrd_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: asrd_i32:
; CHECK: asrd z0.s, p0/m, z0.s, #31
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asrd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                i32 31)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @asrd_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: asrd_i64:
; CHECK: asrd z0.d, p0/m, z0.d, #64
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asrd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                i32 64)
  ret <vscale x 2 x i64> %out
}

;
; INSR
;

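; INSR shifts the source vector up by one element and inserts the scalar
; operand at element 0, discarding the previous top element:
;   insr { e0, e1, ..., e(N-1) }, b  ==>  { b, e0, ..., e(N-2) }
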
define <vscale x 16 x i8> @insr_i8(<vscale x 16 x i8> %a, i8 %b) {
; CHECK-LABEL: insr_i8:
; CHECK: insr z0.b, w0
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8> %a, i8 %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @insr_i16(<vscale x 8 x i16> %a, i16 %b) {
; CHECK-LABEL: insr_i16:
; CHECK: insr z0.h, w0
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16> %a, i16 %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @insr_i32(<vscale x 4 x i32> %a, i32 %b) {
; CHECK-LABEL: insr_i32:
; CHECK: insr z0.s, w0
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.insr.nxv4i32(<vscale x 4 x i32> %a, i32 %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @insr_i64(<vscale x 2 x i64> %a, i64 %b) {
; CHECK-LABEL: insr_i64:
; CHECK: insr z0.d, x0
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.insr.nxv2i64(<vscale x 2 x i64> %a, i64 %b)
  ret <vscale x 2 x i64> %out
}

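; For the floating-point forms the scalar operand is passed in a FP/SIMD
; register, so insr takes h1/s1/d1 rather than a general-purpose register.
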
define <vscale x 8 x half> @insr_f16(<vscale x 8 x half> %a, half %b) {
; CHECK-LABEL: insr_f16:
; CHECK: insr z0.h, h1
; CHECK-NEXT: ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.insr.nxv8f16(<vscale x 8 x half> %a, half %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 8 x bfloat> @insr_bf16(<vscale x 8 x bfloat> %a, bfloat %b) #0 {
; CHECK-LABEL: insr_bf16:
; CHECK: insr z0.h, h1
; CHECK-NEXT: ret
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.insr.nxv8bf16(<vscale x 8 x bfloat> %a, bfloat %b)
  ret <vscale x 8 x bfloat> %out
}

define <vscale x 4 x float> @insr_f32(<vscale x 4 x float> %a, float %b) {
; CHECK-LABEL: insr_f32:
; CHECK: insr z0.s, s1
; CHECK-NEXT: ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.insr.nxv4f32(<vscale x 4 x float> %a, float %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @insr_f64(<vscale x 2 x double> %a, double %b) {
; CHECK-LABEL: insr_f64:
; CHECK: insr z0.d, d1
; CHECK-NEXT: ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.insr.nxv2f64(<vscale x 2 x double> %a, double %b)
  ret <vscale x 2 x double> %out
}

;
; LSL
;

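; Predicated logical shift left: zeroes are shifted in from the right;
; inactive lanes pass through from %a unchanged.
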
define <vscale x 16 x i8> @lsl_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: lsl_i8:
; CHECK: lsl z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsl_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: lsl_i16:
; CHECK: lsl z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsl_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: lsl_i32:
; CHECK: lsl z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @lsl_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsl_i64:
; CHECK: lsl z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

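; As with asr, the wide forms below shift each element by the 64-bit amount
; held in its 64-bit segment of z1.
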
define <vscale x 16 x i8> @lsl_wide_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsl_wide_i8:
; CHECK: lsl z0.b, p0/m, z0.b, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                    <vscale x 16 x i8> %a,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsl_wide_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsl_wide_i16:
; CHECK: lsl z0.h, p0/m, z0.h, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x i16> %a,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsl_wide_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsl_wide_i32:
; CHECK: lsl z0.s, p0/m, z0.s, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x i32> %a,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 4 x i32> %out
}

;
; LSR
;

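; Predicated logical shift right: zeroes are shifted in from the left (no
; sign replication); inactive lanes pass through from %a unchanged.
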
define <vscale x 16 x i8> @lsr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: lsr_i8:
; CHECK: lsr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: lsr_i16:
; CHECK: lsr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: lsr_i32:
; CHECK: lsr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @lsr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsr_i64:
; CHECK: lsr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

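; The wide forms again take per-segment 64-bit shift amounts from z1.d.
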
define <vscale x 16 x i8> @lsr_wide_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsr_wide_i8:
; CHECK: lsr z0.b, p0/m, z0.b, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                    <vscale x 16 x i8> %a,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsr_wide_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsr_wide_i16:
; CHECK: lsr z0.h, p0/m, z0.h, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x i16> %a,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsr_wide_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsr_wide_i32:
; CHECK: lsr z0.s, p0/m, z0.s, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x i32> %a,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 4 x i32> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.asrd.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.asrd.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.asrd.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8>, i8)
declare <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16>, i16)
declare <vscale x 4 x i32> @llvm.aarch64.sve.insr.nxv4i32(<vscale x 4 x i32>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.insr.nxv2i64(<vscale x 2 x i64>, i64)
declare <vscale x 8 x half> @llvm.aarch64.sve.insr.nxv8f16(<vscale x 8 x half>, half)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.insr.nxv8bf16(<vscale x 8 x bfloat>, bfloat)
declare <vscale x 4 x float> @llvm.aarch64.sve.insr.nxv4f32(<vscale x 4 x float>, float)
declare <vscale x 2 x double> @llvm.aarch64.sve.insr.nxv2f64(<vscale x 2 x double>, double)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

; +bf16 is required for the bfloat version.
attributes #0 = { "target-features"="+sve,+bf16" }