1 // REQUIRES: aarch64-registered-target
2 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
3 // RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
4 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -o - %s >/dev/null 2>%t
5 // RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
6
7 // If this check fails please read test/CodeGen/aarch64-sve-intrinsics/README for instructions on how to resolve it.
8 // ASM-NOT: warning
9 #include <arm_sve.h>
10
#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
// Overloaded forms drop the type suffix (A2) and any trailing part (A4),
// pasting only the base name and the predication suffix, e.g. svlsl_z.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
// Non-overloaded forms paste all four parts into the full mangled name,
// e.g. svlsl_s8_z.
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
17
// Zeroing forms (_z), signed elements: the CHECK lines verify that op1 is
// first selected against zeroinitializer (inactive lanes become zero) and
// then shifted with the predicated lsl intrinsic. For element widths
// narrower than the full predicate, %pg is converted from svbool_t first.
svint8_t test_svlsl_s8_z(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svlsl_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s8,_z,)(pg, op1, op2);
}

svint16_t test_svlsl_s16_z(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svlsl_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s16,_z,)(pg, op1, op2);
}

svint32_t test_svlsl_s32_z(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svlsl_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s32,_z,)(pg, op1, op2);
}

svint64_t test_svlsl_s64_z(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s64,_z,)(pg, op1, op2);
}
56
// Zeroing forms (_z), unsigned elements: same codegen pattern as the signed
// variants above — select op1 against zeroinitializer, then predicated lsl.
svuint8_t test_svlsl_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svlsl_u8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u8,_z,)(pg, op1, op2);
}

svuint16_t test_svlsl_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svlsl_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u16,_z,)(pg, op1, op2);
}

svuint32_t test_svlsl_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svlsl_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u32,_z,)(pg, op1, op2);
}

svuint64_t test_svlsl_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u64,_z,)(pg, op1, op2);
}
95
// Merging forms (_m), signed elements: no sel against zero — op1 is passed
// straight to the predicated lsl intrinsic (inactive lanes keep op1).
svint8_t test_svlsl_s8_m(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svlsl_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s8,_m,)(pg, op1, op2);
}

svint16_t test_svlsl_s16_m(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svlsl_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s16,_m,)(pg, op1, op2);
}

svint32_t test_svlsl_s32_m(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svlsl_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s32,_m,)(pg, op1, op2);
}

svint64_t test_svlsl_s64_m(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s64,_m,)(pg, op1, op2);
}
130
// Merging forms (_m), unsigned elements: same codegen as the signed _m
// variants — a single predicated lsl call, no zeroing sel.
svuint8_t test_svlsl_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svlsl_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u8,_m,)(pg, op1, op2);
}

svuint16_t test_svlsl_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svlsl_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u16,_m,)(pg, op1, op2);
}

svuint32_t test_svlsl_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svlsl_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u32,_m,)(pg, op1, op2);
}

svuint64_t test_svlsl_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u64,_m,)(pg, op1, op2);
}
165
// Don't-care forms (_x), signed elements: the CHECK lines show the same IR
// as the _m forms — the frontend lowers _x to the plain predicated lsl call.
svint8_t test_svlsl_s8_x(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svlsl_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s8,_x,)(pg, op1, op2);
}

svint16_t test_svlsl_s16_x(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svlsl_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s16,_x,)(pg, op1, op2);
}

svint32_t test_svlsl_s32_x(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svlsl_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s32,_x,)(pg, op1, op2);
}

svint64_t test_svlsl_s64_x(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s64,_x,)(pg, op1, op2);
}
200
// Don't-care forms (_x), unsigned elements: same lowering as the signed _x
// variants above.
svuint8_t test_svlsl_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svlsl_u8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u8,_x,)(pg, op1, op2);
}

svuint16_t test_svlsl_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svlsl_u16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u16,_x,)(pg, op1, op2);
}

svuint32_t test_svlsl_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svlsl_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u32,_x,)(pg, op1, op2);
}

svuint64_t test_svlsl_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_u64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u64,_x,)(pg, op1, op2);
}
235
// Wide zeroing forms (_wide ... _z), signed elements: the shift amount is
// always a 64-bit vector (<vscale x 2 x i64>), so only 8/16/32-bit element
// variants exist. op1 is zeroed on inactive lanes via sel before the shift.
svint8_t test_svlsl_wide_s8_z(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s8,_z,)(pg, op1, op2);
}

svint16_t test_svlsl_wide_s16_z(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s16,_z,)(pg, op1, op2);
}

svint32_t test_svlsl_wide_s32_z(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s32,_z,)(pg, op1, op2);
}
264
// Wide zeroing forms (_wide ... _z), unsigned elements: same pattern as the
// signed wide _z variants.
svuint8_t test_svlsl_wide_u8_z(svbool_t pg, svuint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u8,_z,)(pg, op1, op2);
}

svuint16_t test_svlsl_wide_u16_z(svbool_t pg, svuint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u16,_z,)(pg, op1, op2);
}

svuint32_t test_svlsl_wide_u32_z(svbool_t pg, svuint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u32,_z,)(pg, op1, op2);
}
293
// Wide merging forms (_wide ... _m), signed elements: single lsl.wide call,
// no zeroing sel.
svint8_t test_svlsl_wide_s8_m(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s8,_m,)(pg, op1, op2);
}

svint16_t test_svlsl_wide_s16_m(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s16,_m,)(pg, op1, op2);
}

svint32_t test_svlsl_wide_s32_m(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s32,_m,)(pg, op1, op2);
}
319
// Wide merging forms (_wide ... _m), unsigned elements.
svuint8_t test_svlsl_wide_u8_m(svbool_t pg, svuint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u8,_m,)(pg, op1, op2);
}

svuint16_t test_svlsl_wide_u16_m(svbool_t pg, svuint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u16,_m,)(pg, op1, op2);
}

svuint32_t test_svlsl_wide_u32_m(svbool_t pg, svuint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u32,_m,)(pg, op1, op2);
}
345
// Wide don't-care forms (_wide ... _x), signed elements: lowered to the same
// IR as the wide _m forms.
svint8_t test_svlsl_wide_s8_x(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s8,_x,)(pg, op1, op2);
}

svint16_t test_svlsl_wide_s16_x(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s16,_x,)(pg, op1, op2);
}

svint32_t test_svlsl_wide_s32_x(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s32,_x,)(pg, op1, op2);
}
371
// Wide don't-care forms (_wide ... _x), unsigned elements.
svuint8_t test_svlsl_wide_u8_x(svbool_t pg, svuint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u8,_x,)(pg, op1, op2);
}

svuint16_t test_svlsl_wide_u16_x(svbool_t pg, svuint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u16,_x,)(pg, op1, op2);
}

svuint32_t test_svlsl_wide_u32_x(svbool_t pg, svuint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u32,_x,)(pg, op1, op2);
}
397
// Wide merging forms with scalar shift (_wide _n ... _m): the scalar uint64_t
// op2 is splatted into a <vscale x 2 x i64> vector via sve.dup.x before the
// lsl.wide call.
svint8_t test_svlsl_wide_n_s8_m(svbool_t pg, svint8_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s8_m
  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s8,_m,)(pg, op1, op2);
}

svint16_t test_svlsl_wide_n_s16_m(svbool_t pg, svint16_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s16_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s16,_m,)(pg, op1, op2);
}

svint32_t test_svlsl_wide_n_s32_m(svbool_t pg, svint32_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s32_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s32,_m,)(pg, op1, op2);
}
426
// Wide zeroing forms with scalar shift (_wide _n ... _z): scalar op2 is
// splatted via sve.dup.x and op1 is zeroed on inactive lanes via sel.
svint8_t test_svlsl_wide_n_s8_z(svbool_t pg, svint8_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s8_z
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[PG]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s8,_z,)(pg, op1, op2);
}

svint16_t test_svlsl_wide_n_s16_z(svbool_t pg, svint16_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[OP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[OP]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s16,_z,)(pg, op1, op2);
}

svint32_t test_svlsl_wide_n_s32_z(svbool_t pg, svint32_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[OP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[OP]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s32,_z,)(pg, op1, op2);
}
458
// Wide don't-care forms with scalar shift (_wide _n ... _x): scalar op2 is
// splatted via sve.dup.x; otherwise identical to the wide _n _m lowering.
svint8_t test_svlsl_wide_n_s8_x(svbool_t pg, svint8_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s8_x
  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s8,_x,)(pg, op1, op2);
}

svint16_t test_svlsl_wide_n_s16_x(svbool_t pg, svint16_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s16_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s16,_x,)(pg, op1, op2);
}

svint32_t test_svlsl_wide_n_s32_x(svbool_t pg, svint32_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s32_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s32,_x,)(pg, op1, op2);
}
487