1 // REQUIRES: aarch64-registered-target
2 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
3 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
4 // RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
5 // RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
6 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -o - %s >/dev/null
7 #include <arm_sve.h>
8
9 #ifdef SVE_OVERLOADED_FORMS
10 // A simple used,unused... macro, long enough to represent any SVE builtin.
11 #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
12 #else
13 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
14 #endif
15
svint8_t test_svlsl_s8_z(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svlsl_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s8,_z,)(pg, op1, op2);
}
24
svint16_t test_svlsl_s16_z(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svlsl_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s16,_z,)(pg, op1, op2);
}
34
svint32_t test_svlsl_s32_z(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svlsl_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s32,_z,)(pg, op1, op2);
}
44
svint64_t test_svlsl_s64_z(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s64,_z,)(pg, op1, op2);
}
54
svuint8_t test_svlsl_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svlsl_u8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u8,_z,)(pg, op1, op2);
}
63
svuint16_t test_svlsl_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svlsl_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u16,_z,)(pg, op1, op2);
}
73
svuint32_t test_svlsl_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svlsl_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u32,_z,)(pg, op1, op2);
}
83
svuint64_t test_svlsl_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u64,_z,)(pg, op1, op2);
}
93
svint8_t test_svlsl_s8_m(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svlsl_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s8,_m,)(pg, op1, op2);
}
101
svint16_t test_svlsl_s16_m(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svlsl_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s16,_m,)(pg, op1, op2);
}
110
svint32_t test_svlsl_s32_m(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svlsl_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s32,_m,)(pg, op1, op2);
}
119
svint64_t test_svlsl_s64_m(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s64,_m,)(pg, op1, op2);
}
128
svuint8_t test_svlsl_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svlsl_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u8,_m,)(pg, op1, op2);
}
136
svuint16_t test_svlsl_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svlsl_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u16,_m,)(pg, op1, op2);
}
145
svuint32_t test_svlsl_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svlsl_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u32,_m,)(pg, op1, op2);
}
154
svuint64_t test_svlsl_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u64,_m,)(pg, op1, op2);
}
163
svint8_t test_svlsl_s8_x(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svlsl_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s8,_x,)(pg, op1, op2);
}
171
svint16_t test_svlsl_s16_x(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svlsl_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s16,_x,)(pg, op1, op2);
}
180
svint32_t test_svlsl_s32_x(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svlsl_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s32,_x,)(pg, op1, op2);
}
189
svint64_t test_svlsl_s64_x(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_s64,_x,)(pg, op1, op2);
}
198
svuint8_t test_svlsl_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svlsl_u8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u8,_x,)(pg, op1, op2);
}
206
svuint16_t test_svlsl_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svlsl_u16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u16,_x,)(pg, op1, op2);
}
215
svuint32_t test_svlsl_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svlsl_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u32,_x,)(pg, op1, op2);
}
224
svuint64_t test_svlsl_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_u64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl,_u64,_x,)(pg, op1, op2);
}
233
svint8_t test_svlsl_wide_s8_z(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s8,_z,)(pg, op1, op2);
}
242
svint16_t test_svlsl_wide_s16_z(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s16,_z,)(pg, op1, op2);
}
252
svint32_t test_svlsl_wide_s32_z(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s32,_z,)(pg, op1, op2);
}
262
svuint8_t test_svlsl_wide_u8_z(svbool_t pg, svuint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u8,_z,)(pg, op1, op2);
}
271
svuint16_t test_svlsl_wide_u16_z(svbool_t pg, svuint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u16,_z,)(pg, op1, op2);
}
281
svuint32_t test_svlsl_wide_u32_z(svbool_t pg, svuint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u32,_z,)(pg, op1, op2);
}
291
svint8_t test_svlsl_wide_s8_m(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s8,_m,)(pg, op1, op2);
}
299
svint16_t test_svlsl_wide_s16_m(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s16,_m,)(pg, op1, op2);
}
308
svint32_t test_svlsl_wide_s32_m(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s32,_m,)(pg, op1, op2);
}
317
svuint8_t test_svlsl_wide_u8_m(svbool_t pg, svuint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u8,_m,)(pg, op1, op2);
}
325
svuint16_t test_svlsl_wide_u16_m(svbool_t pg, svuint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u16,_m,)(pg, op1, op2);
}
334
svuint32_t test_svlsl_wide_u32_m(svbool_t pg, svuint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u32,_m,)(pg, op1, op2);
}
343
svint8_t test_svlsl_wide_s8_x(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s8,_x,)(pg, op1, op2);
}
351
svint16_t test_svlsl_wide_s16_x(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s16,_x,)(pg, op1, op2);
}
360
svint32_t test_svlsl_wide_s32_x(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_s32,_x,)(pg, op1, op2);
}
369
svuint8_t test_svlsl_wide_u8_x(svbool_t pg, svuint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u8,_x,)(pg, op1, op2);
}
377
svuint16_t test_svlsl_wide_u16_x(svbool_t pg, svuint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u16,_x,)(pg, op1, op2);
}
386
svuint32_t test_svlsl_wide_u32_x(svbool_t pg, svuint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_u32,_x,)(pg, op1, op2);
}
395
svint8_t test_svlsl_wide_n_s8_m(svbool_t pg, svint8_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s8_m
  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s8,_m,)(pg, op1, op2);
}
404
svint16_t test_svlsl_wide_n_s16_m(svbool_t pg, svint16_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s16_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s16,_m,)(pg, op1, op2);
}
414
svint32_t test_svlsl_wide_n_s32_m(svbool_t pg, svint32_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s32_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s32,_m,)(pg, op1, op2);
}
424
svint8_t test_svlsl_wide_n_s8_z(svbool_t pg, svint8_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s8_z
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[PG]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s8,_z,)(pg, op1, op2);
}
434
svint16_t test_svlsl_wide_n_s16_z(svbool_t pg, svint16_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[OP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[OP]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s16,_z,)(pg, op1, op2);
}
445
svint32_t test_svlsl_wide_n_s32_z(svbool_t pg, svint32_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[OP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[OP]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s32,_z,)(pg, op1, op2);
}
456
svint8_t test_svlsl_wide_n_s8_x(svbool_t pg, svint8_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s8_x
  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s8,_x,)(pg, op1, op2);
}
465
svint16_t test_svlsl_wide_n_s16_x(svbool_t pg, svint16_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s16_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s16,_x,)(pg, op1, op2);
}
475
svint32_t test_svlsl_wide_n_s32_x(svbool_t pg, svint32_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svlsl_wide_n_s32_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlsl_wide,_n_s32,_x,)(pg, op1, op2);
}
485