1 // REQUIRES: aarch64-registered-target
2 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
3 // RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
4 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -o - %s >/dev/null 2>%t
5 // RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
6
7 // If this check fails please read test/CodeGen/aarch64-sve-intrinsics/README for instructions on how to resolve it.
8 // ASM-NOT: warning
9 #include <arm_sve.h>
10
#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
// Overloaded forms drop the type/operand suffixes (A2, A4), so e.g.
// SVE_ACLE_FUNC(svasr,_s8,_z,) expands to svasr_z; otherwise all four
// fragments are pasted to form the full name svasr_s8_z.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
17
// _z (zeroing) form: inactive lanes of op1 are zeroed via sel before the
// predicated shift; no predicate conversion is needed for 8-bit elements.
svint8_t test_svasr_s8_z(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svasr_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s8,_z,)(pg, op1, op2);
}
26
// _z form, 16-bit elements: pg is narrowed from svbool_t first, then op1 is
// zeroed via sel before the shift.
svint16_t test_svasr_s16_z(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svasr_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s16,_z,)(pg, op1, op2);
}
36
// _z form, 32-bit elements: narrowed predicate plus sel-zeroing of op1.
svint32_t test_svasr_s32_z(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svasr_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s32,_z,)(pg, op1, op2);
}
46
// _z form, 64-bit elements: narrowed predicate plus sel-zeroing of op1.
svint64_t test_svasr_s64_z(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s64,_z,)(pg, op1, op2);
}
56
// _m (merging) form: op1 feeds the intrinsic directly; inactive lanes keep
// their op1 value, so no sel is emitted.
svint8_t test_svasr_s8_m(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svasr_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s8,_m,)(pg, op1, op2);
}
64
// _m form, 16-bit elements: only the predicate conversion precedes the shift.
svint16_t test_svasr_s16_m(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svasr_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s16,_m,)(pg, op1, op2);
}
73
// _m form, 32-bit elements.
svint32_t test_svasr_s32_m(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svasr_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s32,_m,)(pg, op1, op2);
}
82
// _m form, 64-bit elements.
svint64_t test_svasr_s64_m(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s64,_m,)(pg, op1, op2);
}
91
// _x (don't-care) form: lowered identically to _m here — the same predicated
// asr call with no select.
svint8_t test_svasr_s8_x(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svasr_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s8,_x,)(pg, op1, op2);
}
99
// _x form, 16-bit elements.
svint16_t test_svasr_s16_x(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svasr_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s16,_x,)(pg, op1, op2);
}
108
// _x form, 32-bit elements.
svint32_t test_svasr_s32_x(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svasr_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s32,_x,)(pg, op1, op2);
}
117
// _x form, 64-bit elements.
svint64_t test_svasr_s64_x(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s64,_x,)(pg, op1, op2);
}
126
// _n (scalar shift amount) _z form: op2 is broadcast with dup and op1 is
// zeroed via sel before the predicated shift.
svint64_t test_svasr_n_s64_z(svbool_t pg, svint64_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_n_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s64,_z,)(pg, op1, op2);
}
137
// _n _m form: scalar op2 is broadcast with dup; op1 is used directly.
svint64_t test_svasr_n_s64_m(svbool_t pg, svint64_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_n_s64_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s64,_m,)(pg, op1, op2);
}
147
// _n _x form: same lowering as _n _m.
svint64_t test_svasr_n_s64_x(svbool_t pg, svint64_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_n_s64_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s64,_x,)(pg, op1, op2);
}
157
// Wide variant: the shift amounts are 64-bit elements (asr.wide) regardless
// of the data element size. _z zeroes op1 via sel first.
svint8_t test_svasr_wide_s8_z(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s8,_z,)(pg, op1, op2);
}
166
// Wide _z form, 16-bit data with 64-bit shift amounts.
svint16_t test_svasr_wide_s16_z(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s16,_z,)(pg, op1, op2);
}
176
// Wide _z form, 32-bit data with 64-bit shift amounts.
svint32_t test_svasr_wide_s32_z(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s32,_z,)(pg, op1, op2);
}
186
// Wide _m form, 8-bit data: direct call, no sel and no predicate conversion.
svint8_t test_svasr_wide_s8_m(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s8,_m,)(pg, op1, op2);
}
194
// Wide _m form, 16-bit data.
svint16_t test_svasr_wide_s16_m(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s16,_m,)(pg, op1, op2);
}
203
// Wide _m form, 32-bit data.
svint32_t test_svasr_wide_s32_m(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s32,_m,)(pg, op1, op2);
}
212
// Wide _x form, 8-bit data: same lowering as _m.
svint8_t test_svasr_wide_s8_x(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s8,_x,)(pg, op1, op2);
}
220
// Wide _x form, 16-bit data.
svint16_t test_svasr_wide_s16_x(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s16,_x,)(pg, op1, op2);
}
229
// Wide _x form, 32-bit data.
svint32_t test_svasr_wide_s32_x(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s32,_x,)(pg, op1, op2);
}
238
// _n _z form, 8-bit: scalar op2 is broadcast with dup, op1 zeroed via sel;
// no predicate conversion needed for 8-bit elements.
svint8_t test_svasr_n_s8_z(svbool_t pg, svint8_t op1, uint8_t op2)
{
  // CHECK-LABEL: test_svasr_n_s8_z
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s8,_z,)(pg, op1, op2);
}
248
// _n _z form, 16-bit elements.
svint16_t test_svasr_n_s16_z(svbool_t pg, svint16_t op1, uint16_t op2)
{
  // CHECK-LABEL: test_svasr_n_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s16,_z,)(pg, op1, op2);
}
259
// _n _z form, 32-bit elements.
svint32_t test_svasr_n_s32_z(svbool_t pg, svint32_t op1, uint32_t op2)
{
  // CHECK-LABEL: test_svasr_n_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s32,_z,)(pg, op1, op2);
}
270
// _n _m form, 8-bit: only the scalar broadcast precedes the shift.
svint8_t test_svasr_n_s8_m(svbool_t pg, svint8_t op1, uint8_t op2)
{
  // CHECK-LABEL: test_svasr_n_s8_m
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s8,_m,)(pg, op1, op2);
}
279
// _n _m form, 16-bit elements.
svint16_t test_svasr_n_s16_m(svbool_t pg, svint16_t op1, uint16_t op2)
{
  // CHECK-LABEL: test_svasr_n_s16_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s16,_m,)(pg, op1, op2);
}
289
// _n _m form, 32-bit elements.
svint32_t test_svasr_n_s32_m(svbool_t pg, svint32_t op1, uint32_t op2)
{
  // CHECK-LABEL: test_svasr_n_s32_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s32,_m,)(pg, op1, op2);
}
299
// _n _x form, 8-bit: same lowering as _n _m.
svint8_t test_svasr_n_s8_x(svbool_t pg, svint8_t op1, uint8_t op2)
{
  // CHECK-LABEL: test_svasr_n_s8_x
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s8,_x,)(pg, op1, op2);
}
308
// _n _x form, 16-bit elements.
svint16_t test_svasr_n_s16_x(svbool_t pg, svint16_t op1, uint16_t op2)
{
  // CHECK-LABEL: test_svasr_n_s16_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s16,_x,)(pg, op1, op2);
}
318
// _n _x form, 32-bit elements.
svint32_t test_svasr_n_s32_x(svbool_t pg, svint32_t op1, uint32_t op2)
{
  // CHECK-LABEL: test_svasr_n_s32_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s32,_x,)(pg, op1, op2);
}
328
// Wide _n _m form, 8-bit data: scalar op2 broadcast to 64-bit lanes.
svint8_t test_svasr_wide_n_s8_m(svbool_t pg, svint8_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s8_m
  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s8,_m,)(pg, op1, op2);
}
337
// Wide _n _m form, 16-bit data.
svint16_t test_svasr_wide_n_s16_m(svbool_t pg, svint16_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s16_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s16,_m,)(pg, op1, op2);
}
347
// Wide _n _m form, 32-bit data.
svint32_t test_svasr_wide_n_s32_m(svbool_t pg, svint32_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s32_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s32,_m,)(pg, op1, op2);
}
357
// Wide _n _z form, 8-bit data: dup broadcasts the scalar shift, sel zeroes
// op1. The sel result variable is renamed PG -> OP for consistency with the
// s16/s32 siblings (it is a data vector, not a predicate); the rename is
// FileCheck-local to this function, so match behavior is unchanged.
svint8_t test_svasr_wide_n_s8_z(svbool_t pg, svint8_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s8_z
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[OP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[OP]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s8,_z,)(pg, op1, op2);
}
367
// Wide _n _z form, 16-bit data.
svint16_t test_svasr_wide_n_s16_z(svbool_t pg, svint16_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[OP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[OP]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s16,_z,)(pg, op1, op2);
}
378
// Wide _n _z form, 32-bit data.
svint32_t test_svasr_wide_n_s32_z(svbool_t pg, svint32_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[OP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[OP]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s32,_z,)(pg, op1, op2);
}
389
// Wide _n _x form, 8-bit data: same lowering as the _m variant.
svint8_t test_svasr_wide_n_s8_x(svbool_t pg, svint8_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s8_x
  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s8,_x,)(pg, op1, op2);
}
398
// Wide _n _x form, 16-bit data.
svint16_t test_svasr_wide_n_s16_x(svbool_t pg, svint16_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s16_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s16,_x,)(pg, op1, op2);
}
408
// Wide _n _x form, 32-bit data.
svint32_t test_svasr_wide_n_s32_x(svbool_t pg, svint32_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s32_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s32,_x,)(pg, op1, op2);
}
418