// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -o - %s >/dev/null
7 #include <arm_sve.h>
8
9 #ifdef SVE_OVERLOADED_FORMS
10 // A simple used,unused... macro, long enough to represent any SVE builtin.
11 #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
12 #else
13 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
14 #endif
15
test_svadd_s8_z(svbool_t pg,svint8_t op1,svint8_t op2)16 svint8_t test_svadd_s8_z(svbool_t pg, svint8_t op1, svint8_t op2)
17 {
18 // CHECK-LABEL: test_svadd_s8_z
19 // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
20 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
21 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
22 return SVE_ACLE_FUNC(svadd,_s8,_z,)(pg, op1, op2);
23 }
24
test_svadd_s16_z(svbool_t pg,svint16_t op1,svint16_t op2)25 svint16_t test_svadd_s16_z(svbool_t pg, svint16_t op1, svint16_t op2)
26 {
27 // CHECK-LABEL: test_svadd_s16_z
28 // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
29 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
30 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
31 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
32 return SVE_ACLE_FUNC(svadd,_s16,_z,)(pg, op1, op2);
33 }
34
test_svadd_s32_z(svbool_t pg,svint32_t op1,svint32_t op2)35 svint32_t test_svadd_s32_z(svbool_t pg, svint32_t op1, svint32_t op2)
36 {
37 // CHECK-LABEL: test_svadd_s32_z
38 // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
39 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
40 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
41 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
42 return SVE_ACLE_FUNC(svadd,_s32,_z,)(pg, op1, op2);
43 }
44
test_svadd_s64_z(svbool_t pg,svint64_t op1,svint64_t op2)45 svint64_t test_svadd_s64_z(svbool_t pg, svint64_t op1, svint64_t op2)
46 {
47 // CHECK-LABEL: test_svadd_s64_z
48 // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
49 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
50 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
51 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
52 return SVE_ACLE_FUNC(svadd,_s64,_z,)(pg, op1, op2);
53 }
54
test_svadd_u8_z(svbool_t pg,svuint8_t op1,svuint8_t op2)55 svuint8_t test_svadd_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
56 {
57 // CHECK-LABEL: test_svadd_u8_z
58 // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
59 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
60 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
61 return SVE_ACLE_FUNC(svadd,_u8,_z,)(pg, op1, op2);
62 }
63
test_svadd_u16_z(svbool_t pg,svuint16_t op1,svuint16_t op2)64 svuint16_t test_svadd_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
65 {
66 // CHECK-LABEL: test_svadd_u16_z
67 // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
68 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
69 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
70 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
71 return SVE_ACLE_FUNC(svadd,_u16,_z,)(pg, op1, op2);
72 }
73
test_svadd_u32_z(svbool_t pg,svuint32_t op1,svuint32_t op2)74 svuint32_t test_svadd_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
75 {
76 // CHECK-LABEL: test_svadd_u32_z
77 // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
78 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
79 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
80 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
81 return SVE_ACLE_FUNC(svadd,_u32,_z,)(pg, op1, op2);
82 }
83
test_svadd_u64_z(svbool_t pg,svuint64_t op1,svuint64_t op2)84 svuint64_t test_svadd_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
85 {
86 // CHECK-LABEL: test_svadd_u64_z
87 // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
88 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
89 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
90 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
91 return SVE_ACLE_FUNC(svadd,_u64,_z,)(pg, op1, op2);
92 }
93
test_svadd_s8_m(svbool_t pg,svint8_t op1,svint8_t op2)94 svint8_t test_svadd_s8_m(svbool_t pg, svint8_t op1, svint8_t op2)
95 {
96 // CHECK-LABEL: test_svadd_s8_m
97 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
98 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
99 return SVE_ACLE_FUNC(svadd,_s8,_m,)(pg, op1, op2);
100 }
101
test_svadd_s16_m(svbool_t pg,svint16_t op1,svint16_t op2)102 svint16_t test_svadd_s16_m(svbool_t pg, svint16_t op1, svint16_t op2)
103 {
104 // CHECK-LABEL: test_svadd_s16_m
105 // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
106 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
107 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
108 return SVE_ACLE_FUNC(svadd,_s16,_m,)(pg, op1, op2);
109 }
110
test_svadd_s32_m(svbool_t pg,svint32_t op1,svint32_t op2)111 svint32_t test_svadd_s32_m(svbool_t pg, svint32_t op1, svint32_t op2)
112 {
113 // CHECK-LABEL: test_svadd_s32_m
114 // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
115 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
116 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
117 return SVE_ACLE_FUNC(svadd,_s32,_m,)(pg, op1, op2);
118 }
119
test_svadd_s64_m(svbool_t pg,svint64_t op1,svint64_t op2)120 svint64_t test_svadd_s64_m(svbool_t pg, svint64_t op1, svint64_t op2)
121 {
122 // CHECK-LABEL: test_svadd_s64_m
123 // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
124 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
125 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
126 return SVE_ACLE_FUNC(svadd,_s64,_m,)(pg, op1, op2);
127 }
128
test_svadd_u8_m(svbool_t pg,svuint8_t op1,svuint8_t op2)129 svuint8_t test_svadd_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
130 {
131 // CHECK-LABEL: test_svadd_u8_m
132 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
133 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
134 return SVE_ACLE_FUNC(svadd,_u8,_m,)(pg, op1, op2);
135 }
136
test_svadd_u16_m(svbool_t pg,svuint16_t op1,svuint16_t op2)137 svuint16_t test_svadd_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
138 {
139 // CHECK-LABEL: test_svadd_u16_m
140 // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
141 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
142 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
143 return SVE_ACLE_FUNC(svadd,_u16,_m,)(pg, op1, op2);
144 }
145
test_svadd_u32_m(svbool_t pg,svuint32_t op1,svuint32_t op2)146 svuint32_t test_svadd_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
147 {
148 // CHECK-LABEL: test_svadd_u32_m
149 // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
150 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
151 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
152 return SVE_ACLE_FUNC(svadd,_u32,_m,)(pg, op1, op2);
153 }
154
test_svadd_u64_m(svbool_t pg,svuint64_t op1,svuint64_t op2)155 svuint64_t test_svadd_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
156 {
157 // CHECK-LABEL: test_svadd_u64_m
158 // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
159 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
160 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
161 return SVE_ACLE_FUNC(svadd,_u64,_m,)(pg, op1, op2);
162 }
163
test_svadd_s8_x(svbool_t pg,svint8_t op1,svint8_t op2)164 svint8_t test_svadd_s8_x(svbool_t pg, svint8_t op1, svint8_t op2)
165 {
166 // CHECK-LABEL: test_svadd_s8_x
167 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
168 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
169 return SVE_ACLE_FUNC(svadd,_s8,_x,)(pg, op1, op2);
170 }
171
test_svadd_s16_x(svbool_t pg,svint16_t op1,svint16_t op2)172 svint16_t test_svadd_s16_x(svbool_t pg, svint16_t op1, svint16_t op2)
173 {
174 // CHECK-LABEL: test_svadd_s16_x
175 // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
176 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
177 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
178 return SVE_ACLE_FUNC(svadd,_s16,_x,)(pg, op1, op2);
179 }
180
test_svadd_s32_x(svbool_t pg,svint32_t op1,svint32_t op2)181 svint32_t test_svadd_s32_x(svbool_t pg, svint32_t op1, svint32_t op2)
182 {
183 // CHECK-LABEL: test_svadd_s32_x
184 // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
185 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
186 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
187 return SVE_ACLE_FUNC(svadd,_s32,_x,)(pg, op1, op2);
188 }
189
test_svadd_s64_x(svbool_t pg,svint64_t op1,svint64_t op2)190 svint64_t test_svadd_s64_x(svbool_t pg, svint64_t op1, svint64_t op2)
191 {
192 // CHECK-LABEL: test_svadd_s64_x
193 // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
194 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
195 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
196 return SVE_ACLE_FUNC(svadd,_s64,_x,)(pg, op1, op2);
197 }
198
test_svadd_u8_x(svbool_t pg,svuint8_t op1,svuint8_t op2)199 svuint8_t test_svadd_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
200 {
201 // CHECK-LABEL: test_svadd_u8_x
202 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
203 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
204 return SVE_ACLE_FUNC(svadd,_u8,_x,)(pg, op1, op2);
205 }
206
test_svadd_u16_x(svbool_t pg,svuint16_t op1,svuint16_t op2)207 svuint16_t test_svadd_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
208 {
209 // CHECK-LABEL: test_svadd_u16_x
210 // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
211 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
212 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
213 return SVE_ACLE_FUNC(svadd,_u16,_x,)(pg, op1, op2);
214 }
215
test_svadd_u32_x(svbool_t pg,svuint32_t op1,svuint32_t op2)216 svuint32_t test_svadd_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
217 {
218 // CHECK-LABEL: test_svadd_u32_x
219 // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
220 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
221 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
222 return SVE_ACLE_FUNC(svadd,_u32,_x,)(pg, op1, op2);
223 }
224
test_svadd_u64_x(svbool_t pg,svuint64_t op1,svuint64_t op2)225 svuint64_t test_svadd_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
226 {
227 // CHECK-LABEL: test_svadd_u64_x
228 // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
229 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
230 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
231 return SVE_ACLE_FUNC(svadd,_u64,_x,)(pg, op1, op2);
232 }
233
test_svadd_n_s8_z(svbool_t pg,svint8_t op1,int8_t op2)234 svint8_t test_svadd_n_s8_z(svbool_t pg, svint8_t op1, int8_t op2)
235 {
236 // CHECK-LABEL: test_svadd_n_s8_z
237 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
238 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
239 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %[[DUP]])
240 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
241 return SVE_ACLE_FUNC(svadd,_n_s8,_z,)(pg, op1, op2);
242 }
243
test_svadd_n_s16_z(svbool_t pg,svint16_t op1,int16_t op2)244 svint16_t test_svadd_n_s16_z(svbool_t pg, svint16_t op1, int16_t op2)
245 {
246 // CHECK-LABEL: test_svadd_n_s16_z
247 // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
248 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
249 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
250 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %[[DUP]])
251 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
252 return SVE_ACLE_FUNC(svadd,_n_s16,_z,)(pg, op1, op2);
253 }
254
test_svadd_n_s32_z(svbool_t pg,svint32_t op1,int32_t op2)255 svint32_t test_svadd_n_s32_z(svbool_t pg, svint32_t op1, int32_t op2)
256 {
257 // CHECK-LABEL: test_svadd_n_s32_z
258 // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
259 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
260 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
261 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %[[DUP]])
262 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
263 return SVE_ACLE_FUNC(svadd,_n_s32,_z,)(pg, op1, op2);
264 }
265
test_svadd_n_s64_z(svbool_t pg,svint64_t op1,int64_t op2)266 svint64_t test_svadd_n_s64_z(svbool_t pg, svint64_t op1, int64_t op2)
267 {
268 // CHECK-LABEL: test_svadd_n_s64_z
269 // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
270 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
271 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
272 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %[[DUP]])
273 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
274 return SVE_ACLE_FUNC(svadd,_n_s64,_z,)(pg, op1, op2);
275 }
276
test_svadd_n_u8_z(svbool_t pg,svuint8_t op1,uint8_t op2)277 svuint8_t test_svadd_n_u8_z(svbool_t pg, svuint8_t op1, uint8_t op2)
278 {
279 // CHECK-LABEL: test_svadd_n_u8_z
280 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
281 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
282 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %[[DUP]])
283 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
284 return SVE_ACLE_FUNC(svadd,_n_u8,_z,)(pg, op1, op2);
285 }
286
test_svadd_n_u16_z(svbool_t pg,svuint16_t op1,uint16_t op2)287 svuint16_t test_svadd_n_u16_z(svbool_t pg, svuint16_t op1, uint16_t op2)
288 {
289 // CHECK-LABEL: test_svadd_n_u16_z
290 // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
291 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
292 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
293 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %[[DUP]])
294 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
295 return SVE_ACLE_FUNC(svadd,_n_u16,_z,)(pg, op1, op2);
296 }
297
test_svadd_n_u32_z(svbool_t pg,svuint32_t op1,uint32_t op2)298 svuint32_t test_svadd_n_u32_z(svbool_t pg, svuint32_t op1, uint32_t op2)
299 {
300 // CHECK-LABEL: test_svadd_n_u32_z
301 // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
302 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
303 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
304 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %[[DUP]])
305 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
306 return SVE_ACLE_FUNC(svadd,_n_u32,_z,)(pg, op1, op2);
307 }
308
test_svadd_n_u64_z(svbool_t pg,svuint64_t op1,uint64_t op2)309 svuint64_t test_svadd_n_u64_z(svbool_t pg, svuint64_t op1, uint64_t op2)
310 {
311 // CHECK-LABEL: test_svadd_n_u64_z
312 // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
313 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
314 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
315 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %[[DUP]])
316 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
317 return SVE_ACLE_FUNC(svadd,_n_u64,_z,)(pg, op1, op2);
318 }
319
test_svadd_n_s8_m(svbool_t pg,svint8_t op1,int8_t op2)320 svint8_t test_svadd_n_s8_m(svbool_t pg, svint8_t op1, int8_t op2)
321 {
322 // CHECK-LABEL: test_svadd_n_s8_m
323 // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
324 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
325 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
326 return SVE_ACLE_FUNC(svadd,_n_s8,_m,)(pg, op1, op2);
327 }
328
test_svadd_n_s16_m(svbool_t pg,svint16_t op1,int16_t op2)329 svint16_t test_svadd_n_s16_m(svbool_t pg, svint16_t op1, int16_t op2)
330 {
331 // CHECK-LABEL: test_svadd_n_s16_m
332 // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
333 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
334 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
335 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
336 return SVE_ACLE_FUNC(svadd,_n_s16,_m,)(pg, op1, op2);
337 }
338
test_svadd_n_s32_m(svbool_t pg,svint32_t op1,int32_t op2)339 svint32_t test_svadd_n_s32_m(svbool_t pg, svint32_t op1, int32_t op2)
340 {
341 // CHECK-LABEL: test_svadd_n_s32_m
342 // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
343 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
344 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
345 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
346 return SVE_ACLE_FUNC(svadd,_n_s32,_m,)(pg, op1, op2);
347 }
348
test_svadd_n_s64_m(svbool_t pg,svint64_t op1,int64_t op2)349 svint64_t test_svadd_n_s64_m(svbool_t pg, svint64_t op1, int64_t op2)
350 {
351 // CHECK-LABEL: test_svadd_n_s64_m
352 // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
353 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
354 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
355 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
356 return SVE_ACLE_FUNC(svadd,_n_s64,_m,)(pg, op1, op2);
357 }
358
test_svadd_n_u8_m(svbool_t pg,svuint8_t op1,uint8_t op2)359 svuint8_t test_svadd_n_u8_m(svbool_t pg, svuint8_t op1, uint8_t op2)
360 {
361 // CHECK-LABEL: test_svadd_n_u8_m
362 // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
363 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
364 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
365 return SVE_ACLE_FUNC(svadd,_n_u8,_m,)(pg, op1, op2);
366 }
367
test_svadd_n_u16_m(svbool_t pg,svuint16_t op1,uint16_t op2)368 svuint16_t test_svadd_n_u16_m(svbool_t pg, svuint16_t op1, uint16_t op2)
369 {
370 // CHECK-LABEL: test_svadd_n_u16_m
371 // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
372 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
373 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
374 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
375 return SVE_ACLE_FUNC(svadd,_n_u16,_m,)(pg, op1, op2);
376 }
377
test_svadd_n_u32_m(svbool_t pg,svuint32_t op1,uint32_t op2)378 svuint32_t test_svadd_n_u32_m(svbool_t pg, svuint32_t op1, uint32_t op2)
379 {
380 // CHECK-LABEL: test_svadd_n_u32_m
381 // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
382 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
383 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
384 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
385 return SVE_ACLE_FUNC(svadd,_n_u32,_m,)(pg, op1, op2);
386 }
387
test_svadd_n_u64_m(svbool_t pg,svuint64_t op1,uint64_t op2)388 svuint64_t test_svadd_n_u64_m(svbool_t pg, svuint64_t op1, uint64_t op2)
389 {
390 // CHECK-LABEL: test_svadd_n_u64_m
391 // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
392 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
393 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
394 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
395 return SVE_ACLE_FUNC(svadd,_n_u64,_m,)(pg, op1, op2);
396 }
397
test_svadd_n_s8_x(svbool_t pg,svint8_t op1,int8_t op2)398 svint8_t test_svadd_n_s8_x(svbool_t pg, svint8_t op1, int8_t op2)
399 {
400 // CHECK-LABEL: test_svadd_n_s8_x
401 // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
402 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
403 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
404 return SVE_ACLE_FUNC(svadd,_n_s8,_x,)(pg, op1, op2);
405 }
406
test_svadd_n_s16_x(svbool_t pg,svint16_t op1,int16_t op2)407 svint16_t test_svadd_n_s16_x(svbool_t pg, svint16_t op1, int16_t op2)
408 {
409 // CHECK-LABEL: test_svadd_n_s16_x
410 // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
411 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
412 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
413 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
414 return SVE_ACLE_FUNC(svadd,_n_s16,_x,)(pg, op1, op2);
415 }
416
test_svadd_n_s32_x(svbool_t pg,svint32_t op1,int32_t op2)417 svint32_t test_svadd_n_s32_x(svbool_t pg, svint32_t op1, int32_t op2)
418 {
419 // CHECK-LABEL: test_svadd_n_s32_x
420 // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
421 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
422 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
423 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
424 return SVE_ACLE_FUNC(svadd,_n_s32,_x,)(pg, op1, op2);
425 }
426
test_svadd_n_s64_x(svbool_t pg,svint64_t op1,int64_t op2)427 svint64_t test_svadd_n_s64_x(svbool_t pg, svint64_t op1, int64_t op2)
428 {
429 // CHECK-LABEL: test_svadd_n_s64_x
430 // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
431 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
432 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
433 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
434 return SVE_ACLE_FUNC(svadd,_n_s64,_x,)(pg, op1, op2);
435 }
436
test_svadd_n_u8_x(svbool_t pg,svuint8_t op1,uint8_t op2)437 svuint8_t test_svadd_n_u8_x(svbool_t pg, svuint8_t op1, uint8_t op2)
438 {
439 // CHECK-LABEL: test_svadd_n_u8_x
440 // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
441 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
442 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
443 return SVE_ACLE_FUNC(svadd,_n_u8,_x,)(pg, op1, op2);
444 }
445
test_svadd_n_u16_x(svbool_t pg,svuint16_t op1,uint16_t op2)446 svuint16_t test_svadd_n_u16_x(svbool_t pg, svuint16_t op1, uint16_t op2)
447 {
448 // CHECK-LABEL: test_svadd_n_u16_x
449 // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
450 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
451 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
452 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
453 return SVE_ACLE_FUNC(svadd,_n_u16,_x,)(pg, op1, op2);
454 }
455
// x-form add of a u32 scalar: predicate narrowed to 4 x i1, op2 splatted,
// then a single predicated sve.add.
svuint32_t test_svadd_n_u32_x(svbool_t pg, svuint32_t op1, uint32_t op2)
{
  // CHECK-LABEL: test_svadd_n_u32_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_n_u32,_x,)(pg, op1, op2);
}
465
// x-form add of a u64 scalar: predicate narrowed to 2 x i1, op2 splatted,
// then a single predicated sve.add.
svuint64_t test_svadd_n_u64_x(svbool_t pg, svuint64_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svadd_n_u64_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_n_u64,_x,)(pg, op1, op2);
}
475
// z-form f16 vector add: inactive lanes of op1 are zeroed first (sve.sel
// against zeroinitializer), then the predicated fadd is applied.
svfloat16_t test_svadd_f16_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
{
  // CHECK-LABEL: test_svadd_f16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.sel.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %[[SEL]], <vscale x 8 x half> %op2)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_f16,_z,)(pg, op1, op2);
}
485
// z-form f32 vector add: sel-with-zero on op1, then predicated fadd.
svfloat32_t test_svadd_f32_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
{
  // CHECK-LABEL: test_svadd_f32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.sel.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %[[SEL]], <vscale x 4 x float> %op2)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_f32,_z,)(pg, op1, op2);
}
495
// z-form f64 vector add: sel-with-zero on op1, then predicated fadd.
svfloat64_t test_svadd_f64_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
{
  // CHECK-LABEL: test_svadd_f64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %[[SEL]], <vscale x 2 x double> %op2)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_f64,_z,)(pg, op1, op2);
}
505
// m-form f16 vector add: lowers to a single predicated fadd taking op1
// directly (no sel), after narrowing the predicate.
svfloat16_t test_svadd_f16_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
{
  // CHECK-LABEL: test_svadd_f16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_f16,_m,)(pg, op1, op2);
}
514
// m-form f32 vector add: single predicated fadd after predicate narrowing.
svfloat32_t test_svadd_f32_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
{
  // CHECK-LABEL: test_svadd_f32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_f32,_m,)(pg, op1, op2);
}
523
// m-form f64 vector add: single predicated fadd after predicate narrowing.
svfloat64_t test_svadd_f64_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
{
  // CHECK-LABEL: test_svadd_f64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_f64,_m,)(pg, op1, op2);
}
532
// x-form f16 vector add: emits the same IR shape as the m-form here — a
// single predicated fadd on op1/op2.
svfloat16_t test_svadd_f16_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
{
  // CHECK-LABEL: test_svadd_f16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_f16,_x,)(pg, op1, op2);
}
541
// x-form f32 vector add: single predicated fadd, same IR shape as m-form.
svfloat32_t test_svadd_f32_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
{
  // CHECK-LABEL: test_svadd_f32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_f32,_x,)(pg, op1, op2);
}
550
// x-form f64 vector add: single predicated fadd, same IR shape as m-form.
svfloat64_t test_svadd_f64_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
{
  // CHECK-LABEL: test_svadd_f64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_f64,_x,)(pg, op1, op2);
}
559
// z-form add of an f16 scalar: op2 is splatted via sve.dup.x, inactive lanes
// of op1 are zeroed via sel, then the predicated fadd combines the two.
svfloat16_t test_svadd_n_f16_z(svbool_t pg, svfloat16_t op1, float16_t op2)
{
  // CHECK-LABEL: test_svadd_n_f16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.sel.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %[[SEL]], <vscale x 8 x half> %[[DUP]])
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_n_f16,_z,)(pg, op1, op2);
}
570
// z-form add of an f32 scalar: dup of op2, sel-with-zero on op1, then fadd.
svfloat32_t test_svadd_n_f32_z(svbool_t pg, svfloat32_t op1, float32_t op2)
{
  // CHECK-LABEL: test_svadd_n_f32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.sel.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %[[SEL]], <vscale x 4 x float> %[[DUP]])
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_n_f32,_z,)(pg, op1, op2);
}
581
// z-form add of an f64 scalar: dup of op2, sel-with-zero on op1, then fadd.
svfloat64_t test_svadd_n_f64_z(svbool_t pg, svfloat64_t op1, float64_t op2)
{
  // CHECK-LABEL: test_svadd_n_f64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %[[SEL]], <vscale x 2 x double> %[[DUP]])
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_n_f64,_z,)(pg, op1, op2);
}
592
// m-form add of an f16 scalar: op2 is splatted via sve.dup.x and added to op1
// with a single predicated fadd (no sel).
svfloat16_t test_svadd_n_f16_m(svbool_t pg, svfloat16_t op1, float16_t op2)
{
  // CHECK-LABEL: test_svadd_n_f16_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %[[DUP]])
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_n_f16,_m,)(pg, op1, op2);
}
602
// m-form add of an f32 scalar: dup of op2 followed by one predicated fadd.
svfloat32_t test_svadd_n_f32_m(svbool_t pg, svfloat32_t op1, float32_t op2)
{
  // CHECK-LABEL: test_svadd_n_f32_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %[[DUP]])
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_n_f32,_m,)(pg, op1, op2);
}
612
// m-form add of an f64 scalar: dup of op2 followed by one predicated fadd.
svfloat64_t test_svadd_n_f64_m(svbool_t pg, svfloat64_t op1, float64_t op2)
{
  // CHECK-LABEL: test_svadd_n_f64_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %[[DUP]])
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_n_f64,_m,)(pg, op1, op2);
}
622
// x-form add of an f16 scalar: same IR shape as the m-form here — dup of op2
// then one predicated fadd.
svfloat16_t test_svadd_n_f16_x(svbool_t pg, svfloat16_t op1, float16_t op2)
{
  // CHECK-LABEL: test_svadd_n_f16_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %[[DUP]])
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_n_f16,_x,)(pg, op1, op2);
}
632
// x-form add of an f32 scalar: dup of op2 then one predicated fadd.
svfloat32_t test_svadd_n_f32_x(svbool_t pg, svfloat32_t op1, float32_t op2)
{
  // CHECK-LABEL: test_svadd_n_f32_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %[[DUP]])
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_n_f32,_x,)(pg, op1, op2);
}
642
// x-form add of an f64 scalar: dup of op2 then one predicated fadd.
svfloat64_t test_svadd_n_f64_x(svbool_t pg, svfloat64_t op1, float64_t op2)
{
  // CHECK-LABEL: test_svadd_n_f64_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %[[DUP]])
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_n_f64,_x,)(pg, op1, op2);
}
652