1 // REQUIRES: aarch64-registered-target
2 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
3 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
4 // RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
5 // RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
6 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -o - %s >/dev/null
7 #include <arm_sve.h>
8
#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
// The overloaded build keeps only the base name (A1) and predication suffix (A3),
// e.g. SVE_ACLE_FUNC(svmad,_s8,_z,) -> svmad_z; the non-overloaded build pastes
// all four parts together, yielding the fully-typed name svmad_s8_z.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
15
// _z (zeroing): op1's inactive lanes are zeroed via sel before the mad.
// For i8 elements the full nxv16i1 predicate is used with no conversion.
svint8_t test_svmad_s8_z(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
{
// CHECK-LABEL: test_svmad_s8_z
// CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mad.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_s8,_z,)(pg, op1, op2, op3);
}

// _z: the svbool predicate is narrowed to nxv8i1 and op1 zeroed before the mad.
svint16_t test_svmad_s16_z(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
{
// CHECK-LABEL: test_svmad_s16_z
// CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mad.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_s16,_z,)(pg, op1, op2, op3);
}

// _z: predicate narrowed to nxv4i1, op1 zeroed before the mad.
svint32_t test_svmad_s32_z(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3)
{
// CHECK-LABEL: test_svmad_s32_z
// CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mad.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_s32,_z,)(pg, op1, op2, op3);
}

// _z: predicate narrowed to nxv2i1, op1 zeroed before the mad.
svint64_t test_svmad_s64_z(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3)
{
// CHECK-LABEL: test_svmad_s64_z
// CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mad.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_s64,_z,)(pg, op1, op2, op3);
}
54
// Unsigned _z forms: identical codegen to the signed variants (mad is
// sign-agnostic); only the ACLE type suffix differs.
svuint8_t test_svmad_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3)
{
// CHECK-LABEL: test_svmad_u8_z
// CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mad.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_u8,_z,)(pg, op1, op2, op3);
}

// _z: predicate narrowed to nxv8i1, op1 zeroed before the mad.
svuint16_t test_svmad_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3)
{
// CHECK-LABEL: test_svmad_u16_z
// CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mad.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_u16,_z,)(pg, op1, op2, op3);
}

// _z: predicate narrowed to nxv4i1, op1 zeroed before the mad.
svuint32_t test_svmad_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3)
{
// CHECK-LABEL: test_svmad_u32_z
// CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mad.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_u32,_z,)(pg, op1, op2, op3);
}

// _z: predicate narrowed to nxv2i1, op1 zeroed before the mad.
svuint64_t test_svmad_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3)
{
// CHECK-LABEL: test_svmad_u64_z
// CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mad.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_u64,_z,)(pg, op1, op2, op3);
}
93
// _m (merging): op1 is passed straight through to the mad intrinsic —
// inactive lanes keep op1's value, so no sel is emitted.
svint8_t test_svmad_s8_m(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
{
// CHECK-LABEL: test_svmad_s8_m
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mad.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_s8,_m,)(pg, op1, op2, op3);
}

// _m: only the predicate conversion precedes the mad for 16-bit elements.
svint16_t test_svmad_s16_m(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
{
// CHECK-LABEL: test_svmad_s16_m
// CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mad.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_s16,_m,)(pg, op1, op2, op3);
}

// _m: only the predicate conversion precedes the mad for 32-bit elements.
svint32_t test_svmad_s32_m(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3)
{
// CHECK-LABEL: test_svmad_s32_m
// CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mad.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_s32,_m,)(pg, op1, op2, op3);
}

// _m: only the predicate conversion precedes the mad for 64-bit elements.
svint64_t test_svmad_s64_m(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3)
{
// CHECK-LABEL: test_svmad_s64_m
// CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mad.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_s64,_m,)(pg, op1, op2, op3);
}
128
// Unsigned _m forms: same IR as the signed merging variants.
svuint8_t test_svmad_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3)
{
// CHECK-LABEL: test_svmad_u8_m
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mad.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_u8,_m,)(pg, op1, op2, op3);
}

// _m: predicate narrowed to nxv8i1, operands passed through unchanged.
svuint16_t test_svmad_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3)
{
// CHECK-LABEL: test_svmad_u16_m
// CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mad.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_u16,_m,)(pg, op1, op2, op3);
}

// _m: predicate narrowed to nxv4i1, operands passed through unchanged.
svuint32_t test_svmad_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3)
{
// CHECK-LABEL: test_svmad_u32_m
// CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mad.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_u32,_m,)(pg, op1, op2, op3);
}

// _m: predicate narrowed to nxv2i1, operands passed through unchanged.
svuint64_t test_svmad_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3)
{
// CHECK-LABEL: test_svmad_u64_m
// CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mad.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_u64,_m,)(pg, op1, op2, op3);
}
163
// _x (don't-care): inactive lanes are unspecified; the emitted IR here is
// the same as the merging form.
svint8_t test_svmad_s8_x(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
{
// CHECK-LABEL: test_svmad_s8_x
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mad.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_s8,_x,)(pg, op1, op2, op3);
}

// _x: predicate narrowed to nxv8i1, operands passed through unchanged.
svint16_t test_svmad_s16_x(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
{
// CHECK-LABEL: test_svmad_s16_x
// CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mad.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_s16,_x,)(pg, op1, op2, op3);
}

// _x: predicate narrowed to nxv4i1, operands passed through unchanged.
svint32_t test_svmad_s32_x(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3)
{
// CHECK-LABEL: test_svmad_s32_x
// CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mad.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_s32,_x,)(pg, op1, op2, op3);
}

// _x: predicate narrowed to nxv2i1, operands passed through unchanged.
svint64_t test_svmad_s64_x(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3)
{
// CHECK-LABEL: test_svmad_s64_x
// CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mad.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_s64,_x,)(pg, op1, op2, op3);
}
198
// Unsigned _x forms: same IR as the signed don't-care variants.
svuint8_t test_svmad_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3)
{
// CHECK-LABEL: test_svmad_u8_x
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mad.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_u8,_x,)(pg, op1, op2, op3);
}

// _x: predicate narrowed to nxv8i1, operands passed through unchanged.
svuint16_t test_svmad_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3)
{
// CHECK-LABEL: test_svmad_u16_x
// CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mad.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_u16,_x,)(pg, op1, op2, op3);
}

// _x: predicate narrowed to nxv4i1, operands passed through unchanged.
svuint32_t test_svmad_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3)
{
// CHECK-LABEL: test_svmad_u32_x
// CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mad.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_u32,_x,)(pg, op1, op2, op3);
}

// _x: predicate narrowed to nxv2i1, operands passed through unchanged.
svuint64_t test_svmad_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3)
{
// CHECK-LABEL: test_svmad_u64_x
// CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mad.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_u64,_x,)(pg, op1, op2, op3);
}
233
// _n_ _z: scalar op3 is splatted with dup.x and op1's inactive lanes are
// zeroed via sel before the mad.
svint8_t test_svmad_n_s8_z(svbool_t pg, svint8_t op1, svint8_t op2, int8_t op3)
{
// CHECK-LABEL: test_svmad_n_s8_z
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
// CHECK-DAG: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mad.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_s8,_z,)(pg, op1, op2, op3);
}

// _n_ _z: predicate narrowed, scalar splatted, op1 zeroed before the mad.
svint16_t test_svmad_n_s16_z(svbool_t pg, svint16_t op1, svint16_t op2, int16_t op3)
{
// CHECK-LABEL: test_svmad_n_s16_z
// CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
// CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mad.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_s16,_z,)(pg, op1, op2, op3);
}
254
// _n_ _z: predicate narrowed, scalar splatted, op1 zeroed before the mad.
// Fix: the CHECK line below referenced %[[SEL]] without ever defining it,
// which makes FileCheck fail with an undefined-variable error. Add the
// missing CHECK-DAG for the sel call, matching every sibling _z test.
svint32_t test_svmad_n_s32_z(svbool_t pg, svint32_t op1, svint32_t op2, int32_t op3)
{
// CHECK-LABEL: test_svmad_n_s32_z
// CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
// CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mad.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_s32,_z,)(pg, op1, op2, op3);
}
264
// _n_ _z: predicate narrowed, scalar splatted, op1 zeroed before the mad.
svint64_t test_svmad_n_s64_z(svbool_t pg, svint64_t op1, svint64_t op2, int64_t op3)
{
// CHECK-LABEL: test_svmad_n_s64_z
// CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
// CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mad.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_s64,_z,)(pg, op1, op2, op3);
}

// _n_ _z unsigned i8: scalar splatted, op1 zeroed; full predicate used.
svuint8_t test_svmad_n_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2, uint8_t op3)
{
// CHECK-LABEL: test_svmad_n_u8_z
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
// CHECK-DAG: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mad.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_u8,_z,)(pg, op1, op2, op3);
}
285
// _n_ _z: predicate narrowed, scalar splatted, op1 zeroed before the mad.
svuint16_t test_svmad_n_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2, uint16_t op3)
{
// CHECK-LABEL: test_svmad_n_u16_z
// CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
// CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mad.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_u16,_z,)(pg, op1, op2, op3);
}
296
// _n_ _z: predicate narrowed, scalar splatted, op1 zeroed before the mad.
// Fix: the CHECK line below referenced %[[SEL]] without ever defining it,
// which makes FileCheck fail with an undefined-variable error. Add the
// missing CHECK-DAG for the sel call, matching every sibling _z test.
svuint32_t test_svmad_n_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2, uint32_t op3)
{
// CHECK-LABEL: test_svmad_n_u32_z
// CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
// CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mad.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_u32,_z,)(pg, op1, op2, op3);
}
306
// _n_ _z: predicate narrowed, scalar splatted, op1 zeroed before the mad.
svuint64_t test_svmad_n_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2, uint64_t op3)
{
// CHECK-LABEL: test_svmad_n_u64_z
// CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
// CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mad.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_u64,_z,)(pg, op1, op2, op3);
}
317
// _n_ _m: scalar op3 is splatted with dup.x; merging form, so no sel.
svint8_t test_svmad_n_s8_m(svbool_t pg, svint8_t op1, svint8_t op2, int8_t op3)
{
// CHECK-LABEL: test_svmad_n_s8_m
// CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mad.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_s8,_m,)(pg, op1, op2, op3);
}

// _n_ _m: predicate narrowed and scalar splatted; no sel for merging.
svint16_t test_svmad_n_s16_m(svbool_t pg, svint16_t op1, svint16_t op2, int16_t op3)
{
// CHECK-LABEL: test_svmad_n_s16_m
// CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mad.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_s16,_m,)(pg, op1, op2, op3);
}

// _n_ _m: predicate narrowed and scalar splatted; no sel for merging.
svint32_t test_svmad_n_s32_m(svbool_t pg, svint32_t op1, svint32_t op2, int32_t op3)
{
// CHECK-LABEL: test_svmad_n_s32_m
// CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mad.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_s32,_m,)(pg, op1, op2, op3);
}

// _n_ _m: predicate narrowed and scalar splatted; no sel for merging.
svint64_t test_svmad_n_s64_m(svbool_t pg, svint64_t op1, svint64_t op2, int64_t op3)
{
// CHECK-LABEL: test_svmad_n_s64_m
// CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mad.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_s64,_m,)(pg, op1, op2, op3);
}
356
// Unsigned _n_ _m forms: same IR as the signed scalar-operand merging tests.
svuint8_t test_svmad_n_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2, uint8_t op3)
{
// CHECK-LABEL: test_svmad_n_u8_m
// CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mad.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_u8,_m,)(pg, op1, op2, op3);
}

// _n_ _m: predicate narrowed and scalar splatted; no sel for merging.
svuint16_t test_svmad_n_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2, uint16_t op3)
{
// CHECK-LABEL: test_svmad_n_u16_m
// CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mad.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_u16,_m,)(pg, op1, op2, op3);
}

// _n_ _m: predicate narrowed and scalar splatted; no sel for merging.
svuint32_t test_svmad_n_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2, uint32_t op3)
{
// CHECK-LABEL: test_svmad_n_u32_m
// CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mad.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_u32,_m,)(pg, op1, op2, op3);
}

// _n_ _m: predicate narrowed and scalar splatted; no sel for merging.
svuint64_t test_svmad_n_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2, uint64_t op3)
{
// CHECK-LABEL: test_svmad_n_u64_m
// CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mad.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_u64,_m,)(pg, op1, op2, op3);
}
395
// _n_ _x: scalar op3 splatted with dup.x; don't-care form matches merging IR.
svint8_t test_svmad_n_s8_x(svbool_t pg, svint8_t op1, svint8_t op2, int8_t op3)
{
// CHECK-LABEL: test_svmad_n_s8_x
// CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mad.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_s8,_x,)(pg, op1, op2, op3);
}

// _n_ _x: predicate narrowed and scalar splatted; no sel emitted.
svint16_t test_svmad_n_s16_x(svbool_t pg, svint16_t op1, svint16_t op2, int16_t op3)
{
// CHECK-LABEL: test_svmad_n_s16_x
// CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mad.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_s16,_x,)(pg, op1, op2, op3);
}

// _n_ _x: predicate narrowed and scalar splatted; no sel emitted.
svint32_t test_svmad_n_s32_x(svbool_t pg, svint32_t op1, svint32_t op2, int32_t op3)
{
// CHECK-LABEL: test_svmad_n_s32_x
// CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mad.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_s32,_x,)(pg, op1, op2, op3);
}

// _n_ _x: predicate narrowed and scalar splatted; no sel emitted.
svint64_t test_svmad_n_s64_x(svbool_t pg, svint64_t op1, svint64_t op2, int64_t op3)
{
// CHECK-LABEL: test_svmad_n_s64_x
// CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mad.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svmad,_n_s64,_x,)(pg, op1, op2, op3);
}
434
// _n form: scalar op3 is splatted via sve.dup.x. For i8 elements the full
// <vscale x 16 x i1> predicate is used directly (no convert.from.svbool).
test_svmad_n_u8_x(svbool_t pg,svuint8_t op1,svuint8_t op2,uint8_t op3)435 svuint8_t test_svmad_n_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2, uint8_t op3)
436 {
437 // CHECK-LABEL: test_svmad_n_u8_x
438 // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
439 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mad.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
440 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
441 return SVE_ACLE_FUNC(svmad,_n_u8,_x,)(pg, op1, op2, op3);
442 }
443
// Unsigned variant: same mad intrinsic/codegen as the signed _n_s16_x form
// (the CHECK lines below match identical IR apart from the label).
test_svmad_n_u16_x(svbool_t pg,svuint16_t op1,svuint16_t op2,uint16_t op3)444 svuint16_t test_svmad_n_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2, uint16_t op3)
445 {
446 // CHECK-LABEL: test_svmad_n_u16_x
447 // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
448 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
449 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mad.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
450 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
451 return SVE_ACLE_FUNC(svmad,_n_u16,_x,)(pg, op1, op2, op3);
452 }
453
// Unsigned variant: same mad intrinsic/codegen as the signed _n_s32_x form.
test_svmad_n_u32_x(svbool_t pg,svuint32_t op1,svuint32_t op2,uint32_t op3)454 svuint32_t test_svmad_n_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2, uint32_t op3)
455 {
456 // CHECK-LABEL: test_svmad_n_u32_x
457 // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
458 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
459 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mad.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
460 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
461 return SVE_ACLE_FUNC(svmad,_n_u32,_x,)(pg, op1, op2, op3);
462 }
463
// Unsigned variant: same mad intrinsic/codegen as the signed _n_s64_x form.
test_svmad_n_u64_x(svbool_t pg,svuint64_t op1,svuint64_t op2,uint64_t op3)464 svuint64_t test_svmad_n_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2, uint64_t op3)
465 {
466 // CHECK-LABEL: test_svmad_n_u64_x
467 // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
468 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
469 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mad.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
470 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
471 return SVE_ACLE_FUNC(svmad,_n_u64,_x,)(pg, op1, op2, op3);
472 }
473
// Float variant, _z (zeroing) predication: op1 is zero-merged via sve.sel
// before being passed as the accumulator of fmad.
test_svmad_f16_z(svbool_t pg,svfloat16_t op1,svfloat16_t op2,svfloat16_t op3)474 svfloat16_t test_svmad_f16_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3)
475 {
476 // CHECK-LABEL: test_svmad_f16_z
477 // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
478 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.sel.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> zeroinitializer)
479 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fmad.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %[[SEL]], <vscale x 8 x half> %op2, <vscale x 8 x half> %op3)
480 // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
481 return SVE_ACLE_FUNC(svmad,_f16,_z,)(pg, op1, op2, op3);
482 }
483
// Float variant, _z predication: op1 zero-merged via sve.sel before fmad.
test_svmad_f32_z(svbool_t pg,svfloat32_t op1,svfloat32_t op2,svfloat32_t op3)484 svfloat32_t test_svmad_f32_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3)
485 {
486 // CHECK-LABEL: test_svmad_f32_z
487 // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
488 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.sel.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> zeroinitializer)
489 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fmad.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %[[SEL]], <vscale x 4 x float> %op2, <vscale x 4 x float> %op3)
490 // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
491 return SVE_ACLE_FUNC(svmad,_f32,_z,)(pg, op1, op2, op3);
492 }
493
// Float variant, _z predication: op1 zero-merged via sve.sel before fmad.
test_svmad_f64_z(svbool_t pg,svfloat64_t op1,svfloat64_t op2,svfloat64_t op3)494 svfloat64_t test_svmad_f64_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)
495 {
496 // CHECK-LABEL: test_svmad_f64_z
497 // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
498 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> zeroinitializer)
499 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fmad.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %[[SEL]], <vscale x 2 x double> %op2, <vscale x 2 x double> %op3)
500 // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
501 return SVE_ACLE_FUNC(svmad,_f64,_z,)(pg, op1, op2, op3);
502 }
503
// _m (merging) predication: op1 is passed straight to fmad with no sel.
test_svmad_f16_m(svbool_t pg,svfloat16_t op1,svfloat16_t op2,svfloat16_t op3)504 svfloat16_t test_svmad_f16_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3)
505 {
506 // CHECK-LABEL: test_svmad_f16_m
507 // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
508 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fmad.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %op2, <vscale x 8 x half> %op3)
509 // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
510 return SVE_ACLE_FUNC(svmad,_f16,_m,)(pg, op1, op2, op3);
511 }
512
// _m (merging) predication: op1 is passed straight to fmad with no sel.
test_svmad_f32_m(svbool_t pg,svfloat32_t op1,svfloat32_t op2,svfloat32_t op3)513 svfloat32_t test_svmad_f32_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3)
514 {
515 // CHECK-LABEL: test_svmad_f32_m
516 // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
517 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fmad.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %op2, <vscale x 4 x float> %op3)
518 // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
519 return SVE_ACLE_FUNC(svmad,_f32,_m,)(pg, op1, op2, op3);
520 }
521
// _m (merging) predication: op1 is passed straight to fmad with no sel.
test_svmad_f64_m(svbool_t pg,svfloat64_t op1,svfloat64_t op2,svfloat64_t op3)522 svfloat64_t test_svmad_f64_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)
523 {
524 // CHECK-LABEL: test_svmad_f64_m
525 // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
526 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fmad.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %op2, <vscale x 2 x double> %op3)
527 // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
528 return SVE_ACLE_FUNC(svmad,_f64,_m,)(pg, op1, op2, op3);
529 }
530
// _x ("don't care") predication: expected IR matches the _m form here —
// op1 is forwarded to fmad unmodified.
test_svmad_f16_x(svbool_t pg,svfloat16_t op1,svfloat16_t op2,svfloat16_t op3)531 svfloat16_t test_svmad_f16_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3)
532 {
533 // CHECK-LABEL: test_svmad_f16_x
534 // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
535 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fmad.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %op2, <vscale x 8 x half> %op3)
536 // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
537 return SVE_ACLE_FUNC(svmad,_f16,_x,)(pg, op1, op2, op3);
538 }
539
// _x predication: expected IR matches the _m form — op1 forwarded unmodified.
test_svmad_f32_x(svbool_t pg,svfloat32_t op1,svfloat32_t op2,svfloat32_t op3)540 svfloat32_t test_svmad_f32_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3)
541 {
542 // CHECK-LABEL: test_svmad_f32_x
543 // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
544 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fmad.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %op2, <vscale x 4 x float> %op3)
545 // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
546 return SVE_ACLE_FUNC(svmad,_f32,_x,)(pg, op1, op2, op3);
547 }
548
// _x predication: expected IR matches the _m form — op1 forwarded unmodified.
test_svmad_f64_x(svbool_t pg,svfloat64_t op1,svfloat64_t op2,svfloat64_t op3)549 svfloat64_t test_svmad_f64_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)
550 {
551 // CHECK-LABEL: test_svmad_f64_x
552 // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
553 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fmad.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %op2, <vscale x 2 x double> %op3)
554 // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
555 return SVE_ACLE_FUNC(svmad,_f64,_x,)(pg, op1, op2, op3);
556 }
557
// _n + _z: scalar op3 is splatted via sve.dup.x AND op1 is zero-merged via
// sve.sel; both feed the fmad (dup/sel order is unconstrained, hence CHECK-DAG).
test_svmad_n_f16_z(svbool_t pg,svfloat16_t op1,svfloat16_t op2,float16_t op3)558 svfloat16_t test_svmad_n_f16_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, float16_t op3)
559 {
560 // CHECK-LABEL: test_svmad_n_f16_z
561 // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
562 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op3)
563 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.sel.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> zeroinitializer)
564 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fmad.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %[[SEL]], <vscale x 8 x half> %op2, <vscale x 8 x half> %[[DUP]])
565 // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
566 return SVE_ACLE_FUNC(svmad,_n_f16,_z,)(pg, op1, op2, op3);
567 }
568
// _n + _z: scalar op3 splatted via sve.dup.x and op1 zero-merged via sve.sel.
test_svmad_n_f32_z(svbool_t pg,svfloat32_t op1,svfloat32_t op2,float32_t op3)569 svfloat32_t test_svmad_n_f32_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, float32_t op3)
570 {
571 // CHECK-LABEL: test_svmad_n_f32_z
572 // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
573 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op3)
574 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.sel.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> zeroinitializer)
575 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fmad.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %[[SEL]], <vscale x 4 x float> %op2, <vscale x 4 x float> %[[DUP]])
576 // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
577 return SVE_ACLE_FUNC(svmad,_n_f32,_z,)(pg, op1, op2, op3);
578 }
579
// _n + _z: scalar op3 splatted via sve.dup.x and op1 zero-merged via sve.sel.
test_svmad_n_f64_z(svbool_t pg,svfloat64_t op1,svfloat64_t op2,float64_t op3)580 svfloat64_t test_svmad_n_f64_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, float64_t op3)
581 {
582 // CHECK-LABEL: test_svmad_n_f64_z
583 // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
584 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op3)
585 // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> zeroinitializer)
586 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fmad.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %[[SEL]], <vscale x 2 x double> %op2, <vscale x 2 x double> %[[DUP]])
587 // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
588 return SVE_ACLE_FUNC(svmad,_n_f64,_z,)(pg, op1, op2, op3);
589 }
590
// _n + _m: scalar op3 splatted via sve.dup.x; op1 passed to fmad unmodified.
test_svmad_n_f16_m(svbool_t pg,svfloat16_t op1,svfloat16_t op2,float16_t op3)591 svfloat16_t test_svmad_n_f16_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, float16_t op3)
592 {
593 // CHECK-LABEL: test_svmad_n_f16_m
594 // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
595 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op3)
596 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fmad.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %op2, <vscale x 8 x half> %[[DUP]])
597 // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
598 return SVE_ACLE_FUNC(svmad,_n_f16,_m,)(pg, op1, op2, op3);
599 }
600
// _n + _m: scalar op3 splatted via sve.dup.x; op1 passed to fmad unmodified.
test_svmad_n_f32_m(svbool_t pg,svfloat32_t op1,svfloat32_t op2,float32_t op3)601 svfloat32_t test_svmad_n_f32_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, float32_t op3)
602 {
603 // CHECK-LABEL: test_svmad_n_f32_m
604 // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
605 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op3)
606 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fmad.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %op2, <vscale x 4 x float> %[[DUP]])
607 // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
608 return SVE_ACLE_FUNC(svmad,_n_f32,_m,)(pg, op1, op2, op3);
609 }
610
// _n + _m: scalar op3 splatted via sve.dup.x; op1 passed to fmad unmodified.
test_svmad_n_f64_m(svbool_t pg,svfloat64_t op1,svfloat64_t op2,float64_t op3)611 svfloat64_t test_svmad_n_f64_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, float64_t op3)
612 {
613 // CHECK-LABEL: test_svmad_n_f64_m
614 // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
615 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op3)
616 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fmad.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %op2, <vscale x 2 x double> %[[DUP]])
617 // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
618 return SVE_ACLE_FUNC(svmad,_n_f64,_m,)(pg, op1, op2, op3);
619 }
620
// _n + _x: scalar op3 splatted via sve.dup.x; expected IR matches the _m form.
test_svmad_n_f16_x(svbool_t pg,svfloat16_t op1,svfloat16_t op2,float16_t op3)621 svfloat16_t test_svmad_n_f16_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, float16_t op3)
622 {
623 // CHECK-LABEL: test_svmad_n_f16_x
624 // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
625 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op3)
626 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fmad.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %op2, <vscale x 8 x half> %[[DUP]])
627 // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
628 return SVE_ACLE_FUNC(svmad,_n_f16,_x,)(pg, op1, op2, op3);
629 }
630
// _n + _x: scalar op3 splatted via sve.dup.x; expected IR matches the _m form.
test_svmad_n_f32_x(svbool_t pg,svfloat32_t op1,svfloat32_t op2,float32_t op3)631 svfloat32_t test_svmad_n_f32_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, float32_t op3)
632 {
633 // CHECK-LABEL: test_svmad_n_f32_x
634 // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
635 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op3)
636 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fmad.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %op2, <vscale x 4 x float> %[[DUP]])
637 // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
638 return SVE_ACLE_FUNC(svmad,_n_f32,_x,)(pg, op1, op2, op3);
639 }
640
// _n + _x: scalar op3 splatted via sve.dup.x; expected IR matches the _m form.
test_svmad_n_f64_x(svbool_t pg,svfloat64_t op1,svfloat64_t op2,float64_t op3)641 svfloat64_t test_svmad_n_f64_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, float64_t op3)
642 {
643 // CHECK-LABEL: test_svmad_n_f64_x
644 // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
645 // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op3)
646 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fmad.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %op2, <vscale x 2 x double> %[[DUP]])
647 // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
648 return SVE_ACLE_FUNC(svmad,_n_f64,_x,)(pg, op1, op2, op3);
649 }
650