// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -o - %s >/dev/null
7 #include <arm_sve.h>
8 
9 #ifdef SVE_OVERLOADED_FORMS
10 // A simple used,unused... macro, long enough to represent any SVE builtin.
11 #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
12 #else
13 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
14 #endif
15 
// _z form: inactive lanes of op1 are zeroed (the sel with zeroinitializer)
// before the predicated shift.
svint8_t test_svasr_s8_z(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svasr_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s8,_z,)(pg, op1, op2);
}
24 
// _z form for i16: predicate is narrowed from svbool_t, inactive lanes zeroed.
svint16_t test_svasr_s16_z(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svasr_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s16,_z,)(pg, op1, op2);
}
34 
// _z form for i32: predicate is narrowed from svbool_t, inactive lanes zeroed.
svint32_t test_svasr_s32_z(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svasr_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s32,_z,)(pg, op1, op2);
}
44 
// _z form for i64: predicate is narrowed from svbool_t, inactive lanes zeroed.
svint64_t test_svasr_s64_z(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s64,_z,)(pg, op1, op2);
}
54 
// _m form: inactive lanes keep op1; maps directly onto the predicated intrinsic.
svint8_t test_svasr_s8_m(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svasr_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s8,_m,)(pg, op1, op2);
}
62 
// _m form for i16: predicate narrowed, inactive lanes keep op1.
svint16_t test_svasr_s16_m(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svasr_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s16,_m,)(pg, op1, op2);
}
71 
// _m form for i32: predicate narrowed, inactive lanes keep op1.
svint32_t test_svasr_s32_m(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svasr_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s32,_m,)(pg, op1, op2);
}
80 
// _m form for i64: predicate narrowed, inactive lanes keep op1.
svint64_t test_svasr_s64_m(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s64,_m,)(pg, op1, op2);
}
89 
// _x form: inactive lanes are "don't care"; same codegen as the plain predicated call.
svint8_t test_svasr_s8_x(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svasr_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s8,_x,)(pg, op1, op2);
}
97 
// _x form for i16: predicate narrowed; inactive lanes are "don't care".
svint16_t test_svasr_s16_x(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svasr_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s16,_x,)(pg, op1, op2);
}
106 
// _x form for i32: predicate narrowed; inactive lanes are "don't care".
svint32_t test_svasr_s32_x(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svasr_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s32,_x,)(pg, op1, op2);
}
115 
// _x form for i64: predicate narrowed; inactive lanes are "don't care".
svint64_t test_svasr_s64_x(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_s64,_x,)(pg, op1, op2);
}
124 
// _n_z form: scalar shift amount is broadcast (dup), inactive lanes zeroed.
svint64_t test_svasr_n_s64_z(svbool_t pg, svint64_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_n_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s64,_z,)(pg, op1, op2);
}
135 
// _n_m form: scalar shift amount is broadcast; inactive lanes keep op1.
svint64_t test_svasr_n_s64_m(svbool_t pg, svint64_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_n_s64_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s64,_m,)(pg, op1, op2);
}
145 
// _n_x form: scalar shift amount is broadcast; inactive lanes are "don't care".
svint64_t test_svasr_n_s64_x(svbool_t pg, svint64_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_n_s64_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s64,_x,)(pg, op1, op2);
}
155 
// wide _z form: shift amounts come from a 64-bit element vector; inactive lanes zeroed.
svint8_t test_svasr_wide_s8_z(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s8,_z,)(pg, op1, op2);
}
164 
// wide _z form for i16: predicate narrowed, inactive lanes zeroed, 64-bit shift vector.
svint16_t test_svasr_wide_s16_z(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s16,_z,)(pg, op1, op2);
}
174 
// wide _z form for i32: predicate narrowed, inactive lanes zeroed, 64-bit shift vector.
svint32_t test_svasr_wide_s32_z(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s32,_z,)(pg, op1, op2);
}
184 
// wide _m form: inactive lanes keep op1; direct map to asr.wide.
svint8_t test_svasr_wide_s8_m(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s8,_m,)(pg, op1, op2);
}
192 
// wide _m form for i16: predicate narrowed; inactive lanes keep op1.
svint16_t test_svasr_wide_s16_m(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s16,_m,)(pg, op1, op2);
}
201 
// wide _m form for i32: predicate narrowed; inactive lanes keep op1.
svint32_t test_svasr_wide_s32_m(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s32,_m,)(pg, op1, op2);
}
210 
// wide _x form: inactive lanes are "don't care"; same codegen as _m here.
svint8_t test_svasr_wide_s8_x(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s8,_x,)(pg, op1, op2);
}
218 
// wide _x form for i16: predicate narrowed; inactive lanes are "don't care".
svint16_t test_svasr_wide_s16_x(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s16,_x,)(pg, op1, op2);
}
227 
// wide _x form for i32: predicate narrowed; inactive lanes are "don't care".
svint32_t test_svasr_wide_s32_x(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_s32,_x,)(pg, op1, op2);
}
236 
// _n_z form for i8: scalar shift broadcast via dup; inactive lanes zeroed.
svint8_t test_svasr_n_s8_z(svbool_t pg, svint8_t op1, uint8_t op2)
{
  // CHECK-LABEL: test_svasr_n_s8_z
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s8,_z,)(pg, op1, op2);
}
246 
// _n_z form for i16: predicate narrowed, scalar broadcast, inactive lanes zeroed.
svint16_t test_svasr_n_s16_z(svbool_t pg, svint16_t op1, uint16_t op2)
{
  // CHECK-LABEL: test_svasr_n_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s16,_z,)(pg, op1, op2);
}
257 
// _n_z form for i32: predicate narrowed, scalar broadcast, inactive lanes zeroed.
svint32_t test_svasr_n_s32_z(svbool_t pg, svint32_t op1, uint32_t op2)
{
  // CHECK-LABEL: test_svasr_n_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s32,_z,)(pg, op1, op2);
}
268 
// _n_m form for i8: scalar broadcast; inactive lanes keep op1.
svint8_t test_svasr_n_s8_m(svbool_t pg, svint8_t op1, uint8_t op2)
{
  // CHECK-LABEL: test_svasr_n_s8_m
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s8,_m,)(pg, op1, op2);
}
277 
// _n_m form for i16: predicate narrowed, scalar broadcast; inactive lanes keep op1.
svint16_t test_svasr_n_s16_m(svbool_t pg, svint16_t op1, uint16_t op2)
{
  // CHECK-LABEL: test_svasr_n_s16_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s16,_m,)(pg, op1, op2);
}
287 
// _n_m form for i32: predicate narrowed, scalar broadcast; inactive lanes keep op1.
svint32_t test_svasr_n_s32_m(svbool_t pg, svint32_t op1, uint32_t op2)
{
  // CHECK-LABEL: test_svasr_n_s32_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s32,_m,)(pg, op1, op2);
}
297 
// _n_x form for i8: scalar broadcast; inactive lanes are "don't care".
svint8_t test_svasr_n_s8_x(svbool_t pg, svint8_t op1, uint8_t op2)
{
  // CHECK-LABEL: test_svasr_n_s8_x
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s8,_x,)(pg, op1, op2);
}
306 
// _n_x form for i16: predicate narrowed, scalar broadcast; inactive lanes "don't care".
svint16_t test_svasr_n_s16_x(svbool_t pg, svint16_t op1, uint16_t op2)
{
  // CHECK-LABEL: test_svasr_n_s16_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s16,_x,)(pg, op1, op2);
}
316 
// _n_x form for i32: predicate narrowed, scalar broadcast; inactive lanes "don't care".
svint32_t test_svasr_n_s32_x(svbool_t pg, svint32_t op1, uint32_t op2)
{
  // CHECK-LABEL: test_svasr_n_s32_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr,_n_s32,_x,)(pg, op1, op2);
}
326 
// wide _n_m form for i8: scalar broadcast to a 64-bit element shift vector.
svint8_t test_svasr_wide_n_s8_m(svbool_t pg, svint8_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s8_m
  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s8,_m,)(pg, op1, op2);
}
335 
// wide _n_m form for i16: predicate narrowed; scalar broadcast to 64-bit elements.
svint16_t test_svasr_wide_n_s16_m(svbool_t pg, svint16_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s16_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s16,_m,)(pg, op1, op2);
}
345 
// wide _n_m form for i32: predicate narrowed; scalar broadcast to 64-bit elements.
svint32_t test_svasr_wide_n_s32_m(svbool_t pg, svint32_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s32_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s32,_m,)(pg, op1, op2);
}
355 
// wide _n_z form for i8: scalar broadcast, inactive lanes zeroed.
// NOTE: the sel-result capture was misnamed [[PG]]; renamed to [[SEL]] to
// match the sibling tests ([[PG]] conventionally names predicate captures).
svint8_t test_svasr_wide_n_s8_z(svbool_t pg, svint8_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s8_z
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s8,_z,)(pg, op1, op2);
}
365 
// wide _n_z form for i16: predicate narrowed, scalar broadcast, inactive lanes zeroed.
svint16_t test_svasr_wide_n_s16_z(svbool_t pg, svint16_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[OP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[OP]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s16,_z,)(pg, op1, op2);
}
376 
// wide _n_z form for i32: predicate narrowed, scalar broadcast, inactive lanes zeroed.
svint32_t test_svasr_wide_n_s32_z(svbool_t pg, svint32_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[OP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[OP]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s32,_z,)(pg, op1, op2);
}
387 
// wide _n_x form for i8: scalar broadcast; inactive lanes are "don't care".
svint8_t test_svasr_wide_n_s8_x(svbool_t pg, svint8_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s8_x
  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s8,_x,)(pg, op1, op2);
}
396 
// wide _n_x form for i16: predicate narrowed, scalar broadcast; lanes "don't care".
svint16_t test_svasr_wide_n_s16_x(svbool_t pg, svint16_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s16_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s16,_x,)(pg, op1, op2);
}
406 
// wide _n_x form for i32: predicate narrowed, scalar broadcast; lanes "don't care".
svint32_t test_svasr_wide_n_s32_x(svbool_t pg, svint32_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svasr_wide_n_s32_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svasr_wide,_n_s32,_x,)(pg, op1, op2);
}
416