// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o - %s >/dev/null 2>%t
// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t

// If this check fails please read test/CodeGen/aarch64-sve-intrinsics/README for instructions on how to resolve it.
// ASM-NOT: warning
9 #include <arm_sve.h>
10 
11 #ifdef SVE_OVERLOADED_FORMS
12 // A simple used,unused... macro, long enough to represent any SVE builtin.
13 #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
14 #else
15 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
16 #endif
17 
test_svwhilelt_b8_s32(int32_t op1,int32_t op2)18 svbool_t test_svwhilelt_b8_s32(int32_t op1, int32_t op2)
19 {
20   // CHECK-LABEL: test_svwhilelt_b8_s32
21   // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i32(i32 %op1, i32 %op2)
22   // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
23   return SVE_ACLE_FUNC(svwhilelt_b8,_s32,,)(op1, op2);
24 }
25 
test_svwhilelt_b16_s32(int32_t op1,int32_t op2)26 svbool_t test_svwhilelt_b16_s32(int32_t op1, int32_t op2)
27 {
28   // CHECK-LABEL: test_svwhilelt_b16_s32
29   // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i32(i32 %op1, i32 %op2)
30   // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
31   // CHECK: ret <vscale x 16 x i1> %[[CAST]]
32   return SVE_ACLE_FUNC(svwhilelt_b16,_s32,,)(op1, op2);
33 }
34 
test_svwhilelt_b32_s32(int32_t op1,int32_t op2)35 svbool_t test_svwhilelt_b32_s32(int32_t op1, int32_t op2)
36 {
37   // CHECK-LABEL: test_svwhilelt_b32_s32
38   // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i32(i32 %op1, i32 %op2)
39   // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
40   // CHECK: ret <vscale x 16 x i1> %[[CAST]]
41   return SVE_ACLE_FUNC(svwhilelt_b32,_s32,,)(op1, op2);
42 }
43 
test_svwhilelt_b64_s32(int32_t op1,int32_t op2)44 svbool_t test_svwhilelt_b64_s32(int32_t op1, int32_t op2)
45 {
46   // CHECK-LABEL: test_svwhilelt_b64_s32
47   // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32 %op1, i32 %op2)
48   // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
49   // CHECK: ret <vscale x 16 x i1> %[[CAST]]
50   return SVE_ACLE_FUNC(svwhilelt_b64,_s32,,)(op1, op2);
51 }
52 
test_svwhilelt_b8_u32(uint32_t op1,uint32_t op2)53 svbool_t test_svwhilelt_b8_u32(uint32_t op1, uint32_t op2)
54 {
55   // CHECK-LABEL: test_svwhilelt_b8_u32
56   // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32 %op1, i32 %op2)
57   // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
58   return SVE_ACLE_FUNC(svwhilelt_b8,_u32,,)(op1, op2);
59 }
60 
test_svwhilelt_b16_u32(uint32_t op1,uint32_t op2)61 svbool_t test_svwhilelt_b16_u32(uint32_t op1, uint32_t op2)
62 {
63   // CHECK-LABEL: test_svwhilelt_b16_u32
64   // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i32(i32 %op1, i32 %op2)
65   // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
66   // CHECK: ret <vscale x 16 x i1> %[[CAST]]
67   return SVE_ACLE_FUNC(svwhilelt_b16,_u32,,)(op1, op2);
68 }
69 
test_svwhilelt_b32_u32(uint32_t op1,uint32_t op2)70 svbool_t test_svwhilelt_b32_u32(uint32_t op1, uint32_t op2)
71 {
72   // CHECK-LABEL: test_svwhilelt_b32_u32
73   // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i32(i32 %op1, i32 %op2)
74   // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
75   // CHECK: ret <vscale x 16 x i1> %[[CAST]]
76   return SVE_ACLE_FUNC(svwhilelt_b32,_u32,,)(op1, op2);
77 }
78 
test_svwhilelt_b64_u32(uint32_t op1,uint32_t op2)79 svbool_t test_svwhilelt_b64_u32(uint32_t op1, uint32_t op2)
80 {
81   // CHECK-LABEL: test_svwhilelt_b64_u32
82   // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32 %op1, i32 %op2)
83   // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
84   // CHECK: ret <vscale x 16 x i1> %[[CAST]]
85   return SVE_ACLE_FUNC(svwhilelt_b64,_u32,,)(op1, op2);
86 }
87 
test_svwhilelt_b8_s64(int64_t op1,int64_t op2)88 svbool_t test_svwhilelt_b8_s64(int64_t op1, int64_t op2)
89 {
90   // CHECK-LABEL: test_svwhilelt_b8_s64
91   // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64 %op1, i64 %op2)
92   // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
93   return SVE_ACLE_FUNC(svwhilelt_b8,_s64,,)(op1, op2);
94 }
95 
test_svwhilelt_b16_s64(int64_t op1,int64_t op2)96 svbool_t test_svwhilelt_b16_s64(int64_t op1, int64_t op2)
97 {
98   // CHECK-LABEL: test_svwhilelt_b16_s64
99   // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i64(i64 %op1, i64 %op2)
100   // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
101   // CHECK: ret <vscale x 16 x i1> %[[CAST]]
102   return SVE_ACLE_FUNC(svwhilelt_b16,_s64,,)(op1, op2);
103 }
104 
test_svwhilelt_b32_s64(int64_t op1,int64_t op2)105 svbool_t test_svwhilelt_b32_s64(int64_t op1, int64_t op2)
106 {
107   // CHECK-LABEL: test_svwhilelt_b32_s64
108   // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64 %op1, i64 %op2)
109   // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
110   // CHECK: ret <vscale x 16 x i1> %[[CAST]]
111   return SVE_ACLE_FUNC(svwhilelt_b32,_s64,,)(op1, op2);
112 }
113 
test_svwhilelt_b64_s64(int64_t op1,int64_t op2)114 svbool_t test_svwhilelt_b64_s64(int64_t op1, int64_t op2)
115 {
116   // CHECK-LABEL: test_svwhilelt_b64_s64
117   // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i64(i64 %op1, i64 %op2)
118   // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
119   // CHECK: ret <vscale x 16 x i1> %[[CAST]]
120   return SVE_ACLE_FUNC(svwhilelt_b64,_s64,,)(op1, op2);
121 }
122 
test_svwhilelt_b8_u64(uint64_t op1,uint64_t op2)123 svbool_t test_svwhilelt_b8_u64(uint64_t op1, uint64_t op2)
124 {
125   // CHECK-LABEL: test_svwhilelt_b8_u64
126   // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64 %op1, i64 %op2)
127   // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
128   return SVE_ACLE_FUNC(svwhilelt_b8,_u64,,)(op1, op2);
129 }
130 
test_svwhilelt_b16_u64(uint64_t op1,uint64_t op2)131 svbool_t test_svwhilelt_b16_u64(uint64_t op1, uint64_t op2)
132 {
133   // CHECK-LABEL: test_svwhilelt_b16_u64
134   // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i64(i64 %op1, i64 %op2)
135   // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
136   // CHECK: ret <vscale x 16 x i1> %[[CAST]]
137   return SVE_ACLE_FUNC(svwhilelt_b16,_u64,,)(op1, op2);
138 }
139 
test_svwhilelt_b32_u64(uint64_t op1,uint64_t op2)140 svbool_t test_svwhilelt_b32_u64(uint64_t op1, uint64_t op2)
141 {
142   // CHECK-LABEL: test_svwhilelt_b32_u64
143   // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i64(i64 %op1, i64 %op2)
144   // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
145   // CHECK: ret <vscale x 16 x i1> %[[CAST]]
146   return SVE_ACLE_FUNC(svwhilelt_b32,_u64,,)(op1, op2);
147 }
148 
test_svwhilelt_b64_u64(uint64_t op1,uint64_t op2)149 svbool_t test_svwhilelt_b64_u64(uint64_t op1, uint64_t op2)
150 {
151   // CHECK-LABEL: test_svwhilelt_b64_u64
152   // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64 %op1, i64 %op2)
153   // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
154   // CHECK: ret <vscale x 16 x i1> %[[CAST]]
155   return SVE_ACLE_FUNC(svwhilelt_b64,_u64,,)(op1, op2);
156 }
157