// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -o - %s >/dev/null
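// The final RUN line is not piped to FileCheck; it only verifies that the
// backend can lower these intrinsics all the way to assembly without error.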
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
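
// For example, SVE_ACLE_FUNC(svext,_s8,,) expands to the overloaded name
// "svext" when SVE_OVERLOADED_FORMS is defined, and to the type-suffixed name
// "svext_s8" otherwise. Both forms lower to the same llvm.aarch64.sve.ext
// intrinsic, which is why a single set of CHECK lines serves all four
// FileCheck RUN configurations.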

svint8_t test_svext_s8(svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svext_s8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 0)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svext,_s8,,)(op1, op2, 0);
}
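
// svext extracts a vector from the concatenation of op1 and op2, starting at
// an immediate element index. The SVE ACLE constrains that index to
// 0..(2048 / element-bits) - 1: a maximum of 255 for 8-bit elements, 127 for
// 16-bit, 63 for 32-bit, and 31 for 64-bit. The tests below exercise index 0
// and that maximum for each element width.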

svint8_t test_svext_s8_1(svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svext_s8_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 255)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svext,_s8,,)(op1, op2, 255);
}

svint16_t test_svext_s16(svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svext_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svext,_s16,,)(op1, op2, 0);
}

svint16_t test_svext_s16_1(svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svext_s16_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 127)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svext,_s16,,)(op1, op2, 127);
}

svint32_t test_svext_s32(svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svext_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svext,_s32,,)(op1, op2, 0);
}

svint32_t test_svext_s32_1(svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svext_s32_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 63)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svext,_s32,,)(op1, op2, 63);
}

svint64_t test_svext_s64(svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svext_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 0)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svext,_s64,,)(op1, op2, 0);
}

svint64_t test_svext_s64_1(svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svext_s64_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 31)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svext,_s64,,)(op1, op2, 31);
}
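
// The unsigned and floating-point variants below lower to the same
// llvm.aarch64.sve.ext intrinsic as the signed tests above; only the vector
// element type changes.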

svuint8_t test_svext_u8(svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svext_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 255)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svext,_u8,,)(op1, op2, 255);
}

svuint16_t test_svext_u16(svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svext_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 127)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svext,_u16,,)(op1, op2, 127);
}

svuint32_t test_svext_u32(svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svext_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 63)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svext,_u32,,)(op1, op2, 63);
}

svuint64_t test_svext_u64(svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svext_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 31)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svext,_u64,,)(op1, op2, 31);
}

svfloat16_t test_svext_f16(svfloat16_t op1, svfloat16_t op2)
{
  // CHECK-LABEL: test_svext_f16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ext.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2, i32 127)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svext,_f16,,)(op1, op2, 127);
}

svfloat32_t test_svext_f32(svfloat32_t op1, svfloat32_t op2)
{
  // CHECK-LABEL: test_svext_f32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ext.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2, i32 63)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svext,_f32,,)(op1, op2, 63);
}

svfloat64_t test_svext_f64(svfloat64_t op1, svfloat64_t op2)
{
  // CHECK-LABEL: test_svext_f64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ext.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2, i32 31)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svext,_f64,,)(op1, op2, 31);
}