// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
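// For example, SVE_ACLE_FUNC(svset3,_s8,,) expands to the overloaded name
// "svset3" when SVE_OVERLOADED_FORMS is defined, and to the fully suffixed
// name "svset3_s8" otherwise, so one test body exercises both spellings.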


// NOTE: For these tests clang converts the struct parameter into
// several parameters, one for each member of the original struct.
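// Each svset3 intrinsic returns a copy of the three-vector tuple with the
// element at the given constant index (0..2) replaced by x; the index must be
// an integer constant expression, which is why the tests pass literal 0, 1 or 2.
// A typical (hypothetical, not CHECK'ed here) use of the tested API:
//   svint32x3_t acc = svcreate3_s32(a, b, c);
//   acc = svset3_s32(acc, 1, b2);   // replace element 1 of the tuple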
svint8x3_t test_svset3_s8(svint8x3_t tuple, svint8_t x)
{
  // CHECK-LABEL: test_svset3_s8
  // CHECK: %[[INSERT:.*]] = call <vscale x 48 x i8> @llvm.aarch64.sve.tuple.set.nxv48i8.nxv16i8(<vscale x 48 x i8> %tuple, i32 1, <vscale x 16 x i8> %x)
  // CHECK-NEXT: ret <vscale x 48 x i8> %[[INSERT]]
  return SVE_ACLE_FUNC(svset3,_s8,,)(tuple, 1, x);
}

svint16x3_t test_svset3_s16(svint16x3_t tuple, svint16_t x)
{
  // CHECK-LABEL: test_svset3_s16
  // CHECK: %[[INSERT:.*]] = call <vscale x 24 x i16> @llvm.aarch64.sve.tuple.set.nxv24i16.nxv8i16(<vscale x 24 x i16> %tuple, i32 2, <vscale x 8 x i16> %x)
  // CHECK-NEXT: ret <vscale x 24 x i16> %[[INSERT]]
  return SVE_ACLE_FUNC(svset3,_s16,,)(tuple, 2, x);
}

svint32x3_t test_svset3_s32(svint32x3_t tuple, svint32_t x)
{
  // CHECK-LABEL: test_svset3_s32
  // CHECK: %[[INSERT:.*]] = call <vscale x 12 x i32> @llvm.aarch64.sve.tuple.set.nxv12i32.nxv4i32(<vscale x 12 x i32> %tuple, i32 0, <vscale x 4 x i32> %x)
  // CHECK-NEXT: ret <vscale x 12 x i32> %[[INSERT]]
  return SVE_ACLE_FUNC(svset3,_s32,,)(tuple, 0, x);
}

svint64x3_t test_svset3_s64(svint64x3_t tuple, svint64_t x)
{
  // CHECK-LABEL: test_svset3_s64
  // CHECK: %[[INSERT:.*]] = call <vscale x 6 x i64> @llvm.aarch64.sve.tuple.set.nxv6i64.nxv2i64(<vscale x 6 x i64> %tuple, i32 1, <vscale x 2 x i64> %x)
  // CHECK-NEXT: ret <vscale x 6 x i64> %[[INSERT]]
  return SVE_ACLE_FUNC(svset3,_s64,,)(tuple, 1, x);
}

svuint8x3_t test_svset3_u8(svuint8x3_t tuple, svuint8_t x)
{
  // CHECK-LABEL: test_svset3_u8
  // CHECK: %[[INSERT:.*]] = call <vscale x 48 x i8> @llvm.aarch64.sve.tuple.set.nxv48i8.nxv16i8(<vscale x 48 x i8> %tuple, i32 2, <vscale x 16 x i8> %x)
  // CHECK-NEXT: ret <vscale x 48 x i8> %[[INSERT]]
  return SVE_ACLE_FUNC(svset3,_u8,,)(tuple, 2, x);
}

svuint16x3_t test_svset3_u16(svuint16x3_t tuple, svuint16_t x)
{
  // CHECK-LABEL: test_svset3_u16
  // CHECK: %[[INSERT:.*]] = call <vscale x 24 x i16> @llvm.aarch64.sve.tuple.set.nxv24i16.nxv8i16(<vscale x 24 x i16> %tuple, i32 0, <vscale x 8 x i16> %x)
  // CHECK-NEXT: ret <vscale x 24 x i16> %[[INSERT]]
  return SVE_ACLE_FUNC(svset3,_u16,,)(tuple, 0, x);
}

svuint32x3_t test_svset3_u32(svuint32x3_t tuple, svuint32_t x)
{
  // CHECK-LABEL: test_svset3_u32
  // CHECK: %[[INSERT:.*]] = call <vscale x 12 x i32> @llvm.aarch64.sve.tuple.set.nxv12i32.nxv4i32(<vscale x 12 x i32> %tuple, i32 1, <vscale x 4 x i32> %x)
  // CHECK-NEXT: ret <vscale x 12 x i32> %[[INSERT]]
  return SVE_ACLE_FUNC(svset3,_u32,,)(tuple, 1, x);
}

svuint64x3_t test_svset3_u64(svuint64x3_t tuple, svuint64_t x)
{
  // CHECK-LABEL: test_svset3_u64
  // CHECK: %[[INSERT:.*]] = call <vscale x 6 x i64> @llvm.aarch64.sve.tuple.set.nxv6i64.nxv2i64(<vscale x 6 x i64> %tuple, i32 2, <vscale x 2 x i64> %x)
  // CHECK-NEXT: ret <vscale x 6 x i64> %[[INSERT]]
  return SVE_ACLE_FUNC(svset3,_u64,,)(tuple, 2, x);
}

svfloat16x3_t test_svset3_f16(svfloat16x3_t tuple, svfloat16_t x)
{
  // CHECK-LABEL: test_svset3_f16
  // CHECK: %[[INSERT:.*]] = call <vscale x 24 x half> @llvm.aarch64.sve.tuple.set.nxv24f16.nxv8f16(<vscale x 24 x half> %tuple, i32 0, <vscale x 8 x half> %x)
  // CHECK-NEXT: ret <vscale x 24 x half> %[[INSERT]]
  return SVE_ACLE_FUNC(svset3,_f16,,)(tuple, 0, x);
}

svfloat32x3_t test_svset3_f32(svfloat32x3_t tuple, svfloat32_t x)
{
  // CHECK-LABEL: test_svset3_f32
  // CHECK: %[[INSERT:.*]] = call <vscale x 12 x float> @llvm.aarch64.sve.tuple.set.nxv12f32.nxv4f32(<vscale x 12 x float> %tuple, i32 1, <vscale x 4 x float> %x)
  // CHECK-NEXT: ret <vscale x 12 x float> %[[INSERT]]
  return SVE_ACLE_FUNC(svset3,_f32,,)(tuple, 1, x);
}

svfloat64x3_t test_svset3_f64(svfloat64x3_t tuple, svfloat64_t x)
{
  // CHECK-LABEL: test_svset3_f64
  // CHECK: %[[INSERT:.*]] = call <vscale x 6 x double> @llvm.aarch64.sve.tuple.set.nxv6f64.nxv2f64(<vscale x 6 x double> %tuple, i32 2, <vscale x 2 x double> %x)
  // CHECK-NEXT: ret <vscale x 6 x double> %[[INSERT]]
  return SVE_ACLE_FUNC(svset3,_f64,,)(tuple, 2, x);
}