// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s

// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -o - %s >/dev/null
9 #include <arm_sve.h>
10
11 #ifdef SVE_OVERLOADED_FORMS
12 // A simple used,unused... macro, long enough to represent any SVE builtin.
13 #define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
14 #else
15 #define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
16 #endif
17
test_svreinterpret_s8_bf16(svbfloat16_t op)18 svint8_t test_svreinterpret_s8_bf16(svbfloat16_t op) {
19 // CHECK-LABEL: test_svreinterpret_s8_bf16
20 // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 16 x i8>
21 // CHECK: ret <vscale x 16 x i8> %[[CAST]]
22 return SVE_ACLE_FUNC(svreinterpret_s8, _bf16, , )(op);
23 }
24
test_svreinterpret_s16_bf16(svbfloat16_t op)25 svint16_t test_svreinterpret_s16_bf16(svbfloat16_t op) {
26 // CHECK-LABEL: test_svreinterpret_s16_bf16
27 // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 8 x i16>
28 // CHECK: ret <vscale x 8 x i16> %[[CAST]]
29 return SVE_ACLE_FUNC(svreinterpret_s16, _bf16, , )(op);
30 }
31
test_svreinterpret_s32_bf16(svbfloat16_t op)32 svint32_t test_svreinterpret_s32_bf16(svbfloat16_t op) {
33 // CHECK-LABEL: test_svreinterpret_s32_bf16
34 // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 4 x i32>
35 // CHECK: ret <vscale x 4 x i32> %[[CAST]]
36 return SVE_ACLE_FUNC(svreinterpret_s32, _bf16, , )(op);
37 }
test_svreinterpret_s64_bf16(svbfloat16_t op)38 svint64_t test_svreinterpret_s64_bf16(svbfloat16_t op) {
39 // CHECK-LABEL: test_svreinterpret_s64_bf16
40 // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 2 x i64>
41 // CHECK: ret <vscale x 2 x i64> %[[CAST]]
42 return SVE_ACLE_FUNC(svreinterpret_s64, _bf16, , )(op);
43 }
44
test_svreinterpret_u8_bf16(svbfloat16_t op)45 svuint8_t test_svreinterpret_u8_bf16(svbfloat16_t op) {
46 // CHECK-LABEL: test_svreinterpret_u8_bf16
47 // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 16 x i8>
48 // CHECK: ret <vscale x 16 x i8> %[[CAST]]
49 return SVE_ACLE_FUNC(svreinterpret_u8, _bf16, , )(op);
50 }
51
test_svreinterpret_u16_bf16(svbfloat16_t op)52 svuint16_t test_svreinterpret_u16_bf16(svbfloat16_t op) {
53 // CHECK-LABEL: test_svreinterpret_u16_bf16
54 // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 8 x i16>
55 // CHECK: ret <vscale x 8 x i16> %[[CAST]]
56 return SVE_ACLE_FUNC(svreinterpret_u16, _bf16, , )(op);
57 }
58
test_svreinterpret_u32_bf16(svbfloat16_t op)59 svuint32_t test_svreinterpret_u32_bf16(svbfloat16_t op) {
60 // CHECK-LABEL: test_svreinterpret_u32_bf16
61 // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 4 x i32>
62 // CHECK: ret <vscale x 4 x i32> %[[CAST]]
63 return SVE_ACLE_FUNC(svreinterpret_u32, _bf16, , )(op);
64 }
65
test_svreinterpret_u64_bf16(svbfloat16_t op)66 svuint64_t test_svreinterpret_u64_bf16(svbfloat16_t op) {
67 // CHECK-LABEL: test_svreinterpret_u64_bf16
68 // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 2 x i64>
69 // CHECK: ret <vscale x 2 x i64> %[[CAST]]
70 return SVE_ACLE_FUNC(svreinterpret_u64, _bf16, , )(op);
71 }
72
test_svreinterpret_bf16_s8(svint8_t op)73 svbfloat16_t test_svreinterpret_bf16_s8(svint8_t op) {
74 // CHECK-LABEL: test_svreinterpret_bf16_s8
75 // CHECK: %[[CAST:.*]] = bitcast <vscale x 16 x i8> %op to <vscale x 8 x bfloat>
76 // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
77 return SVE_ACLE_FUNC(svreinterpret_bf16, _s8, , )(op);
78 }
79
test_svreinterpret_bf16_s16(svint16_t op)80 svbfloat16_t test_svreinterpret_bf16_s16(svint16_t op) {
81 // CHECK-LABEL: test_svreinterpret_bf16_s16
82 // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x i16> %op to <vscale x 8 x bfloat>
83 // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
84 return SVE_ACLE_FUNC(svreinterpret_bf16, _s16, , )(op);
85 }
86
test_svreinterpret_bf16_s32(svint32_t op)87 svbfloat16_t test_svreinterpret_bf16_s32(svint32_t op) {
88 // CHECK-LABEL: test_svreinterpret_bf16_s32
89 // CHECK: %[[CAST:.*]] = bitcast <vscale x 4 x i32> %op to <vscale x 8 x bfloat>
90 // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
91 return SVE_ACLE_FUNC(svreinterpret_bf16, _s32, , )(op);
92 }
93
test_svreinterpret_bf16_s64(svint64_t op)94 svbfloat16_t test_svreinterpret_bf16_s64(svint64_t op) {
95 // CHECK-LABEL: test_svreinterpret_bf16_s64
96 // CHECK: %[[CAST:.*]] = bitcast <vscale x 2 x i64> %op to <vscale x 8 x bfloat>
97 // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
98 return SVE_ACLE_FUNC(svreinterpret_bf16, _s64, , )(op);
99 }
100
test_svreinterpret_bf16_u8(svuint8_t op)101 svbfloat16_t test_svreinterpret_bf16_u8(svuint8_t op) {
102 // CHECK-LABEL: test_svreinterpret_bf16_u8
103 // CHECK: %[[CAST:.*]] = bitcast <vscale x 16 x i8> %op to <vscale x 8 x bfloat>
104 // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
105 return SVE_ACLE_FUNC(svreinterpret_bf16, _u8, , )(op);
106 }
107
test_svreinterpret_bf16_u16(svuint16_t op)108 svbfloat16_t test_svreinterpret_bf16_u16(svuint16_t op) {
109 // CHECK-LABEL: test_svreinterpret_bf16_u16
110 // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x i16> %op to <vscale x 8 x bfloat>
111 // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
112 return SVE_ACLE_FUNC(svreinterpret_bf16, _u16, , )(op);
113 }
114
test_svreinterpret_bf16_u32(svuint32_t op)115 svbfloat16_t test_svreinterpret_bf16_u32(svuint32_t op) {
116 // CHECK-LABEL: test_svreinterpret_bf16_u32
117 // CHECK: %[[CAST:.*]] = bitcast <vscale x 4 x i32> %op to <vscale x 8 x bfloat>
118 // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
119 return SVE_ACLE_FUNC(svreinterpret_bf16, _u32, , )(op);
120 }
121
test_svreinterpret_bf16_u64(svuint64_t op)122 svbfloat16_t test_svreinterpret_bf16_u64(svuint64_t op) {
123 // CHECK-LABEL: test_svreinterpret_bf16_u64
124 // CHECK: %[[CAST:.*]] = bitcast <vscale x 2 x i64> %op to <vscale x 8 x bfloat>
125 // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
126 return SVE_ACLE_FUNC(svreinterpret_bf16, _u64, , )(op);
127 }
128
svbfloat16_t test_svreinterpret_bf16_bf16(svbfloat16_t op) {
  // CHECK-LABEL: test_svreinterpret_bf16_bf16
  // CHECK: ret <vscale x 8 x bfloat> %op
  return SVE_ACLE_FUNC(svreinterpret_bf16, _bf16, , )(op);
}
134
test_svreinterpret_bf16_f16(svfloat16_t op)135 svbfloat16_t test_svreinterpret_bf16_f16(svfloat16_t op) {
136 // CHECK-LABEL: test_svreinterpret_bf16_f16
137 // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x half> %op to <vscale x 8 x bfloat>
138 // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
139 return SVE_ACLE_FUNC(svreinterpret_bf16, _f16, , )(op);
140 }
141
test_svreinterpret_bf16_f32(svfloat32_t op)142 svbfloat16_t test_svreinterpret_bf16_f32(svfloat32_t op) {
143 // CHECK-LABEL: test_svreinterpret_bf16_f32
144 // CHECK: %[[CAST:.*]] = bitcast <vscale x 4 x float> %op to <vscale x 8 x bfloat>
145 // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
146 return SVE_ACLE_FUNC(svreinterpret_bf16, _f32, , )(op);
147 }
148
test_svreinterpret_bf16_f64(svfloat64_t op)149 svbfloat16_t test_svreinterpret_bf16_f64(svfloat64_t op) {
150 // CHECK-LABEL: test_svreinterpret_bf16_f64
151 // CHECK: %[[CAST:.*]] = bitcast <vscale x 2 x double> %op to <vscale x 8 x bfloat>
152 // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
153 return SVE_ACLE_FUNC(svreinterpret_bf16, _f64, , )(op);
154 }
155
test_svreinterpret_f32_bf16(svbfloat16_t op)156 svfloat32_t test_svreinterpret_f32_bf16(svbfloat16_t op) {
157 // CHECK-LABEL: test_svreinterpret_f32_bf16
158 // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 4 x float>
159 // CHECK: ret <vscale x 4 x float> %[[CAST]]
160 return SVE_ACLE_FUNC(svreinterpret_f32, _bf16, , )(op);
161 }
162
test_svreinterpret_f16_bf16(svbfloat16_t op)163 svfloat16_t test_svreinterpret_f16_bf16(svbfloat16_t op) {
164 // CHECK-LABEL: test_svreinterpret_f16_bf16
165 // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 8 x half>
166 // CHECK: ret <vscale x 8 x half> %[[CAST]]
167 return SVE_ACLE_FUNC(svreinterpret_f16, _bf16, , )(op);
168 }
169
test_svreinterpret_f64_bf16(svbfloat16_t op)170 svfloat64_t test_svreinterpret_f64_bf16(svbfloat16_t op) {
171 // CHECK-LABEL: test_svreinterpret_f64_bf16
172 // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 2 x double>
173 // CHECK: ret <vscale x 2 x double> %[[CAST]]
174 return SVE_ACLE_FUNC(svreinterpret_f64, _bf16, , )(op);
175 }
176