// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -o - %s >/dev/null 2>%t
// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error -verify-ignore-unexpected=note %s

// If this check fails please read test/CodeGen/aarch64-sve-intrinsics/README for instructions on how to resolve it.
// ASM-NOT: warning
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
#endif
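// Illustrative expansion (not part of the checked output): with
// SVE_OVERLOADED_FORMS defined, SVE_ACLE_FUNC(svdupq_lane, _bf16, , )
// expands to the overloaded name svdupq_lane; otherwise all four arguments
// are pasted together, giving the fully mangled name svdupq_lane_bf16.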

svbfloat16_t test_svdupq_lane_bf16(svbfloat16_t data, uint64_t index) {
  // CHECK-LABEL: test_svdupq_lane_bf16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> %data, i64 %index)
  // CHECK: ret <vscale x 8 x bfloat> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svdupq_lane_bf16'}}
  return SVE_ACLE_FUNC(svdupq_lane, _bf16, , )(data, index);
}
svbfloat16_t test_svdupq_n_bf16(bfloat16_t x0, bfloat16_t x1, bfloat16_t x2, bfloat16_t x3,
                                bfloat16_t x4, bfloat16_t x5, bfloat16_t x6, bfloat16_t x7) {
  // CHECK-LABEL: test_svdupq_n_bf16
  // CHECK: %[[ALLOCA:.*]] = alloca [8 x bfloat], align 16
  // CHECK-DAG: %[[BASE:.*]] = getelementptr inbounds [8 x bfloat], [8 x bfloat]* %[[ALLOCA]], i64 0, i64 0
  // CHECK-DAG: store bfloat %x0, bfloat* %[[BASE]], align 16
  // <assume other stores>
  // CHECK-DAG: %[[GEP:.*]] = getelementptr inbounds [8 x bfloat], [8 x bfloat]* %[[ALLOCA]], i64 0, i64 7
  // CHECK: store bfloat %x7, bfloat* %[[GEP]], align 2
  // CHECK-NOT: store
  // CHECK: call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1rq.nxv8bf16(<vscale x 8 x i1> %{{.*}}, bfloat* nonnull %[[BASE]])
  // CHECK: ret <vscale x 8 x bfloat> %[[LOAD]]
  // expected-warning@+1 {{implicit declaration of function 'svdupq_n_bf16'}}
  return SVE_ACLE_FUNC(svdupq, _n, _bf16, )(x0, x1, x2, x3, x4, x5, x6, x7);
}