1 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
2 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
3 // RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
4 // RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
5 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
6 // RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
7 
8 #include <arm_sve.h>
9 
10 #ifdef SVE_OVERLOADED_FORMS
11 // A simple used,unused... macro, long enough to represent any SVE builtin.
12 #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
13 #else
14 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
15 #endif
16 
// Under +sve2: svpmullt_pair_u8 must lower directly to the
// llvm.aarch64.sve.pmullt.pair.nxv16i8 intrinsic.
// Under +sve only: the builtin is unavailable, so both the overloaded and
// the suffixed form must be diagnosed as implicitly declared.
svuint8_t test_svpmullt_pair_u8(svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svpmullt_pair_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.pmullt.pair.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svpmullt_pair'}}
  // expected-warning@+1 {{implicit declaration of function 'svpmullt_pair_u8'}}
  return SVE_ACLE_FUNC(svpmullt_pair,_u8,,)(op1, op2);
}
26 
// 32-bit element variant: svpmullt_pair_u32 lowers to
// llvm.aarch64.sve.pmullt.pair.nxv4i32 under +sve2, and is diagnosed as
// implicitly declared when only +sve is enabled.
svuint32_t test_svpmullt_pair_u32(svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svpmullt_pair_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.pmullt.pair.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svpmullt_pair'}}
  // expected-warning@+1 {{implicit declaration of function 'svpmullt_pair_u32'}}
  return SVE_ACLE_FUNC(svpmullt_pair,_u32,,)(op1, op2);
}
36 
// Scalar-operand (_n) variant: the scalar op2 must first be splatted via
// llvm.aarch64.sve.dup.x.nxv16i8 and the result fed to the pmullt.pair
// intrinsic. Diagnosed as implicitly declared under plain +sve.
svuint8_t test_svpmullt_pair_n_u8(svuint8_t op1, uint8_t op2)
{
  // CHECK-LABEL: test_svpmullt_pair_n_u8
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.pmullt.pair.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svpmullt_pair'}}
  // expected-warning@+1 {{implicit declaration of function 'svpmullt_pair_n_u8'}}
  return SVE_ACLE_FUNC(svpmullt_pair,_n_u8,,)(op1, op2);
}
47 
// Scalar-operand (_n) 32-bit variant: op2 is splatted with
// llvm.aarch64.sve.dup.x.nxv4i32 before the pmullt.pair call.
// Diagnosed as implicitly declared under plain +sve.
svuint32_t test_svpmullt_pair_n_u32(svuint32_t op1, uint32_t op2)
{
  // CHECK-LABEL: test_svpmullt_pair_n_u32
  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.pmullt.pair.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svpmullt_pair'}}
  // expected-warning@+1 {{implicit declaration of function 'svpmullt_pair_n_u32'}}
  return SVE_ACLE_FUNC(svpmullt_pair,_n_u32,,)(op1, op2);
}
58 
// Widening form: svpmullt_u16 takes u8 inputs and yields a u16 vector.
// Codegen is the same pmullt.pair.nxv16i8 intrinsic followed by a bitcast
// of the nxv16i8 result to nxv8i16. Diagnosed under plain +sve.
svuint16_t test_svpmullt_u16(svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svpmullt_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.pmullt.pair.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: %[[BITCAST:.*]] = bitcast <vscale x 16 x i8> %[[INTRINSIC]] to <vscale x 8 x i16>
  // CHECK: ret <vscale x 8 x i16> %[[BITCAST]]
  // overload-warning@+2 {{implicit declaration of function 'svpmullt'}}
  // expected-warning@+1 {{implicit declaration of function 'svpmullt_u16'}}
  return SVE_ACLE_FUNC(svpmullt,_u16,,)(op1, op2);
}
69 
// Widening form: svpmullt_u64 takes u32 inputs and yields a u64 vector via
// pmullt.pair.nxv4i32 plus a bitcast of nxv4i32 to nxv2i64.
// Diagnosed as implicitly declared under plain +sve.
svuint64_t test_svpmullt_u64(svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svpmullt_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.pmullt.pair.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: %[[BITCAST:.*]] = bitcast <vscale x 4 x i32> %[[INTRINSIC]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[BITCAST]]
  // overload-warning@+2 {{implicit declaration of function 'svpmullt'}}
  // expected-warning@+1 {{implicit declaration of function 'svpmullt_u64'}}
  return SVE_ACLE_FUNC(svpmullt,_u64,,)(op1, op2);
}
80 
// Widening scalar-operand (_n) form: scalar op2 is splatted with
// llvm.aarch64.sve.dup.x.nxv16i8, multiplied via pmullt.pair.nxv16i8, and
// the result bitcast to nxv8i16. Diagnosed under plain +sve.
svuint16_t test_svpmullt_n_u16(svuint8_t op1, uint8_t op2)
{
  // CHECK-LABEL: test_svpmullt_n_u16
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.pmullt.pair.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: %[[BITCAST:.*]] = bitcast <vscale x 16 x i8> %[[INTRINSIC]] to <vscale x 8 x i16>
  // CHECK: ret <vscale x 8 x i16> %[[BITCAST]]
  // overload-warning@+2 {{implicit declaration of function 'svpmullt'}}
  // expected-warning@+1 {{implicit declaration of function 'svpmullt_n_u16'}}
  return SVE_ACLE_FUNC(svpmullt,_n_u16,,)(op1, op2);
}
92 
// Widening scalar-operand (_n) form: scalar op2 is splatted with
// llvm.aarch64.sve.dup.x.nxv4i32, multiplied via pmullt.pair.nxv4i32, and
// the result bitcast to nxv2i64. Diagnosed under plain +sve.
svuint64_t test_svpmullt_n_u64(svuint32_t op1, uint32_t op2)
{
  // CHECK-LABEL: test_svpmullt_n_u64
  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.pmullt.pair.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: %[[BITCAST:.*]] = bitcast <vscale x 4 x i32> %[[INTRINSIC]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[BITCAST]]
  // overload-warning@+2 {{implicit declaration of function 'svpmullt'}}
  // expected-warning@+1 {{implicit declaration of function 'svpmullt_n_u64'}}
  return SVE_ACLE_FUNC(svpmullt,_n_u64,,)(op1, op2);
}
104