; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

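; Check that an ftrunc of a value that is already integral (the result of
; rint, ceil, floor, nearbyint, or trunc) is folded away, so only the inner
; rounding instruction is emitted.

; ftrunc (frint x) --> frint x: frint already returns an integral value,
; so a single v_rndne_f64 suffices.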
; GCN-LABEL: {{^}}combine_ftrunc_frint_f64:
; GCN: v_rndne_f64_e32 [[RND:v\[[0-9:]+\]]],
; GCN: flat_store_dwordx2 v[{{[0-9:]+}}], [[RND]]
define amdgpu_kernel void @combine_ftrunc_frint_f64(double addrspace(1)* %p) {
  %v = load double, double addrspace(1)* %p, align 8
  %round = tail call double @llvm.rint.f64(double %v)
  %trunc = tail call double @llvm.trunc.f64(double %round)
  store double %trunc, double addrspace(1)* %p, align 8
  ret void
}

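; Same fold for f32: ftrunc (frint x) --> frint x.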
; GCN-LABEL: {{^}}combine_ftrunc_frint_f32:
; GCN: v_rndne_f32_e32 [[RND:v[0-9]+]],
; GCN: flat_store_dword v[{{[0-9:]+}}], [[RND]]
define amdgpu_kernel void @combine_ftrunc_frint_f32(float addrspace(1)* %p) {
  %v = load float, float addrspace(1)* %p, align 4
  %round = tail call float @llvm.rint.f32(float %v)
  %trunc = tail call float @llvm.trunc.f32(float %round)
  store float %trunc, float addrspace(1)* %p, align 4
  ret void
}

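; The fold also applies per element for vectors: both lanes are rounded
; with v_rndne_f32 and no trunc remains.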
; GCN-LABEL: {{^}}combine_ftrunc_frint_v2f32:
; GCN: s_load_dwordx2
; GCN: s_load_dwordx2 s{{\[}}[[SRC1:[0-9]+]]:[[SRC2:[0-9]+]]{{\]}}
; GCN-DAG: v_rndne_f32_e32 v[[RND1:[0-9]+]], s[[SRC1]]
; GCN-DAG: v_rndne_f32_e32 v[[RND2:[0-9]+]], s[[SRC2]]
; GCN: flat_store_dwordx2 v[{{[0-9:]+}}], v{{\[}}[[RND1]]:[[RND2]]{{\]}}
define amdgpu_kernel void @combine_ftrunc_frint_v2f32(<2 x float> addrspace(1)* %p) {
  %v = load <2 x float>, <2 x float> addrspace(1)* %p, align 8
  %round = tail call <2 x float> @llvm.rint.v2f32(<2 x float> %v)
  %trunc = tail call <2 x float> @llvm.trunc.v2f32(<2 x float> %round)
  store <2 x float> %trunc, <2 x float> addrspace(1)* %p, align 8
  ret void
}

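; ftrunc (fceil x) --> fceil x: ceil also produces an integral value.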
; GCN-LABEL: {{^}}combine_ftrunc_fceil_f32:
; GCN: v_ceil_f32_e32 [[RND:v[0-9]+]],
; GCN: flat_store_dword v[{{[0-9:]+}}], [[RND]]
define amdgpu_kernel void @combine_ftrunc_fceil_f32(float addrspace(1)* %p) {
  %v = load float, float addrspace(1)* %p, align 4
  %round = tail call float @llvm.ceil.f32(float %v)
  %trunc = tail call float @llvm.trunc.f32(float %round)
  store float %trunc, float addrspace(1)* %p, align 4
  ret void
}

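; ftrunc (ffloor x) --> ffloor x.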
; GCN-LABEL: {{^}}combine_ftrunc_ffloor_f32:
; GCN: v_floor_f32_e32 [[RND:v[0-9]+]],
; GCN: flat_store_dword v[{{[0-9:]+}}], [[RND]]
define amdgpu_kernel void @combine_ftrunc_ffloor_f32(float addrspace(1)* %p) {
  %v = load float, float addrspace(1)* %p, align 4
  %round = tail call float @llvm.floor.f32(float %v)
  %trunc = tail call float @llvm.trunc.f32(float %round)
  store float %trunc, float addrspace(1)* %p, align 4
  ret void
}

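; ftrunc (fnearbyint x) --> fnearbyint x; on this target nearbyint selects
; to v_rndne, just like rint.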
; GCN-LABEL: {{^}}combine_ftrunc_fnearbyint_f32:
; GCN: v_rndne_f32_e32 [[RND:v[0-9]+]],
; GCN: flat_store_dword v[{{[0-9:]+}}], [[RND]]
define amdgpu_kernel void @combine_ftrunc_fnearbyint_f32(float addrspace(1)* %p) {
  %v = load float, float addrspace(1)* %p, align 4
  %round = tail call float @llvm.nearbyint.f32(float %v)
  %trunc = tail call float @llvm.trunc.f32(float %round)
  store float %trunc, float addrspace(1)* %p, align 4
  ret void
}

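; ftrunc (ftrunc x) --> ftrunc x: truncation is idempotent, so only one
; v_trunc_f32 is emitted.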
; GCN-LABEL: {{^}}combine_ftrunc_ftrunc_f32:
; GCN: s_load_dword [[SRC:s[0-9]+]],
; GCN: v_trunc_f32_e32 [[RND:v[0-9]+]], [[SRC]]
; GCN: flat_store_dword v[{{[0-9:]+}}], [[RND]]
define amdgpu_kernel void @combine_ftrunc_ftrunc_f32(float addrspace(1)* %p) {
  %v = load float, float addrspace(1)* %p, align 4
  %round = tail call float @llvm.trunc.f32(float %v)
  %trunc = tail call float @llvm.trunc.f32(float %round)
  store float %trunc, float addrspace(1)* %p, align 4
  ret void
}

declare double @llvm.trunc.f64(double)
declare float @llvm.trunc.f32(float)
declare <2 x float> @llvm.trunc.v2f32(<2 x float>)
declare double @llvm.rint.f64(double)
declare float @llvm.rint.f32(float)
declare <2 x float> @llvm.rint.v2f32(<2 x float>)
declare float @llvm.ceil.f32(float)
declare float @llvm.floor.f32(float)
declare float @llvm.nearbyint.f32(float)