; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
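
; Test lowering of the llvm.ctpop intrinsic on i64 and wider integer types:
; uniform (SGPR) inputs should select s_bcnt1_i32_b64, and divergent (VGPR)
; inputs should select v_bcnt_u32_b32 pairs, on both SI and VI.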

declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone

declare i64 @llvm.ctpop.i64(i64) nounwind readnone
declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>) nounwind readnone
declare <8 x i64> @llvm.ctpop.v8i64(<8 x i64>) nounwind readnone
declare <16 x i64> @llvm.ctpop.v16i64(<16 x i64>) nounwind readnone

declare i65 @llvm.ctpop.i65(i65) nounwind readnone
declare i128 @llvm.ctpop.i128(i128) nounwind readnone

; FUNC-LABEL: {{^}}s_ctpop_i64:
; SI: s_load_dwordx2 [[SVAL:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0x13
; VI: s_load_dwordx2 [[SVAL:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0x4c
; GCN: s_bcnt1_i32_b64 [[SRESULT:s[0-9]+]], [[SVAL]]
; GCN: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
; GCN: buffer_store_dword [[VRESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @s_ctpop_i64(i32 addrspace(1)* noalias %out, [8 x i32], i64 %val) nounwind {
  %ctpop = call i64 @llvm.ctpop.i64(i64 %val) nounwind readnone
  %truncctpop = trunc i64 %ctpop to i32
  store i32 %truncctpop, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_ctpop_i64:
; GCN: {{buffer|flat}}_load_dwordx2 v{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}},
; GCN: v_bcnt_u32_b32{{(_e64)*}} [[MIDRESULT:v[0-9]+]], v[[LOVAL]], 0
; SI-NEXT: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
; VI-NEXT: v_bcnt_u32_b32 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @v_ctpop_i64(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
  %val = load i64, i64 addrspace(1)* %in.gep, align 8
  %ctpop = call i64 @llvm.ctpop.i64(i64 %val) nounwind readnone
  %truncctpop = trunc i64 %ctpop to i32
  store i32 %truncctpop, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_ctpop_i64_user:
; GCN: {{buffer|flat}}_load_dwordx2 v{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}},
; GCN: v_bcnt_u32_b32{{(_e64)*}} [[MIDRESULT:v[0-9]+]], v[[LOVAL]], 0
; SI-NEXT: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
; VI-NEXT: v_bcnt_u32_b32 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
; GCN-DAG: v_or_b32_e32 v[[RESULT_LO:[0-9]+]], s{{[0-9]+}}, [[RESULT]]
; GCN-DAG: v_mov_b32_e32 v[[RESULT_HI:[0-9]+]], s{{[0-9]+}}
; GCN: buffer_store_dwordx2 v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
; GCN: s_endpgm
define amdgpu_kernel void @v_ctpop_i64_user(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 %s.val) nounwind {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
  %val = load i64, i64 addrspace(1)* %in.gep, align 8
  %ctpop = call i64 @llvm.ctpop.i64(i64 %val) nounwind readnone
  %or = or i64 %ctpop, %s.val
  store i64 %or, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_ctpop_v2i64:
; GCN: s_bcnt1_i32_b64
; GCN: s_bcnt1_i32_b64
; GCN: s_endpgm
define amdgpu_kernel void @s_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> %val) nounwind {
  %ctpop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %val) nounwind readnone
  %truncctpop = trunc <2 x i64> %ctpop to <2 x i32>
  store <2 x i32> %truncctpop, <2 x i32> addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_ctpop_v4i64:
; GCN: s_bcnt1_i32_b64
; GCN: s_bcnt1_i32_b64
; GCN: s_bcnt1_i32_b64
; GCN: s_bcnt1_i32_b64
; GCN: s_endpgm
define amdgpu_kernel void @s_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> %val) nounwind {
  %ctpop = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %val) nounwind readnone
  %truncctpop = trunc <4 x i64> %ctpop to <4 x i32>
  store <4 x i32> %truncctpop, <4 x i32> addrspace(1)* %out, align 16
  ret void
}

; FUNC-LABEL: {{^}}v_ctpop_v2i64:
; GCN: v_bcnt_u32_b32
; GCN: v_bcnt_u32_b32
; GCN: v_bcnt_u32_b32
; GCN: v_bcnt_u32_b32
; GCN: s_endpgm
define amdgpu_kernel void @v_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in) nounwind {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %in.gep = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i32 %tid
  %val = load <2 x i64>, <2 x i64> addrspace(1)* %in.gep, align 16
  %ctpop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %val) nounwind readnone
  %truncctpop = trunc <2 x i64> %ctpop to <2 x i32>
  store <2 x i32> %truncctpop, <2 x i32> addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_ctpop_v4i64:
; GCN: v_bcnt_u32_b32
; GCN: v_bcnt_u32_b32
; GCN: v_bcnt_u32_b32
; GCN: v_bcnt_u32_b32
; GCN: v_bcnt_u32_b32
; GCN: v_bcnt_u32_b32
; GCN: v_bcnt_u32_b32
; GCN: v_bcnt_u32_b32
; GCN: s_endpgm
define amdgpu_kernel void @v_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> addrspace(1)* noalias %in) nounwind {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %in.gep = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tid
  %val = load <4 x i64>, <4 x i64> addrspace(1)* %in.gep, align 32
  %ctpop = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %val) nounwind readnone
  %truncctpop = trunc <4 x i64> %ctpop to <4 x i32>
  store <4 x i32> %truncctpop, <4 x i32> addrspace(1)* %out, align 16
  ret void
}

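; ctpop computed in one arm of a branch and merged through a phi; the checks
; still expect the uniform operand to be counted with s_bcnt1_i32_b64.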
; FUNC-LABEL: {{^}}ctpop_i64_in_br:
; SI-DAG: s_load_dwordx2 s{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], 0xd
; VI-DAG: s_load_dwordx2 s{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], 0x34
; GCN-DAG: s_bcnt1_i32_b64 [[RESULT:s[0-9]+]], {{s\[}}[[LOVAL]]:[[HIVAL]]{{\]}}
; GCN-DAG: s_mov_b32 [[ZERO:s[0-9]+]], 0
; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], [[RESULT]]
; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], [[ZERO]]
; GCN: buffer_store_dwordx2 {{v\[}}[[VLO]]:[[VHI]]{{\]}}
; GCN: s_endpgm
define amdgpu_kernel void @ctpop_i64_in_br(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %ctpop_arg, i32 %cond) {
entry:
  %tmp0 = icmp eq i32 %cond, 0
  br i1 %tmp0, label %if, label %else

if:
  %tmp2 = call i64 @llvm.ctpop.i64(i64 %ctpop_arg)
  br label %endif

else:
  %tmp3 = getelementptr i64, i64 addrspace(1)* %in, i32 1
  %tmp4 = load i64, i64 addrspace(1)* %tmp3
  br label %endif

endif:
  %tmp5 = phi i64 [%tmp2, %if], [%tmp4, %else]
  store i64 %tmp5, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_ctpop_i128:
; GCN: s_bcnt1_i32_b64 [[SRESULT0:s[0-9]+]],
; GCN: s_bcnt1_i32_b64 [[SRESULT1:s[0-9]+]],
; GCN: s_add_i32 s{{[0-9]+}}, [[SRESULT1]], [[SRESULT0]]
; GCN: s_endpgm
define amdgpu_kernel void @s_ctpop_i128(i32 addrspace(1)* noalias %out, i128 %val) nounwind {
  %ctpop = call i128 @llvm.ctpop.i128(i128 %val) nounwind readnone
  %truncctpop = trunc i128 %ctpop to i32
  store i32 %truncctpop, i32 addrspace(1)* %out, align 4
  ret void
}

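; ctpop of a non-power-of-two type: the checks expect the i65 value to be
; counted as a 64-bit half plus a masked high part, with the two counts added.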
; FUNC-LABEL: {{^}}s_ctpop_i65:
; GCN: s_bcnt1_i32_b64 [[REG0:s[0-9]+]],
; GCN: s_and_b32
; GCN: s_bcnt1_i32_b64 [[REG1:s[0-9]+]],
; GCN: s_add_i32 {{s[0-9]+}}, [[REG0]], [[REG1]]
; GCN: s_endpgm
define amdgpu_kernel void @s_ctpop_i65(i32 addrspace(1)* noalias %out, i65 %val) nounwind {
  %ctpop = call i65 @llvm.ctpop.i65(i65 %val) nounwind readnone
  %truncctpop = trunc i65 %ctpop to i32
  store i32 %truncctpop, i32 addrspace(1)* %out, align 4
  ret void
}

; FIXME: Should not have extra add

; FUNC-LABEL: {{^}}v_ctpop_i128:
; SI: buffer_load_dwordx4 v{{\[}}[[VAL0:[0-9]+]]:[[VAL3:[0-9]+]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
; VI: flat_load_dwordx4   v{{\[}}[[VAL0:[0-9]+]]:[[VAL3:[0-9]+]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}

; GCN-DAG: v_bcnt_u32_b32{{(_e64)*}} [[MIDRESULT0:v[0-9]+]], v{{[0-9]+}}, 0
; GCN-DAG: v_bcnt_u32_b32{{(_e32)*(_e64)*}} [[MIDRESULT1:v[0-9]+]], v[[VAL3]], [[MIDRESULT0]]

; GCN-DAG: v_bcnt_u32_b32{{(_e64)*}} [[MIDRESULT2:v[0-9]+]], v[[VAL0]], 0
; GCN-DAG: v_bcnt_u32_b32{{(_e32)*(_e64)*}} [[MIDRESULT3:v[0-9]+]], v{{[0-9]+}}, [[MIDRESULT2]]

; GCN: v_add_{{[iu]}}32_e32 [[RESULT:v[0-9]+]], vcc, [[MIDRESULT2]], [[MIDRESULT1]]

; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @v_ctpop_i128(i32 addrspace(1)* noalias %out, i128 addrspace(1)* noalias %in) nounwind {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %tid
  %val = load i128, i128 addrspace(1)* %in.gep, align 8
  %ctpop = call i128 @llvm.ctpop.i128(i128 %val) nounwind readnone
  %truncctpop = trunc i128 %ctpop to i32
  store i32 %truncctpop, i32 addrspace(1)* %out, align 4
  ret void
}