1; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI,FUNC %s
2; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,FUNC %s
3
4declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
5declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone
6
; A 32-bit multiply whose operands are both masked to their low 24 bits
; (via matching shl/lshr pairs) must be selected as the 24-bit multiply.
; FUNC-LABEL: {{^}}test_umul24_i32:
; GCN: v_mul_u32_u24
define amdgpu_kernel void @test_umul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
  %a.shl = shl i32 %a, 8            ; clear the top 8 bits of %a ...
  %a.masked = lshr i32 %a.shl, 8    ; ... leaving only its low 24 bits
  %b.shl = shl i32 %b, 8            ; same masking for %b
  %b.masked = lshr i32 %b.shl, 8
  %mul = mul i32 %a.masked, %b.masked
  store i32 %mul, i32 addrspace(1)* %out
  ret void
}
19
; i16 multiply of kernel arguments whose result is sign-extended to i32.
; On SI the checks expect the 24-bit multiply followed by a bitfield
; extract of the low 16 bits; on VI they expect a scalar multiply plus a
; scalar 16-bit sign-extension.
; FUNC-LABEL: {{^}}test_umul24_i16_sext:
; SI: v_mul_u32_u24_e{{(32|64)}} [[VI_MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; SI: v_bfe_i32 v{{[0-9]}}, [[VI_MUL]], 0, 16

; VI: s_mul_i32 [[MUL:s[0-9]+]]
; VI: s_sext_i32_i16 s{{[0-9]+}}, [[MUL]]
define amdgpu_kernel void @test_umul24_i16_sext(i32 addrspace(1)* %out, i16 %a, i16 %b) {
entry:
  %mul = mul i16 %a, %b
  %ext = sext i16 %mul to i32
  store i32 %ext, i32 addrspace(1)* %out
  ret void
}
33
; Same i16 multiply + sext as above, but with operands loaded from memory
; (indexed by workitem IDs) so they live in VGPRs. The VI checks then
; expect the 16-bit VALU multiply instead of a scalar multiply; both
; targets sign-extend the low 16 bits with a bitfield extract.
; FUNC-LABEL: {{^}}test_umul24_i16_vgpr_sext:
; SI: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; VI: v_mul_lo_u16_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 16
define amdgpu_kernel void @test_umul24_i16_vgpr_sext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
  %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.y = call i32 @llvm.amdgcn.workitem.id.y()
  %ptr_a = getelementptr i16, i16 addrspace(1)* %in, i32 %tid.x
  %ptr_b = getelementptr i16, i16 addrspace(1)* %in, i32 %tid.y
  %a = load i16, i16 addrspace(1)* %ptr_a
  %b = load i16, i16 addrspace(1)* %ptr_b
  %mul = mul i16 %a, %b
  %val = sext i16 %mul to i32
  store i32 %val, i32 addrspace(1)* %out
  ret void
}
50
; i16 multiply of kernel arguments zero-extended to i32: the checks expect
; masking plus the 24-bit multiply on SI, and a scalar multiply plus mask
; on VI.
; FUNC-LABEL: {{^}}test_umul24_i16:
; SI: s_and_b32
; SI: v_mul_u32_u24_e32
; SI: v_and_b32_e32

; VI: s_mul_i32
; VI: s_and_b32
define amdgpu_kernel void @test_umul24_i16(i32 addrspace(1)* %out, i16 %a, i16 %b) {
entry:
  %mul = mul i16 %a, %b
  %ext = zext i16 %mul to i32
  store i32 %ext, i32 addrspace(1)* %out
  ret void
}
65
; Same i16 multiply + zext, but with VGPR operands loaded from memory;
; the VI checks expect the 16-bit VALU multiply (no separate mask listed).
; FUNC-LABEL: {{^}}test_umul24_i16_vgpr:
; SI: v_mul_u32_u24_e32
; SI: v_and_b32_e32
; VI: v_mul_lo_u16
define amdgpu_kernel void @test_umul24_i16_vgpr(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
  %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.y = call i32 @llvm.amdgcn.workitem.id.y()
  %ptr_a = getelementptr i16, i16 addrspace(1)* %in, i32 %tid.x
  %ptr_b = getelementptr i16, i16 addrspace(1)* %in, i32 %tid.y
  %a = load i16, i16 addrspace(1)* %ptr_a
  %b = load i16, i16 addrspace(1)* %ptr_b
  %mul = mul i16 %a, %b
  %val = zext i16 %mul to i32
  store i32 %val, i32 addrspace(1)* %out
  ret void
}
82
; i8 multiply of loaded (VGPR) operands, sign-extended to i32: the result
; is expected to come from the narrow multiply followed by a sign-extending
; bitfield extract of the low 8 bits.
; FUNC-LABEL: {{^}}test_umul24_i8_vgpr:
; SI: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; VI: v_mul_lo_u16_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 8
define amdgpu_kernel void @test_umul24_i8_vgpr(i32 addrspace(1)* %out, i8 addrspace(1)* %a, i8 addrspace(1)* %b) {
entry:
  %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.y = call i32 @llvm.amdgcn.workitem.id.y()
  %a.ptr = getelementptr i8, i8 addrspace(1)* %a, i32 %tid.x
  %b.ptr = getelementptr i8, i8 addrspace(1)* %b, i32 %tid.y
  %a.l = load i8, i8 addrspace(1)* %a.ptr
  %b.l = load i8, i8 addrspace(1)* %b.ptr
  %mul = mul i8 %a.l, %b.l
  %ext = sext i8 %mul to i32
  store i32 %ext, i32 addrspace(1)* %out
  ret void
}
100
; High 32 bits of a 48-bit product of two 24-bit values (masked i32 inputs
; widened to i64): expected to fold to a single mul_hi_u24 with the
; explicit 'and' masks eliminated.
; FUNC-LABEL: {{^}}test_umulhi24_i32_i64:
; GCN-NOT: and
; GCN: v_mul_hi_u32_u24_e32 [[RESULT:v[0-9]+]],
; GCN-NEXT: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @test_umulhi24_i32_i64(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
  %a.24 = and i32 %a, 16777215            ; keep low 24 bits (0xffffff)
  %b.24 = and i32 %b, 16777215
  %a.24.i64 = zext i32 %a.24 to i64
  %b.24.i64 = zext i32 %b.24 to i64
  %mul48 = mul i64 %a.24.i64, %b.24.i64   ; product fits in 48 bits
  %mul48.hi = lshr i64 %mul48, 32
  %mul24hi = trunc i64 %mul48.hi to i32
  store i32 %mul24hi, i32 addrspace(1)* %out
  ret void
}
117
; Same mul_hi_u24 pattern as above, but the masking is done directly on
; i64 inputs rather than on i32 values widened afterwards.
; FUNC-LABEL: {{^}}test_umulhi24:
; GCN-NOT: and
; GCN: v_mul_hi_u32_u24_e32 [[RESULT:v[0-9]+]],
; GCN-NEXT: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @test_umulhi24(i32 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
  %a.24 = and i64 %a, 16777215            ; keep low 24 bits (0xffffff)
  %b.24 = and i64 %b, 16777215
  %mul48 = mul i64 %a.24, %b.24           ; product fits in 48 bits
  %mul48.hi = lshr i64 %mul48, 32
  %mul24.hi = trunc i64 %mul48.hi to i32
  store i32 %mul24.hi, i32 addrspace(1)* %out
  ret void
}
132
; Multiply with 24-bit inputs and 64-bit output. The 24-bit masking is
; written as shl/lshr by 40 on i64 values; the full 64-bit result should
; come from a mul_u24 / mul_hi_u24 pair, with the masks and shifts folded
; away entirely.
; FUNC-LABEL: {{^}}test_umul24_i64:
; GCN-NOT: and
; GCN-NOT: lshr
; GCN-DAG: v_mul_u32_u24_e32
; GCN-DAG: v_mul_hi_u32_u24_e32
; GCN: buffer_store_dwordx2
define amdgpu_kernel void @test_umul24_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
  %tmp0 = shl i64 %a, 40          ; clear top 40 bits ...
  %a_24 = lshr i64 %tmp0, 40      ; ... leaving low 24 bits of %a
  %tmp1 = shl i64 %b, 40
  %b_24 = lshr i64 %tmp1, 40
  %tmp2 = mul i64 %a_24, %b_24
  store i64 %tmp2, i64 addrspace(1)* %out
  ret void
}
150
; Squaring a 24-bit value: both multiply halves should use the same
; scalar source register, with no masking emitted. The [8 x i32] argument
; is padding — presumably to place %a at a kernarg offset that is loaded
; with a single s_load_dword; confirm against the kernarg lowering.
; FUNC-LABEL: {{^}}test_umul24_i64_square:
; GCN: s_load_dword [[A:s[0-9]+]]
; GCN-NOT: s_and_b32
; GCN-DAG: v_mul_hi_u32_u24_e64 v{{[0-9]+}}, [[A]], [[A]]
; GCN-DAG: v_mul_u32_u24_e64 v{{[0-9]+}}, [[A]], [[A]]
define amdgpu_kernel void @test_umul24_i64_square(i64 addrspace(1)* %out, [8 x i32], i64 %a) {
entry:
  %tmp0 = shl i64 %a, 40          ; mask %a down to its low 24 bits
  %a.24 = lshr i64 %tmp0, 40
  %tmp2 = mul i64 %a.24, %a.24
  store i64 %tmp2, i64 addrspace(1)* %out
  ret void
}
164
; High 16 bits of a product of two 16-bit values held in i32: 16-bit
; inputs still fit the 24-bit multiply, so the checks expect the masks,
; a mul_u24, and a 16-bit right shift of the result.
; FUNC-LABEL: {{^}}test_umulhi16_i32:
; GCN: s_and_b32
; GCN: s_and_b32
; GCN: v_mul_u32_u24_e32 [[MUL24:v[0-9]+]]
; GCN: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, [[MUL24]]
define amdgpu_kernel void @test_umulhi16_i32(i16 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
  %a.16 = and i32 %a, 65535       ; keep low 16 bits (0xffff)
  %b.16 = and i32 %b, 65535
  %mul = mul i32 %a.16, %b.16
  %hi = lshr i32 %mul, 16
  %mulhi = trunc i32 %hi to i16
  store i16 %mulhi, i16 addrspace(1)* %out
  ret void
}
180
; 24-bit multiply on an illegal i33 type, zero-extended to i64. The low
; 32 result bits come from mul_u24; only bit 32 of the high half is
; meaningful (48-bit product zero-extended from i33), hence the 'and'
; with 1 on the mul_hi result before the 64-bit store.
; FUNC-LABEL: {{^}}test_umul24_i33:
; GCN: s_load_dword s
; GCN: s_load_dword s
; GCN-NOT: and
; GCN-NOT: lshr
; GCN-DAG: v_mul_u32_u24_e32 v[[MUL_LO:[0-9]+]],
; GCN-DAG: v_mul_hi_u32_u24_e32 v[[MUL_HI:[0-9]+]],
; GCN-DAG: v_and_b32_e32 v[[HI:[0-9]+]], 1, v[[MUL_HI]]
; GCN: buffer_store_dwordx2 v{{\[}}[[MUL_LO]]:[[HI]]{{\]}}
define amdgpu_kernel void @test_umul24_i33(i64 addrspace(1)* %out, i33 %a, i33 %b) {
entry:
  %tmp0 = shl i33 %a, 9           ; mask i33 input to its low 24 bits
  %a_24 = lshr i33 %tmp0, 9
  %tmp1 = shl i33 %b, 9
  %b_24 = lshr i33 %tmp1, 9
  %tmp2 = mul i33 %a_24, %b_24
  %ext = zext i33 %tmp2 to i64
  store i64 %ext, i64 addrspace(1)* %out
  ret void
}
201
; As above, but only bits 32..63 of the i33 product are stored, so just
; the mul_hi half is needed, masked down to its single meaningful bit.
; FUNC-LABEL: {{^}}test_umulhi24_i33:
; GCN: s_load_dword s
; GCN: s_load_dword s
; GCN-NOT: and
; GCN-NOT: lshr
; GCN: v_mul_hi_u32_u24_e32 v[[MUL_HI:[0-9]+]],
; GCN-NEXT: v_and_b32_e32 v[[HI:[0-9]+]], 1, v[[MUL_HI]]
; GCN-NEXT: buffer_store_dword v[[HI]]
define amdgpu_kernel void @test_umulhi24_i33(i32 addrspace(1)* %out, i33 %a, i33 %b) {
entry:
  %tmp0 = shl i33 %a, 9           ; mask i33 input to its low 24 bits
  %a_24 = lshr i33 %tmp0, 9
  %tmp1 = shl i33 %b, 9
  %b_24 = lshr i33 %tmp1, 9
  %tmp2 = mul i33 %a_24, %b_24
  %hi = lshr i33 %tmp2, 32        ; take bit 32, the only set-able high bit
  %trunc = trunc i33 %hi to i32
  store i32 %trunc, i32 addrspace(1)* %out
  ret void
}
222
223
; Make sure the created any_extend is ignored to use the real bits
; being multiplied. This is a non-kernel (callable) function: i24
; arguments arrive in v0/v1 and it returns through s_setpc_b64.
; The checks expect every multiply to be selected as mul_u24; the
; 0x1fffe mask reflects the 17 demanded result bits (the low bit is
; presumably known zero since 234 is even — confirm with llc).

; GCN-LABEL: {{^}}test_umul24_anyextend_i24_src0_src1:
; GCN-DAG: v_mul_u32_u24_e32 v0, 0xea, v0
; GCN-DAG: v_mul_u32_u24_e32 v1, 0x39b, v1
; GCN: v_mul_u32_u24_e32 v0, v0, v1
; GCN: v_and_b32_e32 v0, 0x1fffe, v0
; GCN: v_mul_u32_u24_e32 v0, 0x63, v0
; GCN: s_setpc_b64
define i17 @test_umul24_anyextend_i24_src0_src1(i24 %a, i24 %b) {
entry:
  %aa = mul i24 %a, 234           ; 0xea
  %bb = mul i24 %b, 923           ; 0x39b
  %a_32 = zext i24 %aa to i32
  %b_32 = zext i24 %bb to i32
  %mul = mul i32 %a_32, %b_32
  %trunc = trunc i32 %mul to i17
  %arst = mul i17 %trunc, 99      ; 0x63
  ret i17 %arst
}
245
; Same as the i24 case above, but with i23 inputs: 23 bits is narrower
; than 24, so explicit masking with 0x7fffff is required before the
; 24-bit multiplies can be used.
; GCN-LABEL: {{^}}test_umul24_anyextend_i23_src0_src1:
; GCN: s_mov_b32 [[U23_MASK:s[0-9]+]], 0x7fffff
; GCN-DAG: v_and_b32_e32 v0, [[U23_MASK]], v0
; GCN-DAG: v_and_b32_e32 v1, [[U23_MASK]], v1
; GCN-DAG: v_mul_u32_u24_e32 v0, 0xea, v0
; GCN-DAG: v_mul_u32_u24_e32 v1, 0x39b, v1
; NOTE(review): the next check hard-codes register s6 even though the
; same mask value is already captured as U23_MASK above; reusing the
; capture would be more robust against register-allocation changes —
; verify with llc that both refer to the same register before changing.
; GCN: v_and_b32_e32 v1, s6, v1
; GCN: v_and_b32_e32 v0, 0x7ffffe, v0
; GCN: v_mul_u32_u24_e32 v0, v0, v1
; GCN: v_and_b32_e32 v0, 0x1fffe, v0
; GCN: v_mul_u32_u24_e32 v0, 0x63, v0
; GCN: s_setpc_b64
define i17 @test_umul24_anyextend_i23_src0_src1(i23 %a, i23 %b) {
entry:
  %aa = mul i23 %a, 234           ; 0xea
  %bb = mul i23 %b, 923           ; 0x39b
  %a_32 = zext i23 %aa to i32
  %b_32 = zext i23 %bb to i32
  %mul = mul i32 %a_32, %b_32
  %trunc = trunc i32 %mul to i17
  %arst = mul i17 %trunc, 99      ; 0x63
  ret i17 %arst
}
269