; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC --check-prefix=GCN --check-prefix=GCN1
; RUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN --check-prefix=GCN2
; RUN: llc < %s -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN --check-prefix=GCN2

declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone

; FUNC-LABEL: {{^}}u32_mad24:
; EG: MULADD_UINT24
; SI: v_mad_u32_u24
; VI: v_mad_u32_u24

; Masks both multiplicands to their low 24 bits via a shl/lshr pair, then
; mul+add; the backend should recognize this as a u24 mad (MULADD_UINT24 /
; v_mad_u32_u24).
define amdgpu_kernel void @u32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
entry:
  %0 = shl i32 %a, 8
  %a_24 = lshr i32 %0, 8          ; %a_24 = %a & 0xFFFFFF
  %1 = shl i32 %b, 8
  %b_24 = lshr i32 %1, 8          ; %b_24 = %b & 0xFFFFFF
  %2 = mul i32 %a_24, %b_24
  %3 = add i32 %2, %c
  store i32 %3, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}i16_mad24:
; The order of A and B does not matter.
; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended.
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 16
; FIXME: Should be using scalar instructions here.
; GCN1: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN1: v_bfe_i32 v{{[0-9]}}, [[MAD]], 0, 16
; GCN2:	s_mul_i32 [[MUL:s[0-9]]], {{[s][0-9], [s][0-9]}}
; GCN2:	s_add_i32 [[MAD:s[0-9]]], [[MUL]], s{{[0-9]}}
; GCN2:	s_sext_i32_i16 s0, [[MAD]]
; GCN2:	v_mov_b32_e32 v0, s0
; i16 mul+add whose result is sign-extended to i32; the 16-bit mad fits in a
; u24 mad, followed by a sign-extending bitfield extract of the low 16 bits.
define amdgpu_kernel void @i16_mad24(i32 addrspace(1)* %out, i16 %a, i16 %b, i16 %c) {
entry:
  %0 = mul i16 %a, %b
  %1 = add i16 %0, %c
  %2 = sext i16 %1 to i32
  store i32 %2, i32 addrspace(1)* %out
  ret void
}

; FIXME: Need to handle non-uniform case for function below (load without gep).
; FUNC-LABEL: {{^}}i8_mad24:
; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended.
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 8
; GCN1: v_mad_u32_u24 [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN1: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 8
; GCN2:	s_mul_i32 [[MUL:s[0-9]]], {{[s][0-9], [s][0-9]}}
; GCN2:	s_add_i32 [[MAD:s[0-9]]], [[MUL]], s{{[0-9]}}
; GCN2:	s_sext_i32_i8 s0, [[MAD]]
; GCN2:	v_mov_b32_e32 v0, s0
; i8 mul+add whose result is sign-extended to i32; same shape as i16_mad24 but
; the final extract is of the low 8 bits.
define amdgpu_kernel void @i8_mad24(i32 addrspace(1)* %out, i8 %a, i8 %b, i8 %c) {
entry:
  %0 = mul i8 %a, %b
  %1 = add i8 %0, %c
  %2 = sext i8 %1 to i32
  store i32 %2, i32 addrspace(1)* %out
  ret void
}

; This tests for a bug where the mad_u24 pattern matcher would call
; SimplifyDemandedBits on the first operand of the mul instruction
; assuming that the pattern would be matched to a 24-bit mad.  This
; led to some instructions being incorrectly erased when the entire
; 24-bit mad pattern wasn't being matched.

; Check that the select instruction is not deleted.
; FUNC-LABEL: {{^}}i24_i32_i32_mad:
; EG: CNDE_INT
; SI: v_cndmask
; GCN2: s_cselect
; The select feeding the mul must survive even though the full 24-bit mad
; pattern does not match here (regression test for an over-eager
; SimplifyDemandedBits call in the mad_u24 matcher).
define amdgpu_kernel void @i24_i32_i32_mad(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
entry:
  %0 = ashr i32 %a, 8
  %1 = icmp ne i32 %c, 0
  %2 = select i1 %1, i32 %0, i32 34
  %3 = mul i32 %2, %c
  %4 = add i32 %3, %d
  store i32 %4, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}extra_and:
; SI-NOT: v_and
; SI: v_mad_u32_u24
; SI: v_mad_u32_u24
; Loop where every mul operand is explicitly masked to 24 bits; all the ands
; should fold into v_mad_u32_u24 with no standalone v_and remaining.
define amdgpu_kernel void @extra_and(i32 addrspace(1)* %arg, i32 %arg2, i32 %arg3) {
bb:
  br label %bb4

bb4:                                              ; preds = %bb4, %bb
  %tmp = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp5 = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp6 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp7 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp8 = and i32 %tmp7, 16777215       ; mask to low 24 bits (0xFFFFFF)
  %tmp9 = and i32 %tmp6, 16777215
  %tmp10 = and i32 %tmp5, 16777215
  %tmp11 = and i32 %tmp, 16777215
  %tmp12 = mul i32 %tmp8, %tmp11
  %tmp13 = add i32 %arg2, %tmp12
  %tmp14 = mul i32 %tmp9, %tmp11
  %tmp15 = add i32 %arg3, %tmp14
  %tmp16 = add nuw nsw i32 %tmp13, %tmp15
  %tmp17 = icmp eq i32 %tmp16, 8
  br i1 %tmp17, label %bb18, label %bb4

bb18:                                             ; preds = %bb4
  store i32 %tmp16, i32 addrspace(1)* %arg
  ret void
}

; FUNC-LABEL: {{^}}dont_remove_shift:
; SI: v_lshr
; SI: v_mad_u32_u24
; SI: v_mad_u32_u24
; Same loop shape as extra_and, but the 24-bit values are produced by lshr
; instead of and; the shifts must be kept even though the muls become
; v_mad_u32_u24.
define amdgpu_kernel void @dont_remove_shift(i32 addrspace(1)* %arg, i32 %arg2, i32 %arg3) {
bb:
  br label %bb4

bb4:                                              ; preds = %bb4, %bb
  %tmp = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp5 = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp6 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp7 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp8 = lshr i32 %tmp7, 8
  %tmp9 = lshr i32 %tmp6, 8
  %tmp10 = lshr i32 %tmp5, 8
  %tmp11 = lshr i32 %tmp, 8
  %tmp12 = mul i32 %tmp8, %tmp11
  %tmp13 = add i32 %arg2, %tmp12
  %tmp14 = mul i32 %tmp9, %tmp11
  %tmp15 = add i32 %arg3, %tmp14
  %tmp16 = add nuw nsw i32 %tmp13, %tmp15
  %tmp17 = icmp eq i32 %tmp16, 8
  br i1 %tmp17, label %bb18, label %bb4

bb18:                                             ; preds = %bb4
  store i32 %tmp16, i32 addrspace(1)* %arg
  ret void
}

; FUNC-LABEL: {{^}}i8_mad_sat_16:
; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended.
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 8
; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
; GCN: v_med3_i32 v{{[0-9]}}, [[EXT]],
; i8 multiply-add in i16, clamped to [-128, 127] (signed saturate to i8).
; The clamp is a max-then-min select pair, which should become v_med3_i32.
define amdgpu_kernel void @i8_mad_sat_16(i8 addrspace(1)* %out, i8 addrspace(1)* %in0, i8 addrspace(1)* %in1, i8 addrspace(1)* %in2, i64 addrspace(5)* %idx) {
entry:
  %retval.0.i = load i64, i64 addrspace(5)* %idx
  %arrayidx = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 %retval.0.i
  %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 %retval.0.i
  %arrayidx4 = getelementptr inbounds i8, i8 addrspace(1)* %in2, i64 %retval.0.i
  %l1 = load i8, i8 addrspace(1)* %arrayidx, align 1
  %l2 = load i8, i8 addrspace(1)* %arrayidx2, align 1
  %l3 = load i8, i8 addrspace(1)* %arrayidx4, align 1
  %conv1.i = sext i8 %l1 to i16
  %conv3.i = sext i8 %l2 to i16
  %conv5.i = sext i8 %l3 to i16
  %mul.i.i.i = mul nsw i16 %conv3.i, %conv1.i
  %add.i.i = add i16 %mul.i.i.i, %conv5.i
  %c4 = icmp sgt i16 %add.i.i, -128
  %cond.i.i = select i1 %c4, i16 %add.i.i, i16 -128   ; clamp low end
  %c5 = icmp slt i16 %cond.i.i, 127
  %cond13.i.i = select i1 %c5, i16 %cond.i.i, i16 127 ; clamp high end
  %conv8.i = trunc i16 %cond13.i.i to i8
  %arrayidx7 = getelementptr inbounds i8, i8 addrspace(1)* %out, i64 %retval.0.i
  store i8 %conv8.i, i8 addrspace(1)* %arrayidx7, align 1
  ret void
}

; FUNC-LABEL: {{^}}i8_mad_32:
; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended.
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 8
; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
; i8 operands widened to i16 for mul+add, result sign-extended to i32 and
; stored; the mad should be formed in 16 bits with a 16-bit sign extract.
define amdgpu_kernel void @i8_mad_32(i32 addrspace(1)* %out, i8 addrspace(1)* %a, i8 addrspace(1)* %b, i8 addrspace(1)* %c, i64 addrspace(5)* %idx) {
entry:
  %retval.0.i = load i64, i64 addrspace(5)* %idx
  %arrayidx = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 %retval.0.i
  %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %b, i64 %retval.0.i
  %arrayidx4 = getelementptr inbounds i8, i8 addrspace(1)* %c, i64 %retval.0.i
  %la = load i8, i8 addrspace(1)* %arrayidx, align 1
  %lb = load i8, i8 addrspace(1)* %arrayidx2, align 1
  %lc = load i8, i8 addrspace(1)* %arrayidx4, align 1
  %exta = sext i8 %la to i16
  %extb = sext i8 %lb to i16
  %extc = sext i8 %lc to i16
  %mul = mul i16 %exta, %extb
  %mad = add i16 %mul, %extc
  %mad_ext = sext i16 %mad to i32
  store i32 %mad_ext, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}i8_mad_64:
; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended.
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 8
; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
; Same as i8_mad_32 but the 16-bit mad result is sign-extended to i64 before
; the store.
define amdgpu_kernel void @i8_mad_64(i64 addrspace(1)* %out, i8 addrspace(1)* %a, i8 addrspace(1)* %b, i8 addrspace(1)* %c, i64 addrspace(5)* %idx) {
entry:
  %retval.0.i = load i64, i64 addrspace(5)* %idx
  %arrayidx = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 %retval.0.i
  %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %b, i64 %retval.0.i
  %arrayidx4 = getelementptr inbounds i8, i8 addrspace(1)* %c, i64 %retval.0.i
  %la = load i8, i8 addrspace(1)* %arrayidx, align 1
  %lb = load i8, i8 addrspace(1)* %arrayidx2, align 1
  %lc = load i8, i8 addrspace(1)* %arrayidx4, align 1
  %exta = sext i8 %la to i16
  %extb = sext i8 %lb to i16
  %extc = sext i8 %lc to i16
  %mul = mul i16 %exta, %extb
  %mad = add i16 %mul, %extc
  %mad_ext = sext i16 %mad to i64
  store i64 %mad_ext, i64 addrspace(1)* %out
  ret void
}

; The ands are asserting the high bits are 0. SimplifyDemandedBits on
; the adds would remove the ands before the target combine on the mul
; had a chance to form mul24. The mul combine would then see
; extractelement with no known bits and fail. All of the mul/add
; combos in this loop should form v_mad_u32_u24.

; FUNC-LABEL: {{^}}mad24_known_bits_destroyed:
; GCN: v_mad_u32_u24
; GCN: v_mad_u32_u24
; GCN: v_mad_u32_u24
; GCN: v_mad_u32_u24
; GCN: v_mad_u32_u24
; GCN: v_mad_u32_u24
; GCN: v_mad_u32_u24
; GCN: v_mad_u32_u24
; Eight mul/add pairs (four scalar-ish lanes, two rounds each) whose operands
; are all masked to 24 bits; every pair must become v_mad_u32_u24 even though
; SimplifyDemandedBits can strip the ands on the adds before mul24 forms.
define void @mad24_known_bits_destroyed(i32 %arg, <4 x i32> %arg1, <4 x i32> %arg2, <4 x i32> %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 addrspace(1)* %arg7, <4 x i32> addrspace(1)* %arg8) #0 {
bb:
  ; Loop-invariant 24-bit multiplicands extracted from the vector args.
  %tmp = and i32 %arg4, 16777215
  %tmp9 = extractelement <4 x i32> %arg1, i64 1
  %tmp10 = extractelement <4 x i32> %arg3, i64 1
  %tmp11 = and i32 %tmp9, 16777215
  %tmp12 = extractelement <4 x i32> %arg1, i64 2
  %tmp13 = extractelement <4 x i32> %arg3, i64 2
  %tmp14 = and i32 %tmp12, 16777215
  %tmp15 = extractelement <4 x i32> %arg1, i64 3
  %tmp16 = extractelement <4 x i32> %arg3, i64 3
  %tmp17 = and i32 %tmp15, 16777215
  br label %bb19

bb18:                                             ; preds = %bb19
  ret void

bb19:                                             ; preds = %bb19, %bb
  %tmp20 = phi i32 [ %arg, %bb ], [ %tmp40, %bb19 ]
  %tmp21 = phi i32 [ 0, %bb ], [ %tmp54, %bb19 ]
  %tmp22 = phi <4 x i32> [ %arg2, %bb ], [ %tmp53, %bb19 ]
  ; First round of mul/add per lane.
  %tmp23 = and i32 %tmp20, 16777215
  %tmp24 = mul i32 %tmp23, %tmp
  %tmp25 = add i32 %tmp24, %arg5
  %tmp26 = extractelement <4 x i32> %tmp22, i64 1
  %tmp27 = and i32 %tmp26, 16777215
  %tmp28 = mul i32 %tmp27, %tmp11
  %tmp29 = add i32 %tmp28, %tmp10
  %tmp30 = extractelement <4 x i32> %tmp22, i64 2
  %tmp31 = and i32 %tmp30, 16777215
  %tmp32 = mul i32 %tmp31, %tmp14
  %tmp33 = add i32 %tmp32, %tmp13
  %tmp34 = extractelement <4 x i32> %tmp22, i64 3
  %tmp35 = and i32 %tmp34, 16777215
  %tmp36 = mul i32 %tmp35, %tmp17
  %tmp37 = add i32 %tmp36, %tmp16
  ; Second round, feeding the loop-carried values.
  %tmp38 = and i32 %tmp25, 16777215
  %tmp39 = mul i32 %tmp38, %tmp
  %tmp40 = add i32 %tmp39, %arg5
  store i32 %tmp40, i32 addrspace(1)* %arg7
  %tmp41 = insertelement <4 x i32> undef, i32 %tmp40, i32 0
  %tmp42 = and i32 %tmp29, 16777215
  %tmp43 = mul i32 %tmp42, %tmp11
  %tmp44 = add i32 %tmp43, %tmp10
  %tmp45 = insertelement <4 x i32> %tmp41, i32 %tmp44, i32 1
  %tmp46 = and i32 %tmp33, 16777215
  %tmp47 = mul i32 %tmp46, %tmp14
  %tmp48 = add i32 %tmp47, %tmp13
  %tmp49 = insertelement <4 x i32> %tmp45, i32 %tmp48, i32 2
  %tmp50 = and i32 %tmp37, 16777215
  %tmp51 = mul i32 %tmp50, %tmp17
  %tmp52 = add i32 %tmp51, %tmp16
  %tmp53 = insertelement <4 x i32> %tmp49, i32 %tmp52, i32 3
  store <4 x i32> %tmp53, <4 x i32> addrspace(1)* %arg8
  %tmp54 = add nuw nsw i32 %tmp21, 1
  %tmp55 = icmp eq i32 %tmp54, %arg6
  br i1 %tmp55, label %bb18, label %bb19
}
attributes #0 = { norecurse nounwind }