1; RUN: llc -debugify-and-strip-all-safe -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s
2; RUN: llc -debugify-and-strip-all-safe -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 -global-isel -pass-remarks-missed=gisel* 2>&1 | FileCheck %s --check-prefixes=GISEL,FALLBACK
3
4; FALLBACK-NOT: remark
5
; Globals accessed with volatile loads/stores so each add/sub under test stays
; live and is emitted in source order (nothing gets folded away or reordered).
@var32 = global i32 0
@var64 = global i64 0
8
;------------------------------------------------------------------------------
; Verify that a constant left shift (shl) feeding an add/sub is selected as a
; single AArch64 instruction with an "lsl #N" shifted-register operand, for
; both i32 (w-registers) and i64 (x-registers), under SelectionDAG and
; GlobalISel. GlobalISel currently emits the flag-setting forms (subs/negs)
; where SelectionDAG emits sub/neg; both directive sets are checked.
;------------------------------------------------------------------------------
define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_lsl_arith:
; GISEL-LABEL: test_lsl_arith:

  %rhs1 = load volatile i32, i32* @var32
  %shift1 = shl i32 %rhs1, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18
; GISEL: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18

; add is commutative, so the shift may appear as either IR operand.
  %rhs2 = load volatile i32, i32* @var32
  %shift2 = shl i32 %rhs2, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
; GISEL: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31

  %rhs3 = load volatile i32, i32* @var32
  %shift3 = shl i32 %rhs3, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5
; GISEL: subs {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5

; Subtraction is not commutative!
; Only the second (subtrahend) operand of sub can carry the shift, so a
; shifted minuend must NOT be folded into the sub.
  %rhs4 = load volatile i32, i32* @var32
  %shift4 = shl i32 %rhs4, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19
; GISEL-NOT: sub{{[s]?}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19

; sub-from-zero of a shifted value prints as the neg alias (sub from wzr/xzr),
; keeping the shifted operand.
  %lhs4a = load volatile i32, i32* @var32
  %shift4a = shl i32 %lhs4a, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15
; GISEL: negs {{w[0-9]+}}, {{w[0-9]+}}, lsl #15

; Same set of patterns again for 64-bit values.
  %rhs5 = load volatile i64, i64* @var64
  %shift5 = shl i64 %rhs5, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18
; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18

  %rhs6 = load volatile i64, i64* @var64
  %shift6 = shl i64 %rhs6, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31
; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31

  %rhs7 = load volatile i64, i64* @var64
  %shift7 = shl i64 %rhs7, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5
; GISEL: subs {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5

; Subtraction is not commutative!
  %rhs8 = load volatile i64, i64* @var64
  %shift8 = shl i64 %rhs8, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19
; GISEL-NOT: sub{{[s]?}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19

  %lhs8a = load volatile i64, i64* @var64
  %shift8a = shl i64 %lhs8a, 60
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsl #60
; GISEL: negs {{x[0-9]+}}, {{x[0-9]+}}, lsl #60

  ret void
; CHECK: ret
}
88
;------------------------------------------------------------------------------
; Verify that a constant logical right shift (lshr) feeding an add/sub is
; selected as a single AArch64 instruction with an "lsr #N" shifted-register
; operand, for i32 and i64. Unlike the lsl test, the shifted values come
; straight from the function arguments; the volatile stores keep each result
; live and in order.
;------------------------------------------------------------------------------
define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_lsr_arith:

  %shift1 = lshr i32 %rhs32, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #18
; GISEL: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #18

; add is commutative, so the shift may appear as either IR operand.
  %shift2 = lshr i32 %rhs32, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #31
; GISEL: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #31

  %shift3 = lshr i32 %rhs32, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #5
; GISEL: subs {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #5

; Subtraction is not commutative!
; A shifted minuend must NOT be folded into the sub.
  %shift4 = lshr i32 %rhs32, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #19
; GISEL-NOT: sub{{[s]?}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #19

; sub-from-zero prints as the neg alias with the shifted operand preserved.
  %shift4a = lshr i32 %lhs32, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsr #15
; GISEL: negs {{w[0-9]+}}, {{w[0-9]+}}, lsr #15

; Same set of patterns again for 64-bit values.
  %shift5 = lshr i64 %rhs64, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #18
; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #18

  %shift6 = lshr i64 %rhs64, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #31
; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #31

  %shift7 = lshr i64 %rhs64, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #5
; GISEL: subs {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #5

; Subtraction is not commutative!
  %shift8 = lshr i64 %rhs64, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #19
; GISEL-NOT: sub{{[s]?}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #19

  %shift8a = lshr i64 %lhs64, 45
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsr #45
; GISEL: negs {{x[0-9]+}}, {{x[0-9]+}}, lsr #45

  ret void
; CHECK: ret
; GISEL: ret
}
158
;------------------------------------------------------------------------------
; Verify that a constant arithmetic right shift (ashr) feeding an add/sub is
; selected as a single AArch64 instruction with an "asr #N" shifted-register
; operand, for i32 and i64. Volatile stores keep each result live and ordered.
;------------------------------------------------------------------------------
define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_asr_arith:

  %shift1 = ashr i32 %rhs32, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #18
; GISEL: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #18

; add is commutative, so the shift may appear as either IR operand.
  %shift2 = ashr i32 %rhs32, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #31
; GISEL: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #31

  %shift3 = ashr i32 %rhs32, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #5
; GISEL: subs {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #5

; Subtraction is not commutative!
; A shifted minuend must NOT be folded into the sub.
  %shift4 = ashr i32 %rhs32, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #19
; GISEL-NOT: sub{{[s]?}}  {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #19

; sub-from-zero prints as the neg alias with the shifted operand preserved.
  %shift4a = ashr i32 %lhs32, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, asr #15
; GISEL: negs {{w[0-9]+}}, {{w[0-9]+}}, asr #15

; Same set of patterns again for 64-bit values.
  %shift5 = ashr i64 %rhs64, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #18
; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #18

  %shift6 = ashr i64 %rhs64, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #31
; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #31

  %shift7 = ashr i64 %rhs64, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #5
; GISEL: subs {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #5

; Subtraction is not commutative!
  %shift8 = ashr i64 %rhs64, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #19
; GISEL-NOT: sub{{[s]?}}  {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #19

  %shift8a = ashr i64 %lhs64, 45
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, asr #45
; GISEL: negs {{x[0-9]+}}, {{x[0-9]+}}, asr #45

  ret void
; CHECK: ret
}
227
;------------------------------------------------------------------------------
; Verify that an icmp against a shifted value is selected as a cmp instruction
; with an lsl/lsr/asr shifted-register operand. Each comparison feeds a branch
; in its own basic block; the volatile store of %v at each block entry keeps
; the blocks from being merged or tail-duplicated away.
;------------------------------------------------------------------------------
define void @test_cmp(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64, i32 %v) {
; CHECK-LABEL: test_cmp:

  %shift1 = shl i32 %rhs32, 13
  %tst1 = icmp uge i32 %lhs32, %shift1
  br i1 %tst1, label %t2, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsl #13

t2:
  store volatile i32 %v, i32* @var32
  %shift2 = lshr i32 %rhs32, 20
  %tst2 = icmp ne i32 %lhs32, %shift2
  br i1 %tst2, label %t3, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsr #20

t3:
  store volatile i32 %v, i32* @var32
  %shift3 = ashr i32 %rhs32, 9
  %tst3 = icmp ne i32 %lhs32, %shift3
  br i1 %tst3, label %t4, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, asr #9

; Same three shift kinds again for 64-bit comparisons.
t4:
  store volatile i32 %v, i32* @var32
  %shift4 = shl i64 %rhs64, 43
  %tst4 = icmp uge i64 %lhs64, %shift4
  br i1 %tst4, label %t5, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsl #43

t5:
  store volatile i32 %v, i32* @var32
  %shift5 = lshr i64 %rhs64, 20
  %tst5 = icmp ne i64 %lhs64, %shift5
  br i1 %tst5, label %t6, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsr #20

t6:
  store volatile i32 %v, i32* @var32
  %shift6 = ashr i64 %rhs64, 59
  %tst6 = icmp ne i64 %lhs64, %shift6
  br i1 %tst6, label %t7, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, asr #59

t7:
  store volatile i32 %v, i32* @var32
  br label %end

end:
  ret void
; CHECK: ret
}
279
280define i32 @test_cmn(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
281; CHECK-LABEL: test_cmn:
282
283  %shift1 = shl i32 %rhs32, 13
284  %val1 = sub i32 0, %shift1
285  %tst1 = icmp uge i32 %lhs32, %val1
286  br i1 %tst1, label %t2, label %end
287  ; Important that this isn't lowered to a cmn instruction because if %rhs32 ==
288  ; 0 then the results will differ.
289; CHECK: neg [[RHS:w[0-9]+]], {{w[0-9]+}}, lsl #13
290; CHECK: cmp {{w[0-9]+}}, [[RHS]]
291; GISEL: negs [[RHS:w[0-9]+]], {{w[0-9]+}}, lsl #13
292; GISEL: cmp {{w[0-9]+}}, [[RHS]]
293
294t2:
295  %shift2 = lshr i32 %rhs32, 20
296  %val2 = sub i32 0, %shift2
297  %tst2 = icmp ne i32 %lhs32, %val2
298  br i1 %tst2, label %t3, label %end
299; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, lsr #20
300
301t3:
302  %shift3 = ashr i32 %rhs32, 9
303  %val3 = sub i32 0, %shift3
304  %tst3 = icmp eq i32 %lhs32, %val3
305  br i1 %tst3, label %t4, label %end
306; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, asr #9
307
308t4:
309  %shift4 = shl i64 %rhs64, 43
310  %val4 = sub i64 0, %shift4
311  %tst4 = icmp slt i64 %lhs64, %val4
312  br i1 %tst4, label %t5, label %end
313  ; Again, it's important that cmn isn't used here in case %rhs64 == 0.
314; CHECK: neg [[RHS:x[0-9]+]], {{x[0-9]+}}, lsl #43
315; CHECK: cmp {{x[0-9]+}}, [[RHS]]
316; GISEL: negs [[RHS:x[0-9]+]], {{x[0-9]+}}, lsl #43
317; GISEL: cmp {{x[0-9]+}}, [[RHS]]
318
319t5:
320  %shift5 = lshr i64 %rhs64, 20
321  %val5 = sub i64 0, %shift5
322  %tst5 = icmp ne i64 %lhs64, %val5
323  br i1 %tst5, label %t6, label %end
324; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, lsr #20
325
326t6:
327  %shift6 = ashr i64 %rhs64, 59
328  %val6 = sub i64 0, %shift6
329  %tst6 = icmp ne i64 %lhs64, %val6
330  br i1 %tst6, label %t7, label %end
331; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, asr #59
332
333t7:
334  ret i32 1
335end:
336
337  ret i32 0
338; CHECK: ret
339; GISEL: ret
340}
341