; RUN: llc -debugify-and-strip-all-safe -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s
; RUN: llc -debugify-and-strip-all-safe -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 -global-isel -pass-remarks-missed=gisel* 2>&1 | FileCheck %s --check-prefixes=CHECK,FALLBACK

; FALLBACK-NOT: remark

@var32 = global i32 0
@var64 = global i64 0

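; Check that a constant left shift feeding an add/sub folds into the
; instruction's shifted-register operand form (lsl #N) instead of being
; emitted as a separate shift.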
define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_lsl_arith:

  %rhs1 = load volatile i32, i32* @var32
  %shift1 = shl i32 %rhs1, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18

  %rhs2 = load volatile i32, i32* @var32
  %shift2 = shl i32 %rhs2, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31

  %rhs3 = load volatile i32, i32* @var32
  %shift3 = shl i32 %rhs3, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5

; Subtraction is not commutative: only the second source operand can carry
; the shift, so a shifted first operand must not fold.
  %rhs4 = load volatile i32, i32* @var32
  %shift4 = shl i32 %rhs4, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19

  %lhs4a = load volatile i32, i32* @var32
  %shift4a = shl i32 %lhs4a, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15

  %rhs5 = load volatile i64, i64* @var64
  %shift5 = shl i64 %rhs5, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18

  %rhs6 = load volatile i64, i64* @var64
  %shift6 = shl i64 %rhs6, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31

  %rhs7 = load volatile i64, i64* @var64
  %shift7 = shl i64 %rhs7, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5

; Subtraction is not commutative!
  %rhs8 = load volatile i64, i64* @var64
  %shift8 = shl i64 %rhs8, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19

  %lhs8a = load volatile i64, i64* @var64
  %shift8a = shl i64 %lhs8a, 60
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsl #60

  ret void
; CHECK: ret
}

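; As above, but for logical right shifts folding into the lsr #N operand form.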
define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_lsr_arith:

  %shift1 = lshr i32 %rhs32, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #18

  %shift2 = lshr i32 %rhs32, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #31

  %shift3 = lshr i32 %rhs32, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #5

; Subtraction is not commutative!
  %shift4 = lshr i32 %rhs32, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #19

  %shift4a = lshr i32 %lhs32, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsr #15

  %shift5 = lshr i64 %rhs64, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #18

  %shift6 = lshr i64 %rhs64, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #31

  %shift7 = lshr i64 %rhs64, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #5

; Subtraction is not commutative!
  %shift8 = lshr i64 %rhs64, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #19

  %shift8a = lshr i64 %lhs64, 45
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsr #45

  ret void
; CHECK: ret
}

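; As above, but for arithmetic right shifts folding into the asr #N operand form.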
define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_asr_arith:

  %shift1 = ashr i32 %rhs32, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #18

  %shift2 = ashr i32 %rhs32, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #31

  %shift3 = ashr i32 %rhs32, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #5

; Subtraction is not commutative!
  %shift4 = ashr i32 %rhs32, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #19

  %shift4a = ashr i32 %lhs32, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, asr #15

  %shift5 = ashr i64 %rhs64, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #18

  %shift6 = ashr i64 %rhs64, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #31

  %shift7 = ashr i64 %rhs64, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #5

; Subtraction is not commutative!
  %shift8 = ashr i64 %rhs64, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #19

  %shift8a = ashr i64 %lhs64, 45
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, asr #45

  ret void
; CHECK: ret
}

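; cmp also accepts a shifted-register operand, so a shift feeding an icmp
; should fold into the comparison itself.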
define void @test_cmp(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64, i32 %v) {
; CHECK-LABEL: test_cmp:

  %shift1 = shl i32 %rhs32, 13
  %tst1 = icmp uge i32 %lhs32, %shift1
  br i1 %tst1, label %t2, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsl #13

t2:
  store volatile i32 %v, i32* @var32
  %shift2 = lshr i32 %rhs32, 20
  %tst2 = icmp ne i32 %lhs32, %shift2
  br i1 %tst2, label %t3, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsr #20

t3:
  store volatile i32 %v, i32* @var32
  %shift3 = ashr i32 %rhs32, 9
  %tst3 = icmp ne i32 %lhs32, %shift3
  br i1 %tst3, label %t4, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, asr #9

t4:
  store volatile i32 %v, i32* @var32
  %shift4 = shl i64 %rhs64, 43
  %tst4 = icmp uge i64 %lhs64, %shift4
  br i1 %tst4, label %t5, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsl #43

t5:
  store volatile i32 %v, i32* @var32
  %shift5 = lshr i64 %rhs64, 20
  %tst5 = icmp ne i64 %lhs64, %shift5
  br i1 %tst5, label %t6, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsr #20

t6:
  store volatile i32 %v, i32* @var32
  %shift6 = ashr i64 %rhs64, 59
  %tst6 = icmp ne i64 %lhs64, %shift6
  br i1 %tst6, label %t7, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, asr #59

t7:
  store volatile i32 %v, i32* @var32
  br label %end

end:
  ret void
; CHECK: ret
}

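; An icmp against "sub 0, x" can use cmn (adds against the zero register),
; but only for conditions where cmn's flags match those of the equivalent
; neg + cmp sequence; the cases below cover both the foldable and the
; unfoldable conditions.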
define i32 @test_cmn(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_cmn:

  %shift1 = shl i32 %rhs32, 13
  %val1 = sub i32 0, %shift1
  %tst1 = icmp uge i32 %lhs32, %val1
  br i1 %tst1, label %t2, label %end
  ; Important that this isn't lowered to a cmn instruction because if %rhs32 ==
  ; 0 then the results will differ.
; CHECK: neg [[RHS:w[0-9]+]], {{w[0-9]+}}, lsl #13
; CHECK: cmp {{w[0-9]+}}, [[RHS]]

t2:
  %shift2 = lshr i32 %rhs32, 20
  %val2 = sub i32 0, %shift2
  %tst2 = icmp ne i32 %lhs32, %val2
  br i1 %tst2, label %t3, label %end
; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, lsr #20

t3:
  %shift3 = ashr i32 %rhs32, 9
  %val3 = sub i32 0, %shift3
  %tst3 = icmp eq i32 %lhs32, %val3
  br i1 %tst3, label %t4, label %end
; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, asr #9

t4:
  %shift4 = shl i64 %rhs64, 43
  %val4 = sub i64 0, %shift4
  %tst4 = icmp slt i64 %lhs64, %val4
  br i1 %tst4, label %t5, label %end
  ; Again, it's important that cmn isn't used here in case %rhs64 == 0.
; CHECK: neg [[RHS:x[0-9]+]], {{x[0-9]+}}, lsl #43
; CHECK: cmp {{x[0-9]+}}, [[RHS]]

t5:
  %shift5 = lshr i64 %rhs64, 20
  %val5 = sub i64 0, %shift5
  %tst5 = icmp ne i64 %lhs64, %val5
  br i1 %tst5, label %t6, label %end
; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, lsr #20

t6:
  %shift6 = ashr i64 %rhs64, 59
  %val6 = sub i64 0, %shift6
  %tst6 = icmp ne i64 %lhs64, %val6
  br i1 %tst6, label %t7, label %end
; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, asr #59

t7:
  ret i32 1

end:
  ret i32 0
; CHECK: ret
}