; RUN: llc -O3 -mtriple=thumb-eabi -mcpu=cortex-a9 %s -o - | FileCheck %s -check-prefix=A9

; @simple is the most basic chain of address induction variables. Chaining
; saves at least one register and avoids complex addressing and setup
; code.
;
; A9: @simple
; no expensive address computation in the preheader
; A9: lsl
; A9-NOT: lsl
; A9: %loop
; no complex address modes
; A9-NOT: lsl
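;
; Rough C equivalent of @simple (a sketch for orientation, not the exact
; source; 'x' is the element stride between the four loads per iteration):
;
;   int simple(int *a, int *b, int x) {
;     int s = 0, *p = a;
;     do {
;       s += p[0] + p[x] + p[2 * x] + p[3 * x];
;       p += 4 * x;
;     } while (p != b);
;     return s;
;   }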
define i32 @simple(i32* %a, i32* %b, i32 %x) nounwind {
entry:
  br label %loop
loop:
  %iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
  %s = phi i32 [ 0, %entry ], [ %s4, %loop ]
  %v = load i32, i32* %iv
  %iv1 = getelementptr inbounds i32, i32* %iv, i32 %x
  %v1 = load i32, i32* %iv1
  %iv2 = getelementptr inbounds i32, i32* %iv1, i32 %x
  %v2 = load i32, i32* %iv2
  %iv3 = getelementptr inbounds i32, i32* %iv2, i32 %x
  %v3 = load i32, i32* %iv3
  %s1 = add i32 %s, %v
  %s2 = add i32 %s1, %v1
  %s3 = add i32 %s2, %v2
  %s4 = add i32 %s3, %v3
  %iv4 = getelementptr inbounds i32, i32* %iv3, i32 %x
  %cmp = icmp eq i32* %iv4, %b
  br i1 %cmp, label %exit, label %loop
exit:
  ret i32 %s4
}

; @user is not currently chained because the IV is live across memory ops.
;
; A9: @user
; stride multiples computed in the preheader
; A9: lsl
; A9: lsl
; A9: %loop
; complex address modes
; A9: lsl
; A9: lsl
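;
; Rough C equivalent of @user (a sketch, not the exact source): identical
; to @simple except that the running sum is stored back through the
; current IV, keeping it live across the store:
;
;   int user(int *a, int *b, int x) {
;     int s = 0, *p = a;
;     do {
;       s += p[0] + p[x] + p[2 * x] + p[3 * x];
;       p[0] = s;      /* store through the IV after the next IV is computed */
;       p += 4 * x;
;     } while (p != b);
;     return s;
;   }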
define i32 @user(i32* %a, i32* %b, i32 %x) nounwind {
entry:
  br label %loop
loop:
  %iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
  %s = phi i32 [ 0, %entry ], [ %s4, %loop ]
  %v = load i32, i32* %iv
  %iv1 = getelementptr inbounds i32, i32* %iv, i32 %x
  %v1 = load i32, i32* %iv1
  %iv2 = getelementptr inbounds i32, i32* %iv1, i32 %x
  %v2 = load i32, i32* %iv2
  %iv3 = getelementptr inbounds i32, i32* %iv2, i32 %x
  %v3 = load i32, i32* %iv3
  %s1 = add i32 %s, %v
  %s2 = add i32 %s1, %v1
  %s3 = add i32 %s2, %v2
  %s4 = add i32 %s3, %v3
  %iv4 = getelementptr inbounds i32, i32* %iv3, i32 %x
  store i32 %s4, i32* %iv
  %cmp = icmp eq i32* %iv4, %b
  br i1 %cmp, label %exit, label %loop
exit:
  ret i32 %s4
}

; @extrastride is a slightly more interesting case of a single
; complete chain with multiple strides. The test case IR is what LSR
; used to do, and exactly what we don't want to do. LSR's new IV
; chaining feature should now undo the damage.
;
; A9: extrastride:
; no spills
; A9-NOT: str
; only one stride multiple in the preheader
; A9: lsl
; A9-NOT: {{str r|lsl}}
; A9: %for.body{{$}}
; no complex address modes or reloads
; A9-NOT: {{ldr .*\[sp|lsl}}
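;
; Rough C equivalent of @extrastride (a sketch, not the exact source;
; 's' is %main_stride):
;
;   void extrastride(char *main, int s, int *res, int x, int y, int z) {
;     for (int i = 0; i < z; ++i) {
;       int sum = 0;
;       for (int j = 0; j < 5; ++j)
;         sum += *(int *)(main + j * s);
;       *res = sum;
;       main += 5 * s + x;   /* total IV stride = s*5 + x */
;       res += y;
;     }
;   }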
define void @extrastride(i8* nocapture %main, i32 %main_stride, i32* nocapture %res, i32 %x, i32 %y, i32 %z) nounwind {
entry:
  %cmp8 = icmp eq i32 %z, 0
  br i1 %cmp8, label %for.end, label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  %add.ptr.sum = shl i32 %main_stride, 1 ; s*2
  %add.ptr1.sum = add i32 %add.ptr.sum, %main_stride ; s*3
  %add.ptr2.sum = add i32 %x, %main_stride ; s + x
  %add.ptr4.sum = shl i32 %main_stride, 2 ; s*4
  %add.ptr3.sum = add i32 %add.ptr2.sum, %add.ptr4.sum ; total IV stride = s*5+x
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %main.addr.011 = phi i8* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
  %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %res.addr.09 = phi i32* [ %res, %for.body.lr.ph ], [ %add.ptr7, %for.body ]
  %0 = bitcast i8* %main.addr.011 to i32*
  %1 = load i32, i32* %0, align 4
  %add.ptr = getelementptr inbounds i8, i8* %main.addr.011, i32 %main_stride
  %2 = bitcast i8* %add.ptr to i32*
  %3 = load i32, i32* %2, align 4
  %add.ptr1 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr.sum
  %4 = bitcast i8* %add.ptr1 to i32*
  %5 = load i32, i32* %4, align 4
  %add.ptr2 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr1.sum
  %6 = bitcast i8* %add.ptr2 to i32*
  %7 = load i32, i32* %6, align 4
  %add.ptr3 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr4.sum
  %8 = bitcast i8* %add.ptr3 to i32*
  %9 = load i32, i32* %8, align 4
  %add = add i32 %3, %1
  %add4 = add i32 %add, %5
  %add5 = add i32 %add4, %7
  %add6 = add i32 %add5, %9
  store i32 %add6, i32* %res.addr.09, align 4
  %add.ptr6 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr3.sum
  %add.ptr7 = getelementptr inbounds i32, i32* %res.addr.09, i32 %y
  %inc = add i32 %i.010, 1
  %cmp = icmp eq i32 %inc, %z
  br i1 %cmp, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; @foldedidx is an unrolled variant of this loop:
;  for (unsigned long i = 0; i < len; i += s) {
;    c[i] = a[i] + b[i];
;  }
; where 's' can be folded into the addressing mode.
; Consequently, we should *not* form any chains.
;
; A9: foldedidx:
; A9: ldrb{{(.w)?}} {{r[0-9]|lr}}, [{{r[0-9]|lr}}, #3]
define void @foldedidx(i8* nocapture %a, i8* nocapture %b, i8* nocapture %c) nounwind ssp {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.07 = phi i32 [ 0, %entry ], [ %inc.3, %for.body ]
  %arrayidx = getelementptr inbounds i8, i8* %a, i32 %i.07
  %0 = load i8, i8* %arrayidx, align 1
  %conv5 = zext i8 %0 to i32
  %arrayidx1 = getelementptr inbounds i8, i8* %b, i32 %i.07
  %1 = load i8, i8* %arrayidx1, align 1
  %conv26 = zext i8 %1 to i32
  %add = add nsw i32 %conv26, %conv5
  %conv3 = trunc i32 %add to i8
  %arrayidx4 = getelementptr inbounds i8, i8* %c, i32 %i.07
  store i8 %conv3, i8* %arrayidx4, align 1
  %inc1 = or i32 %i.07, 1
  %arrayidx.1 = getelementptr inbounds i8, i8* %a, i32 %inc1
  %2 = load i8, i8* %arrayidx.1, align 1
  %conv5.1 = zext i8 %2 to i32
  %arrayidx1.1 = getelementptr inbounds i8, i8* %b, i32 %inc1
  %3 = load i8, i8* %arrayidx1.1, align 1
  %conv26.1 = zext i8 %3 to i32
  %add.1 = add nsw i32 %conv26.1, %conv5.1
  %conv3.1 = trunc i32 %add.1 to i8
  %arrayidx4.1 = getelementptr inbounds i8, i8* %c, i32 %inc1
  store i8 %conv3.1, i8* %arrayidx4.1, align 1
  %inc.12 = or i32 %i.07, 2
  %arrayidx.2 = getelementptr inbounds i8, i8* %a, i32 %inc.12
  %4 = load i8, i8* %arrayidx.2, align 1
  %conv5.2 = zext i8 %4 to i32
  %arrayidx1.2 = getelementptr inbounds i8, i8* %b, i32 %inc.12
  %5 = load i8, i8* %arrayidx1.2, align 1
  %conv26.2 = zext i8 %5 to i32
  %add.2 = add nsw i32 %conv26.2, %conv5.2
  %conv3.2 = trunc i32 %add.2 to i8
  %arrayidx4.2 = getelementptr inbounds i8, i8* %c, i32 %inc.12
  store i8 %conv3.2, i8* %arrayidx4.2, align 1
  %inc.23 = or i32 %i.07, 3
  %arrayidx.3 = getelementptr inbounds i8, i8* %a, i32 %inc.23
  %6 = load i8, i8* %arrayidx.3, align 1
  %conv5.3 = zext i8 %6 to i32
  %arrayidx1.3 = getelementptr inbounds i8, i8* %b, i32 %inc.23
  %7 = load i8, i8* %arrayidx1.3, align 1
  %conv26.3 = zext i8 %7 to i32
  %add.3 = add nsw i32 %conv26.3, %conv5.3
  %conv3.3 = trunc i32 %add.3 to i8
  %arrayidx4.3 = getelementptr inbounds i8, i8* %c, i32 %inc.23
  store i8 %conv3.3, i8* %arrayidx4.3, align 1
  %inc.3 = add nsw i32 %i.07, 4
  %exitcond.3 = icmp eq i32 %inc.3, 400
  br i1 %exitcond.3, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; @testNeon is an important example of the need for IV chains.
;
; Currently we have two extra add.w's that keep the store address
; live past the next increment because ISEL is unfortunately undoing
; the store chain. ISEL also fails to convert all but one of the stores to
; post-increment addressing. However, the loads should use
; post-increment addressing, with no add's or add.w's beyond the three
; mentioned. Most importantly, there should be no spills or reloads!
;
; A9: testNeon:
; A9: %.lr.ph
; A9: add.w r
; A9-NOT: lsl.w
; A9-NOT: {{ldr|str|adds|add r}}
; A9: vst1.8 {{.*}} [r{{[0-9]+}}], r{{[0-9]+}}
; A9: add.w r
; A9-NOT: {{ldr|str|adds|add r}}
; A9-NOT: add.w r
; A9: bne
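;
; Rough C/NEON sketch of @testNeon (assumes <arm_neon.h>; a sketch of the
; access pattern, not the exact source). Each iteration loads eight
; stride-separated 8-byte rows, concatenates them pairwise into 16-byte
; vectors, stores the four vectors, and accumulates their sum; the IR then
; steps both pointers by a large negative stride:
;
;   void testNeonSketch(uint8_t *ref, int stride, int limit, uint8x16_t *data) {
;     uint8x16_t sum = vdupq_n_u8(0);
;     for (int i = 0; i < limit; ++i) {
;       for (int r = 0; r < 8; r += 2) {
;         uint8x16_t v = vcombine_u8(vld1_u8(ref + r * stride),
;                                    vld1_u8(ref + (r + 1) * stride));
;         data[r / 2] = v;            /* four 16-byte stores per iteration */
;         sum = vaddq_u8(sum, v);
;       }
;       ref -= 8 * stride;
;       data -= 64;
;     }
;     *data = sum;                    /* final reduction store */
;   }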
define hidden void @testNeon(i8* %ref_data, i32 %ref_stride, i32 %limit, <16 x i8>* nocapture %data) nounwind optsize {
  %1 = icmp sgt i32 %limit, 0
  br i1 %1, label %.lr.ph, label %45

.lr.ph:                                           ; preds = %0
  %2 = shl nsw i32 %ref_stride, 1
  %3 = mul nsw i32 %ref_stride, 3
  %4 = shl nsw i32 %ref_stride, 2
  %5 = mul nsw i32 %ref_stride, 5
  %6 = mul nsw i32 %ref_stride, 6
  %7 = mul nsw i32 %ref_stride, 7
  %8 = shl nsw i32 %ref_stride, 3
  %9 = sub i32 0, %8
  %10 = mul i32 %limit, -64
  br label %11

; <label>:11                                      ; preds = %11, %.lr.ph
  %.05 = phi i8* [ %ref_data, %.lr.ph ], [ %42, %11 ]
  %counter.04 = phi i32 [ 0, %.lr.ph ], [ %44, %11 ]
  %result.03 = phi <16 x i8> [ zeroinitializer, %.lr.ph ], [ %41, %11 ]
  %.012 = phi <16 x i8>* [ %data, %.lr.ph ], [ %43, %11 ]
  %12 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %.05, i32 1) nounwind
  %13 = getelementptr inbounds i8, i8* %.05, i32 %ref_stride
  %14 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %13, i32 1) nounwind
  %15 = shufflevector <1 x i64> %12, <1 x i64> %14, <2 x i32> <i32 0, i32 1>
  %16 = bitcast <2 x i64> %15 to <16 x i8>
  %17 = getelementptr inbounds <16 x i8>, <16 x i8>* %.012, i32 1
  store <16 x i8> %16, <16 x i8>* %.012, align 4
  %18 = getelementptr inbounds i8, i8* %.05, i32 %2
  %19 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %18, i32 1) nounwind
  %20 = getelementptr inbounds i8, i8* %.05, i32 %3
  %21 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %20, i32 1) nounwind
  %22 = shufflevector <1 x i64> %19, <1 x i64> %21, <2 x i32> <i32 0, i32 1>
  %23 = bitcast <2 x i64> %22 to <16 x i8>
  %24 = getelementptr inbounds <16 x i8>, <16 x i8>* %.012, i32 2
  store <16 x i8> %23, <16 x i8>* %17, align 4
  %25 = getelementptr inbounds i8, i8* %.05, i32 %4
  %26 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %25, i32 1) nounwind
  %27 = getelementptr inbounds i8, i8* %.05, i32 %5
  %28 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %27, i32 1) nounwind
  %29 = shufflevector <1 x i64> %26, <1 x i64> %28, <2 x i32> <i32 0, i32 1>
  %30 = bitcast <2 x i64> %29 to <16 x i8>
  %31 = getelementptr inbounds <16 x i8>, <16 x i8>* %.012, i32 3
  store <16 x i8> %30, <16 x i8>* %24, align 4
  %32 = getelementptr inbounds i8, i8* %.05, i32 %6
  %33 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %32, i32 1) nounwind
  %34 = getelementptr inbounds i8, i8* %.05, i32 %7
  %35 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %34, i32 1) nounwind
  %36 = shufflevector <1 x i64> %33, <1 x i64> %35, <2 x i32> <i32 0, i32 1>
  %37 = bitcast <2 x i64> %36 to <16 x i8>
  store <16 x i8> %37, <16 x i8>* %31, align 4
  %38 = add <16 x i8> %16, %23
  %39 = add <16 x i8> %38, %30
  %40 = add <16 x i8> %39, %37
  %41 = add <16 x i8> %result.03, %40
  %42 = getelementptr i8, i8* %.05, i32 %9
  %43 = getelementptr inbounds <16 x i8>, <16 x i8>* %.012, i32 -64
  %44 = add nsw i32 %counter.04, 1
  %exitcond = icmp eq i32 %44, %limit
  br i1 %exitcond, label %._crit_edge, label %11

._crit_edge:                                      ; preds = %11
  %scevgep = getelementptr <16 x i8>, <16 x i8>* %data, i32 %10
  br label %45

; <label>:45                                      ; preds = %._crit_edge, %0
  %result.0.lcssa = phi <16 x i8> [ %41, %._crit_edge ], [ zeroinitializer, %0 ]
  %.01.lcssa = phi <16 x i8>* [ %scevgep, %._crit_edge ], [ %data, %0 ]
  store <16 x i8> %result.0.lcssa, <16 x i8>* %.01.lcssa, align 4
  ret void
}

declare <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8*, i32) nounwind readonly

; Handle chains in which the same offset is used for both loads and
; stores to the same array.
; rdar://11410078.
;
; A9: @testReuse
; A9: %for.body
; A9: vld1.8 {d{{[0-9]+}}}, [[[BASE:r[0-9]+]]], [[INC:r[0-9]]]
; A9: vld1.8 {d{{[0-9]+}}}, [[[BASE]]], [[INC]]
; A9: vld1.8 {d{{[0-9]+}}}, [[[BASE]]], [[INC]]
; A9: vld1.8 {d{{[0-9]+}}}, [[[BASE]]], [[INC]]
; A9: vld1.8 {d{{[0-9]+}}}, [[[BASE]]], [[INC]]
; A9: vld1.8 {d{{[0-9]+}}}, [[[BASE]]], [[INC]]
; A9: vld1.8 {d{{[0-9]+}}}, [[[BASE]]], [[INC]]
; A9: vst1.8 {d{{[0-9]+}}}, [[[BASE]]], [[INC]]
; A9: vst1.8 {d{{[0-9]+}}}, [[[BASE]]], [[INC]]
; A9: vst1.8 {d{{[0-9]+}}}, [[[BASE]]], [[INC]]
; A9: vst1.8 {d{{[0-9]+}}}, [[[BASE]]], [[INC]]
; A9: vst1.8 {d{{[0-9]+}}}, [[[BASE]]], [[INC]]
; A9: vst1.8 {d{{[0-9]+}}}, [[[BASE]]]
; A9: bne
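;
; Rough C/NEON sketch of @testReuse (assumes <arm_neon.h>; a sketch, not
; the exact source). Loads come from src + (j-4)*stride for j = 0..7, and
; halving-add results go back to src + (j-3)*stride for j = 0..5, so the
; same offsets serve both loads and stores:
;
;   void testReuseSketch(uint8_t *src, int stride) {
;     for (int i = 0; i < 4; ++i, src += 8) {
;       uint8x8_t v[8];
;       for (int j = 0; j < 8; ++j)
;         v[j] = vld1_u8(src + (j - 4) * stride);
;       for (int j = 0; j < 6; ++j)
;         vst1_u8(src + (j - 3) * stride, vhadd_u8(v[j], v[j + 1]));
;     }
;   }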
define void @testReuse(i8* %src, i32 %stride) nounwind ssp {
entry:
  %mul = shl nsw i32 %stride, 2
  %idx.neg = sub i32 0, %mul
  %mul1 = mul nsw i32 %stride, 3
  %idx.neg2 = sub i32 0, %mul1
  %mul5 = shl nsw i32 %stride, 1
  %idx.neg6 = sub i32 0, %mul5
  %idx.neg10 = sub i32 0, %stride
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.0110 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %src.addr = phi i8* [ %src, %entry ], [ %add.ptr45, %for.body ]
  %add.ptr = getelementptr inbounds i8, i8* %src.addr, i32 %idx.neg
  %vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr, i32 1)
  %add.ptr3 = getelementptr inbounds i8, i8* %src.addr, i32 %idx.neg2
  %vld2 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr3, i32 1)
  %add.ptr7 = getelementptr inbounds i8, i8* %src.addr, i32 %idx.neg6
  %vld3 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr7, i32 1)
  %add.ptr11 = getelementptr inbounds i8, i8* %src.addr, i32 %idx.neg10
  %vld4 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr11, i32 1)
  %vld5 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %src.addr, i32 1)
  %add.ptr17 = getelementptr inbounds i8, i8* %src.addr, i32 %stride
  %vld6 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr17, i32 1)
  %add.ptr20 = getelementptr inbounds i8, i8* %src.addr, i32 %mul5
  %vld7 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr20, i32 1)
  %add.ptr23 = getelementptr inbounds i8, i8* %src.addr, i32 %mul1
  %vld8 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr23, i32 1)
  %vadd1 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld1, <8 x i8> %vld2) nounwind
  %vadd2 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld2, <8 x i8> %vld3) nounwind
  %vadd3 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld3, <8 x i8> %vld4) nounwind
  %vadd4 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld4, <8 x i8> %vld5) nounwind
  %vadd5 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld5, <8 x i8> %vld6) nounwind
  %vadd6 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld6, <8 x i8> %vld7) nounwind
  tail call void @llvm.arm.neon.vst1.p0i8.v8i8(i8* %add.ptr3, <8 x i8> %vadd1, i32 1)
  tail call void @llvm.arm.neon.vst1.p0i8.v8i8(i8* %add.ptr7, <8 x i8> %vadd2, i32 1)
  tail call void @llvm.arm.neon.vst1.p0i8.v8i8(i8* %add.ptr11, <8 x i8> %vadd3, i32 1)
  tail call void @llvm.arm.neon.vst1.p0i8.v8i8(i8* %src.addr, <8 x i8> %vadd4, i32 1)
  tail call void @llvm.arm.neon.vst1.p0i8.v8i8(i8* %add.ptr17, <8 x i8> %vadd5, i32 1)
  tail call void @llvm.arm.neon.vst1.p0i8.v8i8(i8* %add.ptr20, <8 x i8> %vadd6, i32 1)
  %inc = add nsw i32 %i.0110, 1
  %add.ptr45 = getelementptr inbounds i8, i8* %src.addr, i32 8
  %exitcond = icmp eq i32 %inc, 4
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

declare <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8*, i32) nounwind readonly

declare void @llvm.arm.neon.vst1.p0i8.v8i8(i8*, <8 x i8>, i32) nounwind

declare <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone