; RUN: llc -O3 -mtriple=thumb-eabi -mcpu=cortex-a8 %s -o - -arm-atomic-cfg-tidy=0 | FileCheck %s
;
; LSR should only check for valid address modes when the IV user is a
; memory address.
; svn r158536, rdar://11635990
;
; Note that we still don't produce the best code here because we fail
; to coalesce the IV. See <rdar://problem/11680670> [coalescer] IVs
; need to be scheduled to expose coalescing.

; LSR before the fix:
;The chosen solution requires 4 regs, with addrec cost 1, plus 3 base adds, plus 2 setup cost:
;  LSR Use: Kind=Special, Offsets={0}, all-fixups-outside-loop, widest fixup type: i32
;    reg(%v3) + reg({0,+,-1}<%while.cond.i.i>) + imm(1)
;  LSR Use: Kind=ICmpZero, Offsets={0}, widest fixup type: i32
;    reg(%v3) + reg({0,+,-1}<%while.cond.i.i>)
;  LSR Use: Kind=Address of i32, Offsets={0}, widest fixup type: i32*
;    reg((-4 + (4 * %v3) + %v1)) + 4*reg({0,+,-1}<%while.cond.i.i>)
;  LSR Use: Kind=Address of i32, Offsets={0}, widest fixup type: i32*
;    reg((-4 + (4 * %v3) + %v4)) + 4*reg({0,+,-1}<%while.cond.i.i>)
;  LSR Use: Kind=Special, Offsets={0}, all-fixups-outside-loop, widest fixup type: i32
;    reg(%v3)
;
; LSR after the fix:
;The chosen solution requires 4 regs, with addrec cost 1, plus 1 base add, plus 2 setup cost:
;  LSR Use: Kind=Special, Offsets={0}, all-fixups-outside-loop, widest fixup type: i32
;    reg({%v3,+,-1}<nsw><%while.cond.i.i>) + imm(1)
;  LSR Use: Kind=ICmpZero, Offsets={0}, widest fixup type: i32
;    reg({%v3,+,-1}<nsw><%while.cond.i.i>)
;  LSR Use: Kind=Address of i32, Offsets={0}, widest fixup type: i32*
;    reg((-4 + %v1)) + 4*reg({%v3,+,-1}<nsw><%while.cond.i.i>)
;  LSR Use: Kind=Address of i32, Offsets={0}, widest fixup type: i32*
;    reg((-4 + %v4)) + 4*reg({%v3,+,-1}<nsw><%while.cond.i.i>)
;  LSR Use: Kind=Special, Offsets={0}, all-fixups-outside-loop, widest fixup type: i32
;    reg(%v3)

; Module-level declarations. Each line of the original was corrupted with a
; duplicated leading line number (e.g. "38%s = type ..."), which made the IR
; unparseable; the prefixes are stripped here with no other changes.
%s = type { i32* }

@ncol = external global i32, align 4

declare i32* @getptr() nounwind
declare %s* @getstruct() nounwind

; CHECK: @main
; Check that the loop preheader contains no address computation.
; CHECK: %end_of_chain
; CHECK-NOT: add{{.*}}lsl
; CHECK: ldr{{.*}}lsl #2
; CHECK: ldr{{.*}}lsl #2
define i32 @main() nounwind ssp {
entry:
  %v0 = load i32* @ncol, align 4
  %v1 = tail call i32* @getptr() nounwind
  %cmp10.i = icmp eq i32 %v0, 0
  br label %while.cond.outer

while.cond.outer:
  %call18 = tail call %s* @getstruct() nounwind
  br label %while.cond

while.cond:
  %cmp20 = icmp eq i32* %v1, null
  br label %while.body

while.body:
  %v3 = load i32* @ncol, align 4
  br label %end_of_chain

end_of_chain:
  %state.i = getelementptr inbounds %s* %call18, i32 0, i32 0
  %v4 = load i32** %state.i, align 4
  br label %while.cond.i.i

; Inner loop: %dec.i.i is the IV ({%v3,+,-1}); its uses include two i32
; address fixups (%arrayidx.i.i, %arrayidx1.i.i) plus non-address uses,
; which is the mix of LSR use kinds this test exercises.
while.cond.i.i:
  %counter.0.i.i = phi i32 [ %v3, %end_of_chain ], [ %dec.i.i, %land.rhs.i.i ]
  %dec.i.i = add nsw i32 %counter.0.i.i, -1
  %tobool.i.i = icmp eq i32 %counter.0.i.i, 0
  br i1 %tobool.i.i, label %where.exit, label %land.rhs.i.i

land.rhs.i.i:
  %arrayidx.i.i = getelementptr inbounds i32* %v4, i32 %dec.i.i
  %v5 = load i32* %arrayidx.i.i, align 4
  %arrayidx1.i.i = getelementptr inbounds i32* %v1, i32 %dec.i.i
  %v6 = load i32* %arrayidx1.i.i, align 4
  %cmp.i.i = icmp eq i32 %v5, %v6
  br i1 %cmp.i.i, label %while.cond.i.i, label %equal_data.exit.i

equal_data.exit.i:
  ret i32 %counter.0.i.i

where.exit:
  br label %while.end.i

while.end.i:
  ret i32 %v3
}