1; RUN: opt < %s  -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -force-vector-width=4 -force-vector-interleave=0 -dce -S \
2; RUN:   | FileCheck %s --check-prefix=CHECK-VECTOR
3; RUN: opt < %s  -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -force-vector-width=1 -force-vector-interleave=0 -dce -S \
4; RUN:   | FileCheck %s --check-prefix=CHECK-SCALAR
5
6target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
7target triple = "x86_64-apple-macosx10.8.0"
8
9; We don't unroll this loop because it has a small constant trip count.
10;
11; CHECK-VECTOR-LABEL: @foo(
12; CHECK-VECTOR: load <4 x i32>
13; CHECK-VECTOR-NOT: load <4 x i32>
14; CHECK-VECTOR: store <4 x i32>
15; CHECK-VECTOR-NOT: store <4 x i32>
16; CHECK-VECTOR: ret
17;
18; CHECK-SCALAR-LABEL: @foo(
19; CHECK-SCALAR: load i32*
20; CHECK-SCALAR-NOT: load i32*
21; CHECK-SCALAR: store i32
22; CHECK-SCALAR-NOT: store i32
23; CHECK-SCALAR: ret
; i32 @foo(i32 *A): A[i] += 6 for i = 0..99.
; The loop's trip count is the compile-time constant 100 (see the icmp
; against 100 below), so the vectorizer should vectorize but NOT interleave
; it -- this is what the CHECK-VECTOR-NOT/CHECK-SCALAR-NOT lines above test.
define i32 @foo(i32* nocapture %A) nounwind uwtable ssp {
  br label %1

; <label>:1                                       ; preds = %1, %0
  %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
  %2 = getelementptr inbounds i32* %A, i64 %indvars.iv
  %3 = load i32* %2, align 4
  %4 = add nsw i32 %3, 6                          ; A[i] + 6
  store i32 %4, i32* %2, align 4                  ; A[i] = A[i] + 6
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 100       ; constant trip count of 100
  br i1 %exitcond, label %5, label %1

; <label>:5                                       ; preds = %1
  ret i32 undef
}
41
42; But this is a good small loop to unroll as we don't know of a bound on its
43; trip count.
44;
45; CHECK-VECTOR-LABEL: @bar(
46; CHECK-VECTOR: store <4 x i32>
47; CHECK-VECTOR: store <4 x i32>
48; CHECK-VECTOR: ret
49;
50; CHECK-SCALAR-LABEL: @bar(
51; CHECK-SCALAR: store i32
52; CHECK-SCALAR: store i32
53; CHECK-SCALAR: ret
; i32 @bar(i32 *A, i32 n): A[i] += 6 for i = 0..n-1 (no-op when n <= 0).
; Unlike @foo, the trip count %n is unknown at compile time, so the
; vectorizer is expected to interleave (unroll) the loop -- the CHECK lines
; above require two vector/scalar stores in the output.
define i32 @bar(i32* nocapture %A, i32 %n) nounwind uwtable ssp {
  %1 = icmp sgt i32 %n, 0                         ; guard: skip loop when n <= 0
  br i1 %1, label %.lr.ph, label %._crit_edge

.lr.ph:                                           ; preds = %0, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
  %2 = getelementptr inbounds i32* %A, i64 %indvars.iv
  %3 = load i32* %2, align 4
  %4 = add nsw i32 %3, 6                          ; A[i] + 6
  store i32 %4, i32* %2, align 4                  ; A[i] = A[i] + 6
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n        ; runtime trip count %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}
72
; Also unroll if we need a runtime check, but only because it was going to be
; added for vectorization anyway.
75; CHECK-VECTOR-LABEL: @runtime_chk(
76; CHECK-VECTOR: store <4 x float>
77; CHECK-VECTOR: store <4 x float>
78;
79; But not if the unrolling would introduce the runtime check.
80; CHECK-SCALAR-LABEL: @runtime_chk(
81; CHECK-SCALAR: store float
82; CHECK-SCALAR-NOT: store float
; void @runtime_chk(float *A, float *B, float N): A[i] = B[i] * N for
; i = 0..255. %A and %B may alias, so vectorizing requires a runtime
; memory check; per the CHECK lines above, interleaving is allowed in the
; vector case (the check is emitted anyway) but not in the scalar case
; (where unrolling alone would have to introduce it).
define void @runtime_chk(float* %A, float* %B, float %N) {
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
  %0 = load float* %arrayidx, align 4
  %mul = fmul float %0, %N                        ; B[i] * N
  %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
  store float %mul, float* %arrayidx2, align 4    ; A[i] = B[i] * N
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 256   ; constant trip count of 256
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}
101