; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"

; Make sure we vectorize this loop:
; int foo(float *a, float *b, int n) {
;   for (int i=0; i<n; ++i)
;     a[i] = b[i] * 3;
; }

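; The checks below verify that a runtime overlap check (vector.memcheck) is
; emitted ahead of the vectorized body: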
;CHECK: for.body.preheader:
;CHECK: br i1 %cmp.zero, label %middle.block, label %vector.memcheck
;CHECK: vector.memcheck:
;CHECK: br i1 %memcheck.conflict, label %middle.block, label %vector.ph
;CHECK: load <4 x float>
define i32 @foo(float* nocapture %a, float* nocapture %b, i32 %n) nounwind uwtable ssp {
entry:
  %cmp6 = icmp sgt i32 %n, 0
  br i1 %cmp6, label %for.body, label %for.end

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
  %0 = load float* %arrayidx, align 4
  %mul = fmul float %0, 3.000000e+00
  %arrayidx2 = getelementptr inbounds float* %a, i64 %indvars.iv
  store float %mul, float* %arrayidx2, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  ret i32 undef
}

; Make sure that we still try to vectorize, emitting a runtime memory check,
; when the static dependency check fails.
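;
; A rough C equivalent of the loop below (a sketch derived from the IR):
;   void test_runtime_check(float *a, float b, long offset, long offset2, long n) {
;     for (long i = 0; i < n; ++i)
;       a[i + offset] += b * a[i + offset2];
;   }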

; CHECK-LABEL: test_runtime_check
; CHECK:      <4 x float>
define void @test_runtime_check(float* %a, float %b, i64 %offset, i64 %offset2, i64 %n) {
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %ind.sum = add i64 %iv, %offset
  %arr.idx = getelementptr inbounds float* %a, i64 %ind.sum
  %l1 = load float* %arr.idx, align 4
  %ind.sum2 = add i64 %iv, %offset2
  %arr.idx2 = getelementptr inbounds float* %a, i64 %ind.sum2
  %l2 = load float* %arr.idx2, align 4
  %m = fmul fast float %b, %l2
  %ad = fadd fast float %l1, %m
  store float %ad, float* %arr.idx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %loopexit, label %for.body

loopexit:
  ret void
}