; RUN: opt -basic-aa -loop-vectorize -enable-mem-access-versioning -force-vector-width=2 -force-vector-interleave=1 < %s -S | FileCheck %s
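; -enable-mem-access-versioning lets the vectorizer speculate on symbolic
; strides; -force-vector-width=2 and -force-vector-interleave=1 pin the
; vectorization factor to 2 with no interleaving so the CHECK patterns below
; stay stable.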

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

; Check that we version this loop by speculating the value 1 for the symbolic
; strides.  This also checks that the symbolic stride information is correctly
; propagated to the memcheck generation.  Without this the loop wouldn't
; vectorize because we couldn't determine the array bounds for the required
; memchecks.
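;
; A rough C equivalent of the loop below (hypothetical source, shown only to
; make the stride pattern visible):
;
;   for (unsigned i = 0; i < N; i++)
;     A[i * AStride] = B[i * BStride] * C[i * CStride];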

; CHECK-LABEL: test
define void @test(i32*  %A, i64 %AStride,
                  i32*  %B, i32 %BStride,
                  i32*  %C, i64 %CStride, i32 %N) {
entry:
  %cmp13 = icmp eq i32 %N, 0
  br i1 %cmp13, label %for.end, label %for.body.preheader

; CHECK-DAG: icmp ne i64 %AStride, 1
; CHECK-DAG: icmp ne i32 %BStride, 1
; CHECK-DAG: icmp ne i64 %CStride, 1
; CHECK: or
; CHECK: or
; CHECK: br
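;
; A sketch of the versioning guard these checks match (value and block names
; are illustrative, not part of the FileCheck patterns): the three stride
; checks are OR'd together, and any non-unit stride falls back to the scalar
; loop.
;   %a.check = icmp ne i64 %AStride, 1
;   %b.check = icmp ne i32 %BStride, 1
;   %c.check = icmp ne i64 %CStride, 1
;   %or.0 = or i1 %a.check, %b.check
;   %or.1 = or i1 %or.0, %c.check
;   br i1 %or.1, label %scalar.ph, label %vector.memcheck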

; CHECK: vector.body
; CHECK: load <2 x i32>

for.body.preheader:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %iv.trunc = trunc i64 %indvars.iv to i32
  %mul = mul i32 %iv.trunc, %BStride
  %mul64 = zext i32 %mul to i64
  %arrayidx = getelementptr inbounds i32, i32* %B, i64 %mul64
  %0 = load i32, i32* %arrayidx, align 4
  %mul2 = mul nsw i64 %indvars.iv, %CStride
  %arrayidx3 = getelementptr inbounds i32, i32* %C, i64 %mul2
  %1 = load i32, i32* %arrayidx3, align 4
  %mul4 = mul nsw i32 %1, %0
  %mul3 = mul nsw i64 %indvars.iv, %AStride
  %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %mul3
  store i32 %mul4, i32* %arrayidx7, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %N
  br i1 %exitcond, label %for.end.loopexit, label %for.body

for.end.loopexit:
  br label %for.end

for.end:
  ret void
}

; We used to crash on this function because we removed the fptosi cast when
; replacing the symbolic stride '%conv'.
; PR18480
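;
; Roughly the C source that exposed the crash (hypothetical reconstruction):
;
;   int conv = (int)a;
;   for (int i = 0; i < conv + 4; i++)
;     c[i] = x[i * conv];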

; CHECK-LABEL: fn1
; CHECK: load <2 x double>

define void @fn1(double* noalias %x, double* noalias %c, double %a) {
entry:
  %conv = fptosi double %a to i32
  %conv2 = add i32 %conv, 4
  %cmp8 = icmp sgt i32 %conv2, 0
  br i1 %cmp8, label %for.body.preheader, label %for.end

for.body.preheader:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %0 = trunc i64 %indvars.iv to i32
  %mul = mul nsw i32 %0, %conv
  %idxprom = sext i32 %mul to i64
  %arrayidx = getelementptr inbounds double, double* %x, i64 %idxprom
  %1 = load double, double* %arrayidx, align 8
  %arrayidx3 = getelementptr inbounds double, double* %c, i64 %indvars.iv
  store double %1, double* %arrayidx3, align 8
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %conv2
  br i1 %exitcond, label %for.end.loopexit, label %for.body

for.end.loopexit:
  br label %for.end

for.end:
  ret void
}