; RUN: opt < %s -mattr=avx -force-vector-width=2 -force-vector-interleave=1 -loop-vectorize -simplifycfg -simplifycfg-require-and-preserve-domtree=1 -S | FileCheck %s
; RUN: opt -mcpu=skylake-avx512 -S -force-vector-width=8 -force-vector-interleave=1 -loop-vectorize < %s | FileCheck %s --check-prefix=SINK-GATHER

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

; CHECK-LABEL: predicated_sdiv_masked_load
;
; This test ensures that we don't scalarize the predicated load. Since the load
; can be vectorized with predication, scalarizing it would cause its pointer
; operand to become non-uniform.
;
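; Roughly equivalent C for the scalar loop below (an illustrative sketch only,
; not part of the test input; names mirror the IR):
;
;   int r = 0;
;   for (long i = 0; i < 10000; i++) {
;     int t = a[i];
;     if (c)
;       t += b[i] / x;  // the conditional load of b[i] feeds the sdiv
;     r += t;
;   }
;   return r;
;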
; CHECK: vector.body:
; CHECK:   %wide.masked.load = call <2 x i32> @llvm.masked.load.v2i32.p0v2i32
; CHECK:   br i1 {{.*}}, label %[[IF0:.+]], label %[[CONT0:.+]]
; CHECK: [[IF0]]:
; CHECK:   %[[T0:.+]] = extractelement <2 x i32> %wide.masked.load, i32 0
; CHECK:   %[[T1:.+]] = sdiv i32 %[[T0]], %x
; CHECK:   %[[T2:.+]] = insertelement <2 x i32> poison, i32 %[[T1]], i32 0
; CHECK:   br label %[[CONT0]]
; CHECK: [[CONT0]]:
; CHECK:   %[[T3:.+]] = phi <2 x i32> [ poison, %vector.body ], [ %[[T2]], %[[IF0]] ]
; CHECK:   br i1 {{.*}}, label %[[IF1:.+]], label %[[CONT1:.+]]
; CHECK: [[IF1]]:
; CHECK:   %[[T4:.+]] = extractelement <2 x i32> %wide.masked.load, i32 1
; CHECK:   %[[T5:.+]] = sdiv i32 %[[T4]], %x
; CHECK:   %[[T6:.+]] = insertelement <2 x i32> %[[T3]], i32 %[[T5]], i32 1
; CHECK:   br label %[[CONT1]]
; CHECK: [[CONT1]]:
; CHECK:   phi <2 x i32> [ %[[T3]], %[[CONT0]] ], [ %[[T6]], %[[IF1]] ]
; CHECK:   br i1 {{.*}}, label %middle.block, label %vector.body

define i32 @predicated_sdiv_masked_load(i32* %a, i32* %b, i32 %x, i1 %c) {
entry:
  br label %for.body

for.body:
  %i = phi i64 [ 0, %entry ], [ %i.next, %for.inc ]
  %r = phi i32 [ 0, %entry ], [ %tmp7, %for.inc ]
  %tmp0 = getelementptr inbounds i32, i32* %a, i64 %i
  %tmp1 = load i32, i32* %tmp0, align 4
  br i1 %c, label %if.then, label %for.inc

if.then:
  %tmp2 = getelementptr inbounds i32, i32* %b, i64 %i
  %tmp3 = load i32, i32* %tmp2, align 4
  %tmp4 = sdiv i32 %tmp3, %x
  %tmp5 = add nsw i32 %tmp4, %tmp1
  br label %for.inc

for.inc:
  %tmp6 = phi i32 [ %tmp1, %for.body ], [ %tmp5, %if.then ]
  %tmp7 = add i32 %r, %tmp6
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp eq i64 %i.next, 10000
  br i1 %cond, label %for.end, label %for.body

for.end:
  %tmp8 = phi i32 [ %tmp7, %for.inc ]
  ret i32 %tmp8
}

; This test ensures that a load which would otherwise have been widened is
; instead scalarized when the cost model decides to do so, as part of the
; sink-scalar-operands optimization for predicated instructions.
;
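; Roughly equivalent C for the scalar loop below (an illustrative sketch only,
; not part of the test input; names mirror the IR). The strided access
; a[i * 777] would require a gather if the load were widened:
;
;   int r = 0;
;   long i = 0;
;   do {
;     r += c ? a[i * 777] / x : x;  // unsigned division when the predicate holds
;     i++;
;   } while (i < n);
;   return r;
;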
; SINK-GATHER-LABEL: @scalarize_and_sink_gather
; SINK-GATHER:      vector.body:
; SINK-GATHER-LABEL: pred.udiv.if:                                     ; preds = %vector.body
; SINK-GATHER-NEXT:   [[EXT:%.+]] = extractelement <8 x i64> {{.*}}, i32 0
; SINK-GATHER-NEXT:   [[GEP:%.+]] = getelementptr inbounds i32, i32* %a, i64 [[EXT]]
; SINK-GATHER-NEXT:   [[LV:%.+]] = load i32, i32* [[GEP]], align 4
; SINK-GATHER-NEXT:   [[UDIV:%.+]] = udiv i32 [[LV]], %x
; SINK-GATHER-NEXT:   [[INS:%.+]] = insertelement <8 x i32> poison, i32 [[UDIV]], i32 0
; SINK-GATHER-NEXT:   br label %pred.udiv.continue
; SINK-GATHER:      pred.udiv.continue:
; SINK-GATHER-NEXT:   phi i32 [ poison, %vector.body ], [ [[LV]], %pred.udiv.if ]
; SINK-GATHER-NEXT:   phi <8 x i32> [ poison, %vector.body ], [ [[INS]], %pred.udiv.if ]
define i32 @scalarize_and_sink_gather(i32* %a, i1 %c, i32 %x, i64 %n) {
entry:
  br label %for.body

for.body:
  %i = phi i64 [ 0, %entry ], [ %i.next, %for.inc ]
  %r = phi i32 [ 0, %entry ], [ %tmp6, %for.inc ]
  %i7 = mul i64 %i, 777
  br i1 %c, label %if.then, label %for.inc

if.then:
  %tmp0 = getelementptr inbounds i32, i32* %a, i64 %i7
  %tmp2 = load i32, i32* %tmp0, align 4
  %tmp4 = udiv i32 %tmp2, %x
  br label %for.inc

for.inc:
  %tmp5 = phi i32 [ %x, %for.body ], [ %tmp4, %if.then ]
  %tmp6 = add i32 %r, %tmp5
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:
  %tmp7 = phi i32 [ %tmp6, %for.inc ]
  ret i32 %tmp7
}