; RUN: opt < %s -loop-vectorize -force-vector-width=2 -force-vector-interleave=2 -S | FileCheck %s

; Tests that the loop vectorizer correctly handles calls to @llvm.assume.
; The forced configuration is VF=2, IC=2, so each vector iteration covers
; four scalar lanes (two <2 x ...> parts).

; Here the assume condition (%cmp1) depends on the loaded value and therefore
; varies per lane: the CHECK lines verify that the compare is vectorized and
; then scalarized back into one extractelement + @llvm.assume pair per lane
; (2 lanes x 2 interleaved parts = 4 assume calls).
define void @test1(float* noalias nocapture %a, float* noalias nocapture readonly %b) {
; CHECK-LABEL: @test1(
; CHECK: vector.body:
; CHECK: [[WIDE_LOAD:%.*]] = load <2 x float>, <2 x float>* {{.*}}, align 4
; CHECK: [[WIDE_LOAD1:%.*]] = load <2 x float>, <2 x float>* {{.*}}, align 4
; CHECK-NEXT: [[TMP1:%.*]] = fcmp ogt <2 x float> [[WIDE_LOAD]], <float 1.000000e+02, float 1.000000e+02>
; CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <2 x float> [[WIDE_LOAD1]], <float 1.000000e+02, float 1.000000e+02>
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP3]])
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i1> [[TMP1]], i32 1
; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP4]])
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP5]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1
; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP6]])
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
  %0 = load float, float* %arrayidx, align 4
  %cmp1 = fcmp ogt float %0, 1.000000e+02
  tail call void @llvm.assume(i1 %cmp1)
  %add = fadd float %0, 1.000000e+00
  %arrayidx5 = getelementptr inbounds float, float* %a, i64 %indvars.iv
  store float %add, float* %arrayidx5, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv, 1599
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

declare void @llvm.assume(i1) #0

attributes #0 = { nounwind willreturn }

%struct.data = type { float*, float* }

; Here the assume conditions (%maskcond, %maskcond4) are loop-invariant
; 32-byte-alignment checks (ptr & 31 == 0) computed in the entry block: the
; CHECK lines verify the entry-block compares are reused unmodified inside
; vector.body, with one assume call per interleaved part (two per condition).
define void @test2(%struct.data* nocapture readonly %d) {
; CHECK-LABEL: @test2(
; CHECK: entry:
; CHECK: [[MASKCOND:%.*]] = icmp eq i64 %maskedptr, 0
; CHECK: [[MASKCOND4:%.*]] = icmp eq i64 %maskedptr3, 0
; CHECK: vector.body:
; CHECK: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK: tail call void @llvm.assume(i1 [[MASKCOND4]])
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND4]])
; CHECK: for.body:
entry:
  %b = getelementptr inbounds %struct.data, %struct.data* %d, i64 0, i32 1
  %0 = load float*, float** %b, align 8
  %ptrint = ptrtoint float* %0 to i64
  %maskedptr = and i64 %ptrint, 31
  %maskcond = icmp eq i64 %maskedptr, 0
  %a = getelementptr inbounds %struct.data, %struct.data* %d, i64 0, i32 0
  %1 = load float*, float** %a, align 8
  %ptrint2 = ptrtoint float* %1 to i64
  %maskedptr3 = and i64 %ptrint2, 31
  %maskcond4 = icmp eq i64 %maskedptr3, 0
  br label %for.body


for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  tail call void @llvm.assume(i1 %maskcond)
  %arrayidx = getelementptr inbounds float, float* %0, i64 %indvars.iv
  %2 = load float, float* %arrayidx, align 4
  %add = fadd float %2, 1.000000e+00
  tail call void @llvm.assume(i1 %maskcond4)
  %arrayidx5 = getelementptr inbounds float, float* %1, i64 %indvars.iv
  store float %add, float* %arrayidx5, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv, 1599
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; Test case for PR43620. Make sure we can vectorize with predication in presence
; of assume calls. For now, check that we drop all assumes in predicated blocks
; in the vector body.
; The assume here sits in a conditionally-executed block (%if.else), so after
; vectorization with predication it must not survive into the vector body;
; the CHECK-NOT below verifies vector.body contains no @llvm.assume calls.
define void @predicated_assume(float* noalias nocapture readonly %a, float* noalias nocapture %b, i32 %n) {
; Check that the vector.body does not contain any assumes.
; CHECK-LABEL: @predicated_assume(
; CHECK: vector.body:
; CHECK-NOT: llvm.assume
; CHECK: for.body:
entry:
  %cmp15 = icmp eq i32 %n, 0
  br i1 %cmp15, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry
  %0 = zext i32 %n to i64
  br label %for.body

for.cond.cleanup.loopexit:                        ; preds = %if.end5
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
  ret void

for.body:                                         ; preds = %for.body.preheader, %if.end5
  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %if.end5 ]
  %cmp1 = icmp ult i64 %indvars.iv, 495616
  br i1 %cmp1, label %if.end5, label %if.else

if.else:                                          ; preds = %for.body
  %cmp2 = icmp ult i64 %indvars.iv, 991232
  tail call void @llvm.assume(i1 %cmp2)
  br label %if.end5

if.end5:                                          ; preds = %for.body, %if.else
  %x.0 = phi float [ 4.200000e+01, %if.else ], [ 2.300000e+01, %for.body ]
  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
  %1 = load float, float* %arrayidx, align 4
  %mul = fmul float %x.0, %1
  %arrayidx7 = getelementptr inbounds float, float* %b, i64 %indvars.iv
  store float %mul, float* %arrayidx7, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp = icmp eq i64 %indvars.iv.next, %0
  br i1 %cmp, label %for.cond.cleanup.loopexit, label %for.body
}