; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes
; RUN: opt -S -passes=attributor -aa-pipeline='basic-aa' -attributor-disable=false -attributor-max-iterations-verify -attributor-max-iterations=1 < %s | FileCheck %s
;
; void bar(int, float, double);
;
; void foo(int N) {
;   float p = 3;
;   double q = 5;
;   N = 7;
;
;   #pragma omp parallel for firstprivate(q)
;   for (int i = 2; i < N; i++) {
;     bar(i, p, q);
;   }
; }
;
; Verify the constant value of q is propagated into the outlined function.
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

%struct.ident_t = type { i32, i32, i32, i32, i8* }

@.str = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
@0 = private unnamed_addr global %struct.ident_t { i32 0, i32 514, i32 0, i32 0, i8* getelementptr inbounds ([23 x i8], [23 x i8]* @.str, i32 0, i32 0) }, align 8
@1 = private unnamed_addr global %struct.ident_t { i32 0, i32 2, i32 0, i32 0, i8* getelementptr inbounds ([23 x i8], [23 x i8]* @.str, i32 0, i32 0) }, align 8

; Caller: passes %N.addr, %p, and the bit pattern of the double 5.0
; (0x4014000000000000 = 4617315517961601024) to the outlined body via
; __kmpc_fork_call.
define dso_local void @foo(i32 %N) {
; CHECK-LABEL: define {{[^@]+}}@foo
; CHECK-SAME: (i32 [[N:%.*]])
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[P:%.*]] = alloca float, align 4
; CHECK-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
; CHECK-NEXT:    store float 3.000000e+00, float* [[P]], align 4
; CHECK-NEXT:    store i32 7, i32* [[N_ADDR]], align 4
; CHECK-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* nonnull align 8 dereferenceable(24) @1, i32 3, void (i32*, i32*, ...)* nonnull bitcast (void (i32*, i32*, i32*, float*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* noalias nocapture nonnull readonly align 4 dereferenceable(4) [[N_ADDR]], float* noalias nocapture nonnull readonly align 4 dereferenceable(4) [[P]], i64 4617315517961601024)
; CHECK-NEXT:    ret void
;
entry:
  %N.addr = alloca i32, align 4
  %p = alloca float, align 4
  store i32 %N, i32* %N.addr, align 4
  store float 3.000000e+00, float* %p, align 4
  store i32 7, i32* %N.addr, align 4
  call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* nonnull @1, i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, float*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* nonnull %N.addr, float* nonnull %p, i64 4617315517961601024)
  ret void
}

; Outlined parallel-for body. %q carries the firstprivate double as an i64;
; the Attributor should prove the constant and simplify the store below.
define internal void @.omp_outlined.(i32* noalias %.global_tid., i32* noalias %.bound_tid., i32* dereferenceable(4) %N, float* dereferenceable(4) %p, i64 %q) {
entry:
  %q.addr = alloca i64, align 8
  %.omp.lb = alloca i32, align 4
  %.omp.ub = alloca i32, align 4
  %.omp.stride = alloca i32, align 4
  %.omp.is_last = alloca i32, align 4
; CHECK: store i64 4617315517961601024, i64* %q.addr, align 8
  store i64 %q, i64* %q.addr, align 8
  %conv = bitcast i64* %q.addr to double*
  %tmp = load i32, i32* %N, align 4
  %sub3 = add nsw i32 %tmp, -3
  %cmp = icmp sgt i32 %tmp, 2
  br i1 %cmp, label %omp.precond.then, label %omp.precond.end

omp.precond.then:                                 ; preds = %entry
  store i32 0, i32* %.omp.lb, align 4
  store i32 %sub3, i32* %.omp.ub, align 4
  store i32 1, i32* %.omp.stride, align 4
  store i32 0, i32* %.omp.is_last, align 4
  %tmp5 = load i32, i32* %.global_tid., align 4
  call void @__kmpc_for_static_init_4(%struct.ident_t* nonnull @0, i32 %tmp5, i32 34, i32* nonnull %.omp.is_last, i32* nonnull %.omp.lb, i32* nonnull %.omp.ub, i32* nonnull %.omp.stride, i32 1, i32 1)
  %tmp6 = load i32, i32* %.omp.ub, align 4
  %cmp6 = icmp sgt i32 %tmp6, %sub3
  br i1 %cmp6, label %cond.true, label %cond.false

cond.true:                                        ; preds = %omp.precond.then
  br label %cond.end

cond.false:                                       ; preds = %omp.precond.then
  %tmp7 = load i32, i32* %.omp.ub, align 4
  br label %cond.end

cond.end:                                         ; preds = %cond.false, %cond.true
  %cond = phi i32 [ %sub3, %cond.true ], [ %tmp7, %cond.false ]
  store i32 %cond, i32* %.omp.ub, align 4
  %tmp8 = load i32, i32* %.omp.lb, align 4
  br label %omp.inner.for.cond

omp.inner.for.cond:                               ; preds = %omp.inner.for.inc, %cond.end
  %.omp.iv.0 = phi i32 [ %tmp8, %cond.end ], [ %add11, %omp.inner.for.inc ]
  %tmp9 = load i32, i32* %.omp.ub, align 4
  %cmp8 = icmp sgt i32 %.omp.iv.0, %tmp9
  br i1 %cmp8, label %omp.inner.for.cond.cleanup, label %omp.inner.for.body

omp.inner.for.cond.cleanup:                       ; preds = %omp.inner.for.cond
  br label %omp.inner.for.end

omp.inner.for.body:                               ; preds = %omp.inner.for.cond
  %add10 = add nsw i32 %.omp.iv.0, 2
  %tmp10 = load float, float* %p, align 4
  %tmp11 = load double, double* %conv, align 8
  call void @bar(i32 %add10, float %tmp10, double %tmp11)
  br label %omp.body.continue

omp.body.continue:                                ; preds = %omp.inner.for.body
  br label %omp.inner.for.inc

omp.inner.for.inc:                                ; preds = %omp.body.continue
  %add11 = add nsw i32 %.omp.iv.0, 1
  br label %omp.inner.for.cond

omp.inner.for.end:                                ; preds = %omp.inner.for.cond.cleanup
  br label %omp.loop.exit

omp.loop.exit:                                    ; preds = %omp.inner.for.end
  %tmp12 = load i32, i32* %.global_tid., align 4
  call void @__kmpc_for_static_fini(%struct.ident_t* nonnull @0, i32 %tmp12)
  br label %omp.precond.end

omp.precond.end:                                  ; preds = %omp.loop.exit, %entry
  ret void
}

declare dso_local void @__kmpc_for_static_init_4(%struct.ident_t*, i32, i32, i32*, i32*, i32*, i32*, i32, i32)

declare dso_local void @bar(i32, float, double)

declare dso_local void @__kmpc_for_static_fini(%struct.ident_t*, i32)

; !callback lets the Attributor see the outlined function (arg 2) as being
; called with the trailing varargs forwarded (the -1/-1 placeholders).
declare !callback !0 dso_local void @__kmpc_fork_call(%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...)

!1 = !{i64 2, i64 -1, i64 -1, i1 true}
!0 = !{!1}