; This test contains extremely tricky call graph structures for the inliner to
; handle correctly. They form cycles where the inliner introduces code that is
; immediately, or can eventually be, transformed back into the original code.
; Each step changes the call graph and so will trigger iteration. This requires
; some out-of-band way to prevent infinitely re-inlining and re-transforming the
; code.
;
; RUN: opt < %s -passes='cgscc(inline,function(sroa,instcombine))' -inline-threshold=50 -S | FileCheck %s


; The `test1_*` collection of functions forms a directly cycling pattern.

define void @test1_a(i8** %ptr) {
; CHECK-LABEL: define void @test1_a(
entry:
  call void @test1_b(i8* bitcast (void (i8*, i1, i32)* @test1_b to i8*), i1 false, i32 0)
; Inlining and simplifying this call will reliably produce the exact same call,
; over and over again. However, each inlining increments the count, and so we
; expect this test case to stop after one round of inlining with a final
; argument of '1'.
; CHECK-NOT: call
; CHECK: call void @test1_b(i8* bitcast (void (i8*, i1, i32)* @test1_b to i8*), i1 false, i32 1)
; CHECK-NOT: call

  ret void
}

define void @test1_b(i8* %arg, i1 %flag, i32 %inline_count) {
; CHECK-LABEL: define void @test1_b(
entry:
  %a = alloca i8*
  store i8* %arg, i8** %a
; This alloca and store should remain through any optimization.
; CHECK: %[[A:.*]] = alloca
; CHECK: store i8* %arg, i8** %[[A]]

  br i1 %flag, label %bb1, label %bb2

bb1:
  call void @test1_a(i8** %a) noinline
  br label %bb2

bb2:
  %cast = bitcast i8** %a to void (i8*, i1, i32)**
  %p = load void (i8*, i1, i32)*, void (i8*, i1, i32)** %cast
  %inline_count_inc = add i32 %inline_count, 1
  call void %p(i8* %arg, i1 %flag, i32 %inline_count_inc)
; And we should continue to load and call indirectly through optimization.
; CHECK: %[[CAST:.*]] = bitcast i8** %[[A]] to void (i8*, i1, i32)**
; CHECK: %[[P:.*]] = load void (i8*, i1, i32)*, void (i8*, i1, i32)** %[[CAST]]
; CHECK: call void %[[P]](

  ret void
}

define void @test2_a(i8** %ptr) {
; CHECK-LABEL: define void @test2_a(
entry:
  call void @test2_b(i8* bitcast (void (i8*, i8*, i1, i32)* @test2_b to i8*), i8* bitcast (void (i8*, i8*, i1, i32)* @test2_c to i8*), i1 false, i32 0)
; Inlining and simplifying this call will reliably produce the exact same call,
; but only after doing two rounds of inlining, first from @test2_b then
; @test2_c. We check the exact number of inlining rounds before we cut off to
; break the cycle by inspecting the last parameter that gets incremented with
; each inlined function body.
; CHECK-NOT: call
; CHECK: call void @test2_b(i8* bitcast (void (i8*, i8*, i1, i32)* @test2_b to i8*), i8* bitcast (void (i8*, i8*, i1, i32)* @test2_c to i8*), i1 false, i32 2)
; CHECK-NOT: call
  ret void
}

define void @test2_b(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count) {
; CHECK-LABEL: define void @test2_b(
entry:
  %a = alloca i8*
  store i8* %arg2, i8** %a
; This alloca and store should remain through any optimization.
; CHECK: %[[A:.*]] = alloca
; CHECK: store i8* %arg2, i8** %[[A]]

  br i1 %flag, label %bb1, label %bb2

bb1:
  call void @test2_a(i8** %a) noinline
  br label %bb2

bb2:
  %p = load i8*, i8** %a
  %cast = bitcast i8* %p to void (i8*, i8*, i1, i32)*
  %inline_count_inc = add i32 %inline_count, 1
  call void %cast(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count_inc)
; And we should continue to load and call indirectly through optimization.
; CHECK: %[[CAST:.*]] = bitcast i8** %[[A]] to void (i8*, i8*, i1, i32)**
; CHECK: %[[P:.*]] = load void (i8*, i8*, i1, i32)*, void (i8*, i8*, i1, i32)** %[[CAST]]
; CHECK: call void %[[P]](

  ret void
}

define void @test2_c(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count) {
; CHECK-LABEL: define void @test2_c(
entry:
  %a = alloca i8*
  store i8* %arg1, i8** %a
; This alloca and store should remain through any optimization.
; CHECK: %[[A:.*]] = alloca
; CHECK: store i8* %arg1, i8** %[[A]]

  br i1 %flag, label %bb1, label %bb2

bb1:
  call void @test2_a(i8** %a) noinline
  br label %bb2

bb2:
  %p = load i8*, i8** %a
  %cast = bitcast i8* %p to void (i8*, i8*, i1, i32)*
  %inline_count_inc = add i32 %inline_count, 1
  call void %cast(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count_inc)
; And we should continue to load and call indirectly through optimization.
; CHECK: %[[CAST:.*]] = bitcast i8** %[[A]] to void (i8*, i8*, i1, i32)**
; CHECK: %[[P:.*]] = load void (i8*, i8*, i1, i32)*, void (i8*, i8*, i1, i32)** %[[CAST]]
; CHECK: call void %[[P]](

  ret void
}

; Another infinite inlining case. The initial call graph is as follows:
;
;     test3_a <---> test3_b
;        |             ^
;        v             |
;     test3_c <---> test3_d
;
; For all the call edges in the call graph, only test3_c and test3_d can be
; inlined into test3_a, and no other call edge can be inlined.
;
; After test3_c is inlined into test3_a, the original call edge
; test3_a->test3_c will be removed, a new call edge will be added, and the
; call graph becomes:
;
;     test3_a <---> test3_b
;            \          ^
;             v        /
;     test3_c <---> test3_d
;
; but test3_a, test3_b, test3_c and test3_d still belong to the same SCC.
;
; Then, after test3_a->test3_d is inlined, when test3_a->test3_d is converted
; to a ref edge, the original SCC will be split into two: {test3_c, test3_d}
; and {test3_a, test3_b}. Immediately after that, the newly added ref edge
; test3_a->test3_c will be converted to a call edge, and the two SCCs will be
; merged into the original one again. During this cycle, the original SCC will
; be added into UR.CWorklist again and this creates an infinite loop.

@a = global i64 0
@b = global i64 0

; Check that test3_c is inlined into test3_a once and only once.
; CHECK-LABEL: @test3_a(
; CHECK: tail call void @test3_b()
; CHECK-NEXT: tail call void @test3_d(i32 5)
; CHECK-NEXT: %[[LD1:.*]] = load i64, i64* @a
; CHECK-NEXT: %[[ADD1:.*]] = add nsw i64 %[[LD1]], 1
; CHECK-NEXT: store i64 %[[ADD1]], i64* @a
; CHECK-NEXT: %[[LD2:.*]] = load i64, i64* @b
; CHECK-NEXT: %[[ADD2:.*]] = add nsw i64 %[[LD2]], 5
; CHECK-NEXT: store i64 %[[ADD2]], i64* @b
; CHECK-NEXT: ret void

; Function Attrs: noinline
define void @test3_a() #0 {
entry:
  tail call void @test3_b()
  tail call void @test3_c(i32 5)
  %t0 = load i64, i64* @b
  %add = add nsw i64 %t0, 5
  store i64 %add, i64* @b
  ret void
}

; Function Attrs: noinline
define void @test3_b() #0 {
entry:
  tail call void @test3_a()
  %t0 = load i64, i64* @a
  %add = add nsw i64 %t0, 2
  store i64 %add, i64* @a
  ret void
}

define void @test3_d(i32 %i) {
entry:
  %cmp = icmp eq i32 %i, 5
  br i1 %cmp, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  %call = tail call i64 @random()
  %t0 = load i64, i64* @a
  %add = add nsw i64 %t0, %call
  store i64 %add, i64* @a
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  tail call void @test3_c(i32 %i)
  tail call void @test3_b()
  %t6 = load i64, i64* @a
  %add79 = add nsw i64 %t6, 3
  store i64 %add79, i64* @a
  ret void
}

define void @test3_c(i32 %i) {
entry:
  %cmp = icmp eq i32 %i, 5
  br i1 %cmp, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  %call = tail call i64 @random()
  %t0 = load i64, i64* @a
  %add = add nsw i64 %t0, %call
  store i64 %add, i64* @a
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  tail call void @test3_d(i32 %i)
  %t6 = load i64, i64* @a
  %add85 = add nsw i64 %t6, 1
  store i64 %add85, i64* @a
  ret void
}

declare i64 @random()

attributes #0 = { noinline }