// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"math/rand"
	. "runtime"
	"testing"
)

// makePallocData produces an initialized PallocData by setting
// the ranges described by alloc (allocated pages) and scavenged
// (scavenged pages).
func makePallocData(alloc, scavenged []BitRange) *PallocData {
	b := new(PallocData)
	for _, v := range alloc {
		if v.N == 0 {
			// Skip N==0. It's harmless and allocRange doesn't
			// handle this case.
			continue
		}
		b.AllocRange(v.I, v.N)
	}
	for _, v := range scavenged {
		if v.N == 0 {
			// See the previous loop.
			continue
		}
		b.ScavengedSetRange(v.I, v.N)
	}
	return b
}

// TestFillAligned cross-checks the optimized FillAligned against a
// straightforward reference implementation, for every power-of-two
// alignment m in [1, 64], over fixed patterns and random inputs.
func TestFillAligned(t *testing.T) {
	// fillAlignedSlow is the obviously-correct reference: for each
	// m-aligned group of m bits, if any bit in the group is set in x,
	// set the whole group in the output.
	fillAlignedSlow := func(x uint64, m uint) uint64 {
		if m == 1 {
			// Groups of one bit: the output is x itself.
			return x
		}
		out := uint64(0)
		for i := uint(0); i < 64; i += m {
			for j := uint(0); j < m; j++ {
				if x&(uint64(1)<<(i+j)) != 0 {
					// At least one bit set in this group: fill it.
					out |= ((uint64(1) << m) - 1) << i
					break
				}
			}
		}
		return out
	}
	// check compares FillAligned against the reference for one input,
	// logging both bit patterns on mismatch.
	check := func(x uint64, m uint) {
		want := fillAlignedSlow(x, m)
		if got := FillAligned(x, m); got != want {
			t.Logf("got: %064b", got)
			t.Logf("want: %064b", want)
			t.Errorf("bad fillAligned(%016x, %d)", x, m)
		}
	}
	for m := uint(1); m <= 64; m *= 2 {
		tests := []uint64{
			0x0000000000000000,
			0x00000000ffffffff,
			0xffffffff00000000,
			0x8000000000000001,
			0xf00000000000000f,
			0xf00000010050000f,
			0xffffffffffffffff,
			0x0000000000000001,
			0x0000000000000002,
			0x0000000000000008,
			// Single bit just below and at the alignment boundary.
			// Note: for m == 64, uint64(1) << m is 0 in Go (shift count
			// >= width yields 0 for uint64), which is a valid input.
			uint64(1) << (m - 1),
			uint64(1) << m,
			// Try a few fixed arbitrary examples.
			0xb02b9effcf137016,
			0x3975a076a9fbff18,
			0x0f8c88ec3b81506e,
			0x60f14d80ef2fa0e6,
		}
		for _, test := range tests {
			check(test, m)
		}
		for i := 0; i < 1000; i++ {
			// Try some pseudo-random numbers.
			check(rand.Uint64(), m)

			if m > 1 {
				// For m != 1, let's construct a slightly more interesting
				// random test. Generate a bitmap which is either 0 or
				// randomly set bits for each m-aligned group of m bits.
				val := uint64(0)
				for n := uint(0); n < 64; n += m {
					// For each group of m bits, flip a coin:
					// * Leave them as zero.
					// * Set them randomly.
					if rand.Uint64()%2 == 0 {
						val |= (rand.Uint64() & ((1 << m) - 1)) << n
					}
				}
				check(val, m)
			}
		}
	}
}

// TestPallocDataFindScavengeCandidate exercises PallocData.FindScavengeCandidate
// over a table of bitmap configurations. Each case sets up allocated and
// already-scavenged ranges, then asks for a scavenge candidate of at least
// min pages (capped at max) and compares against the expected range.
func TestPallocDataFindScavengeCandidate(t *testing.T) {
	type test struct {
		// alloc and scavenged describe the initial bitmap state
		// (see makePallocData).
		alloc, scavenged []BitRange
		// min and max are the candidate-size bounds passed to
		// FindScavengeCandidate.
		min, max uintptr
		// want is the expected candidate range.
		want BitRange
	}
	tests := map[string]test{
		"MixedMin1": {
			alloc:     []BitRange{{0, 40}, {42, PallocChunkPages - 42}},
			scavenged: []BitRange{{0, 41}, {42, PallocChunkPages - 42}},
			min:       1,
			max:       PallocChunkPages,
			want:      BitRange{41, 1},
		},
		"MultiMin1": {
			alloc:     []BitRange{{0, 63}, {65, 20}, {87, PallocChunkPages - 87}},
			scavenged: []BitRange{{86, 1}},
			min:       1,
			max:       PallocChunkPages,
			want:      BitRange{85, 1},
		},
	}
	// Try out different page minimums.
	for m := uintptr(1); m <= 64; m *= 2 {
		suffix := fmt.Sprintf("Min%d", m)
		tests["AllFree"+suffix] = test{
			min:  m,
			max:  PallocChunkPages,
			want: BitRange{0, PallocChunkPages},
		}
		tests["AllScavenged"+suffix] = test{
			scavenged: []BitRange{{0, PallocChunkPages}},
			min:       m,
			max:       PallocChunkPages,
			want:      BitRange{0, 0},
		}
		tests["NoneFree"+suffix] = test{
			alloc:     []BitRange{{0, PallocChunkPages}},
			scavenged: []BitRange{{PallocChunkPages / 2, PallocChunkPages / 2}},
			min:       m,
			max:       PallocChunkPages,
			want:      BitRange{0, 0},
		}
		tests["StartFree"+suffix] = test{
			alloc: []BitRange{{uint(m), PallocChunkPages - uint(m)}},
			min:   m,
			max:   PallocChunkPages,
			want:  BitRange{0, uint(m)},
		}
		tests["EndFree"+suffix] = test{
			alloc: []BitRange{{0, PallocChunkPages - uint(m)}},
			min:   m,
			max:   PallocChunkPages,
			want:  BitRange{PallocChunkPages - uint(m), uint(m)},
		}
		// Free run straddling the 64-bit word boundary of the bitmap.
		tests["Straddle64"+suffix] = test{
			alloc: []BitRange{{0, 64 - uint(m)}, {64 + uint(m), PallocChunkPages - (64 + uint(m))}},
			min:   m,
			max:   2 * m,
			want:  BitRange{64 - uint(m), 2 * uint(m)},
		}
		tests["BottomEdge64WithFull"+suffix] = test{
			alloc:     []BitRange{{64, 64}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}},
			scavenged: []BitRange{{1, 10}},
			min:       m,
			max:       3 * m,
			want:      BitRange{128, 3 * uint(m)},
		}
		tests["BottomEdge64WithPocket"+suffix] = test{
			alloc:     []BitRange{{64, 62}, {127, 1}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}},
			scavenged: []BitRange{{1, 10}},
			min:       m,
			max:       3 * m,
			want:      BitRange{128, 3 * uint(m)},
		}
		// max == 0 still returns a min-sized candidate.
		tests["Max0"+suffix] = test{
			scavenged: []BitRange{{0, PallocChunkPages - uint(m)}},
			min:       m,
			max:       0,
			want:      BitRange{PallocChunkPages - uint(m), uint(m)},
		}
		if m <= 8 {
			// NOTE(review): these two keys lack the Min suffix, so each
			// iteration of the m loop overwrites the previous entry and
			// only the last qualifying m (m == 8) actually runs — confirm
			// whether "OneFree"+suffix / "OneScavenged"+suffix was intended.
			tests["OneFree"] = test{
				alloc: []BitRange{{0, 40}, {40 + uint(m), PallocChunkPages - (40 + uint(m))}},
				min:   m,
				max:   PallocChunkPages,
				want:  BitRange{40, uint(m)},
			}
			tests["OneScavenged"] = test{
				alloc:     []BitRange{{0, 40}, {40 + uint(m), PallocChunkPages - (40 + uint(m))}},
				scavenged: []BitRange{{40, 1}},
				min:       m,
				max:       PallocChunkPages,
				want:      BitRange{0, 0},
			}
		}
		if m > 1 {
			tests["MaxUnaligned"+suffix] = test{
				scavenged: []BitRange{{0, PallocChunkPages - uint(m*2-1)}},
				min:       m,
				max:       m - 2,
				want:      BitRange{PallocChunkPages - uint(m), uint(m)},
			}
			// Runs smaller than min must be skipped.
			tests["SkipSmall"+suffix] = test{
				alloc: []BitRange{{0, 64 - uint(m)}, {64, 5}, {70, 11}, {82, PallocChunkPages - 82}},
				min:   m,
				max:   m,
				want:  BitRange{64 - uint(m), uint(m)},
			}
			tests["SkipMisaligned"+suffix] = test{
				alloc: []BitRange{{0, 64 - uint(m)}, {64, 63}, {127 + uint(m), PallocChunkPages - (127 + uint(m))}},
				min:   m,
				max:   m,
				want:  BitRange{64 - uint(m), uint(m)},
			}
			// max < min still returns a min-sized candidate.
			tests["MaxLessThan"+suffix] = test{
				scavenged: []BitRange{{0, PallocChunkPages - uint(m)}},
				min:       m,
				max:       1,
				want:      BitRange{PallocChunkPages - uint(m), uint(m)},
			}
		}
	}
	if PhysHugePageSize > uintptr(PageSize) {
		// Check hugepage preserving behavior.
		bits := uint(PhysHugePageSize / uintptr(PageSize))
		if bits < PallocChunkPages {
			tests["PreserveHugePageBottom"] = test{
				alloc: []BitRange{{bits + 2, PallocChunkPages - (bits + 2)}},
				min:   1,
				max:   3, // Make it so that max would have us try to break the huge page.
				want:  BitRange{0, bits + 2},
			}
			if 3*bits < PallocChunkPages {
				// We need at least 3 huge pages in a chunk for this test to make sense.
				tests["PreserveHugePageMiddle"] = test{
					alloc: []BitRange{{0, bits - 10}, {2*bits + 10, PallocChunkPages - (2*bits + 10)}},
					min:   1,
					max:   12, // Make it so that max would have us try to break the huge page.
					want:  BitRange{bits, bits + 10},
				}
			}
			tests["PreserveHugePageTop"] = test{
				alloc: []BitRange{{0, PallocChunkPages - bits}},
				min:   1,
				max:   1, // Even one page would break a huge page in this case.
				want:  BitRange{PallocChunkPages - bits, bits},
			}
		} else if bits == PallocChunkPages {
			tests["PreserveHugePageAll"] = test{
				min:  1,
				max:  1, // Even one page would break a huge page in this case.
				want: BitRange{0, PallocChunkPages},
			}
		} else {
			// The huge page size is greater than pallocChunkPages, so it should
			// be effectively disabled. There's no way we can possibly scavenge
			// a huge page out of this bitmap chunk.
			tests["PreserveHugePageNone"] = test{
				min:  1,
				max:  1,
				want: BitRange{PallocChunkPages - 1, 1},
			}
		}
	}
	for name, v := range tests {
		v := v // Capture per-iteration copy for the subtest closure.
		t.Run(name, func(t *testing.T) {
			b := makePallocData(v.alloc, v.scavenged)
			start, size := b.FindScavengeCandidate(PallocChunkPages-1, v.min, v.max)
			got := BitRange{start, size}
			// Any empty result matches an expected empty result,
			// regardless of its start index.
			if !(got.N == 0 && v.want.N == 0) && got != v.want {
				t.Fatalf("candidate mismatch: got %v, want %v", got, v.want)
			}
		})
	}
}

// Tests end-to-end scavenging on a pageAlloc.
func TestPageAllocScavenge(t *testing.T) {
	if GOOS == "openbsd" && testing.Short() {
		t.Skip("skipping because virtual memory is limited; see #36210")
	}
	// test is one Scavenge call: request bytes in, expected bytes
	// actually scavenged out.
	type test struct {
		request, expect uintptr
	}
	// minPages is the smallest scavengable unit in runtime pages:
	// one physical page, but at least one runtime page.
	minPages := PhysPageSize / PageSize
	if minPages < 1 {
		minPages = 1
	}
	// setup describes a full scenario: initial alloc/scavenged state,
	// a sequence of Scavenge calls, and the expected final scavenged state.
	type setup struct {
		beforeAlloc map[ChunkIdx][]BitRange
		beforeScav  map[ChunkIdx][]BitRange
		expect      []test
		afterScav   map[ChunkIdx][]BitRange
	}
	tests := map[string]setup{
		// ^uintptr(0) below is an effectively unbounded request,
		// used to exhaust everything scavengable.
		"AllFreeUnscavExhaust": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {},
				BaseChunkIdx + 2: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {},
				BaseChunkIdx + 2: {},
			},
			expect: []test{
				{^uintptr(0), 3 * PallocChunkPages * PageSize},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {{0, PallocChunkPages}},
			},
		},
		// Every free page is already scavenged: nothing to do.
		"NoneFreeUnscavExhaust": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {},
				BaseChunkIdx + 2: {{0, PallocChunkPages}},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {},
			},
			expect: []test{
				{^uintptr(0), 0},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {},
			},
		},
		// A 1-byte request still scavenges a whole minPages unit,
		// taken from the highest unscavenged pages first.
		"ScavHighestPageFirst": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
			},
			expect: []test{
				{1, minPages * PageSize},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(minPages)}},
			},
		},
		// Successive calls make progress through the remaining
		// unscavenged pages.
		"ScavMultiple": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
			},
			expect: []test{
				{minPages * PageSize, minPages * PageSize},
				{minPages * PageSize, minPages * PageSize},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, PallocChunkPages}},
			},
		},
		// Same, across two adjacent chunks.
		"ScavMultiple2": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
				BaseChunkIdx + 1: {{0, PallocChunkPages - uint(2*minPages)}},
			},
			expect: []test{
				{2 * minPages * PageSize, 2 * minPages * PageSize},
				{minPages * PageSize, minPages * PageSize},
				{minPages * PageSize, minPages * PageSize},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
			},
		},
		// Chunks with a gap between them (0xe chunks apart).
		"ScavDiscontiguous": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:       {},
				BaseChunkIdx + 0xe: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:       {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
				BaseChunkIdx + 0xe: {{uint(2 * minPages), PallocChunkPages - uint(2*minPages)}},
			},
			expect: []test{
				{2 * minPages * PageSize, 2 * minPages * PageSize},
				{^uintptr(0), 2 * minPages * PageSize},
				{^uintptr(0), 0},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:       {{0, PallocChunkPages}},
				BaseChunkIdx + 0xe: {{0, PallocChunkPages}},
			},
		},
	}
	if PageAlloc64Bit != 0 {
		// A very large address-space gap (0x1000 chunks); only feasible
		// on 64-bit platforms.
		tests["ScavAllVeryDiscontiguous"] = setup{
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:          {},
				BaseChunkIdx + 0x1000: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:          {},
				BaseChunkIdx + 0x1000: {},
			},
			expect: []test{
				{^uintptr(0), 2 * PallocChunkPages * PageSize},
				{^uintptr(0), 0},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:          {{0, PallocChunkPages}},
				BaseChunkIdx + 0x1000: {{0, PallocChunkPages}},
			},
		}
	}
	for name, v := range tests {
		v := v // Capture per-iteration copy for the subtest closures.
		// runTest executes the scenario once; mayUnlock is forwarded
		// to Scavenge, and each scenario is run with both values below.
		runTest := func(t *testing.T, mayUnlock bool) {
			b := NewPageAlloc(v.beforeAlloc, v.beforeScav)
			defer FreePageAlloc(b)

			for iter, h := range v.expect {
				if got := b.Scavenge(h.request, mayUnlock); got != h.expect {
					t.Fatalf("bad scavenge #%d: want %d, got %d", iter+1, h.expect, got)
				}
			}
			// The final state must match a pageAlloc built directly
			// from the expected after-scavenge bitmap.
			want := NewPageAlloc(v.beforeAlloc, v.afterScav)
			defer FreePageAlloc(want)

			checkPageAlloc(t, want, b)
		}
		t.Run(name, func(t *testing.T) {
			runTest(t, false)
		})
		t.Run(name+"MayUnlock", func(t *testing.T) {
			runTest(t, true)
		})
	}
}