// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Scavenging free pages.
//
// This file implements scavenging (the release of physical pages backing mapped
// memory) of free and unused pages in the heap as a way to deal with page-level
// fragmentation and reduce the RSS of Go applications.
//
// Scavenging in Go happens on two fronts: there's the background
// (asynchronous) scavenger and the heap-growth (synchronous) scavenger.
//
// The former happens on a goroutine much like the background sweeper which is
// soft-capped at using scavengePercent of the mutator's time, based on
// order-of-magnitude estimates of the costs of scavenging. The background
// scavenger's primary goal is to bring the estimated heap RSS of the
// application down to a goal.
//
// That goal is defined as:
//	(retainExtraPercent+100) / 100 * (next_gc / last_next_gc) * last_heap_inuse
//
// Essentially, we wish to have the application's RSS track the heap goal, but
// the heap goal is defined in terms of bytes of objects, rather than pages like
// RSS. As a result, we need to take into account fragmentation internal to
// spans. next_gc / last_next_gc defines the ratio between the current heap goal
// and the last heap goal, which tells us by how much the heap is growing and
// shrinking. We estimate what the heap will grow to in terms of pages by taking
// this ratio and multiplying it by heap_inuse at the end of the last GC, which
// allows us to account for this additional fragmentation. Note that this
// procedure makes the assumption that the degree of fragmentation won't change
// dramatically over the next GC cycle. Overestimating the amount of
// fragmentation simply results in higher memory use, which will be accounted
// for by the next pacing update. Underestimating the fragmentation, however,
// could lead to performance degradation. Handling this case is not within the
// scope of the scavenger. Situations where the amount of fragmentation balloons
// over the course of a single GC cycle should be considered pathologies,
// flagged as bugs, and fixed appropriately.
//
// An additional factor of retainExtraPercent is added as a buffer to help ensure
// that there's more unscavenged memory to allocate out of, since each allocation
// out of scavenged memory incurs a potentially expensive page fault.
//
// The goal is updated after each GC and the scavenger's pacing parameters
// (which live in mheap_) are updated to match. The pacing parameters work much
// like the background sweeping parameters. The parameters define a line whose
// horizontal axis is time and vertical axis is estimated heap RSS, and the
// scavenger attempts to stay below that line at all times.
//
// The synchronous heap-growth scavenging happens whenever the heap grows in
// size, for some definition of heap-growth. The intuition behind this is that
// the application had to grow the heap because existing fragments were
// not sufficiently large to satisfy a page-level memory allocation, so we
// scavenge those fragments eagerly to offset the growth in RSS that results.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
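
// To make the goal formula above concrete, here is a worked example with
// illustrative numbers (not measurements): if last_next_gc = 512 MiB,
// next_gc = 1024 MiB, and last_heap_inuse = 600 MiB, then the heap is
// expected to roughly double, so the estimated page-level footprint is
// 2.0 * 600 MiB = 1200 MiB. With retainExtraPercent = 10, the scavenger's
// RSS goal becomes 1.10 * 1200 MiB = 1320 MiB; memory retained beyond that
// is eligible for release.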

const (
	// The background scavenger is paced according to these parameters.
	//
	// scavengePercent represents the portion of mutator time we're willing
	// to spend on scavenging in percent.
	scavengePercent = 1 // 1%

	// retainExtraPercent represents the amount of memory over the heap goal
	// that the scavenger should keep as a buffer space for the allocator.
	//
	// The purpose of maintaining this overhead is to have a greater pool of
	// unscavenged memory available for allocation (since using scavenged memory
	// incurs an additional cost), to account for heap fragmentation and
	// the ever-changing layout of the heap.
	retainExtraPercent = 10

	// maxPagesPerPhysPage is the maximum number of supported runtime pages per
	// physical page, based on maxPhysPageSize.
	maxPagesPerPhysPage = maxPhysPageSize / pageSize

	// scavengeCostRatio is the approximate ratio between the costs of using previously
	// scavenged memory and scavenging memory.
	//
	// For most systems the cost of scavenging greatly outweighs the costs
	// associated with using scavenged memory, making this constant 0. On other systems
	// (especially ones where "sysUsed" is not just a no-op) this cost is non-trivial.
	//
	// This ratio is used as part of a multiplicative factor to help the scavenger
	// account for the additional costs of using scavenged memory in its pacing.
	scavengeCostRatio = 0.7 * sys.GoosDarwin
)

// heapRetained returns an estimate of the current heap RSS.
func heapRetained() uint64 {
	return atomic.Load64(&memstats.heap_sys) - atomic.Load64(&memstats.heap_released)
}

// gcPaceScavenger updates the scavenger's pacing, particularly
// its rate and RSS goal.
//
// The RSS goal is based on the current heap goal with a small overhead
// to accommodate non-determinism in the allocator.
//
// The pacing is based on scavengePercent: the background scavenger measures
// how long each scavenging operation takes and sleeps in between operations
// so that it uses at most that portion of mutator time.
//
// mheap_.lock must be held or the world must be stopped.
func gcPaceScavenger() {
	// If we're called before the first GC completed, disable scavenging.
	// We never scavenge before the 2nd GC cycle anyway (we don't have enough
	// information about the heap yet) so this is fine, and avoids a fault
	// or garbage data later.
	if memstats.last_next_gc == 0 {
		mheap_.scavengeGoal = ^uint64(0)
		return
	}
	// Compute our scavenging goal.
	goalRatio := float64(memstats.next_gc) / float64(memstats.last_next_gc)
	retainedGoal := uint64(float64(memstats.last_heap_inuse) * goalRatio)
	// Add retainExtraPercent overhead to retainedGoal. This calculation
	// looks strange but the purpose is to arrive at an integer division
	// (e.g. if retainExtraPercent = 12.5, then we get a divisor of 8)
	// that also avoids the overflow from a multiplication. With the
	// current retainExtraPercent = 10, the divisor is 10, so this adds
	// retainedGoal/10, i.e. 10%.
	retainedGoal += retainedGoal / (1.0 / (retainExtraPercent / 100.0))
	// Align it to a physical page boundary to make the following calculations
	// a bit more exact.
	retainedGoal = (retainedGoal + uint64(physPageSize) - 1) &^ (uint64(physPageSize) - 1)

	// Represents where we are now in the heap's contribution to RSS in bytes.
	//
	// Guaranteed to always be a multiple of physPageSize on systems where
	// physPageSize <= pageSize since we map heap_sys at a rate larger than
	// any physPageSize and release memory in multiples of the physPageSize.
	//
	// However, certain functions recategorize heap_sys as other stats (e.g.
	// stack_sys) and this happens in multiples of pageSize, so on systems
	// where physPageSize > pageSize the calculations below will not be exact.
	// Generally this is OK since we'll be off by at most one regular
	// physical page.
	retainedNow := heapRetained()

	// If we're already below our goal, or within one page of our goal, then disable
	// the background scavenger. We disable the background scavenger if there's
	// less than one physical page of work to do because it's not worth it.
	if retainedNow <= retainedGoal || retainedNow-retainedGoal < uint64(physPageSize) {
		mheap_.scavengeGoal = ^uint64(0)
		return
	}
	mheap_.scavengeGoal = retainedGoal
	mheap_.pages.resetScavengeAddr()
}

// Sleep/wait state of the background scavenger.
var scavenge struct {
	lock   mutex
	g      *g
	parked bool
	timer  *timer
}

// wakeScavenger unparks the scavenger if necessary. It must be called
// after any pacing update.
//
// mheap_.lock and scavenge.lock must not be held.
func wakeScavenger() {
	lock(&scavenge.lock)
	if scavenge.parked {
		// Try to stop the timer but we don't really care if we succeed.
		// It's possible that either a timer was never started, or that
		// we're racing with it.
		// In the case that we're racing with it, there's the low chance that
		// we experience a spurious wake-up of the scavenger, but that's
		// totally safe.
		stopTimer(scavenge.timer)

		// Unpark the goroutine and tell it that there may have been a pacing
		// change. Note that we skip the scheduler's runnext slot because we
		// want to avoid having the scavenger interfere with the fair
		// scheduling of user goroutines. In effect, this schedules the
		// scavenger at a "lower priority" but that's OK because it'll
		// catch up on the work it missed when it does get scheduled.
		scavenge.parked = false
		systemstack(func() {
			ready(scavenge.g, 0, false)
		})
	}
	unlock(&scavenge.lock)
}

// scavengeSleep attempts to put the scavenger to sleep for ns.
//
// Note that this function should only be called by the scavenger.
//
// The scavenger may be woken up earlier by a pacing change, and it may not go
// to sleep at all if there's a pending pacing change.
//
// Returns the amount of time actually slept.
func scavengeSleep(ns int64) int64 {
	lock(&scavenge.lock)

	// Set the timer.
	//
	// This must happen here instead of inside gopark
	// because we can't close over any variables without
	// failing escape analysis.
	start := nanotime()
	resetTimer(scavenge.timer, start+ns)

	// Mark ourself as asleep and go to sleep.
	scavenge.parked = true
	goparkunlock(&scavenge.lock, waitReasonSleep, traceEvGoSleep, 2)

	// Return how long we actually slept for.
	return nanotime() - start
}
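
// To make the background pacing concrete, a worked example with illustrative
// numbers (not measurements): with scavengePercent = 1, the scavenger targets
// roughly 1% of a single CPU. If one iteration spends crit = 100µs in the
// scavenging critical section and the EWMA-based adjustment factor is 1, it
// sleeps crit / 0.01 = 10ms. The observed fraction is then
// 0.1 / (0.1 + 10) ≈ 1%, keeping the EWMA near its target.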

// Background scavenger.
//
// The background scavenger maintains the RSS of the application below
// the line described by the proportional scavenging statistics in
// the mheap struct.
func bgscavenge(c chan int) {
	setSystemGoroutine()

	scavenge.g = getg()

	lock(&scavenge.lock)
	scavenge.parked = true

	scavenge.timer = new(timer)
	scavenge.timer.f = func(_ interface{}, _ uintptr) {
		wakeScavenger()
	}

	c <- 1
	goparkunlock(&scavenge.lock, waitReasonGCScavengeWait, traceEvGoBlock, 1)

	// Exponentially-weighted moving average of the fraction of time this
	// goroutine spends scavenging (that is, percent of a single CPU).
	// It represents a measure of scheduling overheads which might extend
	// the sleep or the critical time beyond what's expected. Assume no
	// overhead to begin with.
	//
	// TODO(mknyszek): Consider making this based on total CPU time of the
	// application (i.e. scavengePercent * GOMAXPROCS). This isn't really
	// feasible now because the scavenger acquires the heap lock over the
	// scavenging operation, which means scavenging effectively blocks
	// allocators and isn't scalable. However, given a scalable allocator,
	// it makes sense to also make the scavenger scale with it; if you're
	// allocating more frequently, then presumably you're also generating
	// more work for the scavenger.
	const idealFraction = scavengePercent / 100.0
	scavengeEWMA := float64(idealFraction)

	for {
		released := uintptr(0)

		// Time in scavenging critical section.
		crit := float64(0)

		// Run on the system stack since we grab the heap lock,
		// and a stack growth with the heap lock means a deadlock.
		systemstack(func() {
			lock(&mheap_.lock)

			// If background scavenging is disabled or if there's no work to do just park.
			retained, goal := heapRetained(), mheap_.scavengeGoal
			if retained <= goal {
				unlock(&mheap_.lock)
				return
			}
			unlock(&mheap_.lock)

			// Scavenge one page, and measure the amount of time spent scavenging.
			start := nanotime()
			released = mheap_.pages.scavengeOne(physPageSize, false)
			atomic.Xadduintptr(&mheap_.pages.scavReleased, released)
			crit = float64(nanotime() - start)
		})

		if released == 0 {
			lock(&scavenge.lock)
			scavenge.parked = true
			goparkunlock(&scavenge.lock, waitReasonGCScavengeWait, traceEvGoBlock, 1)
			continue
		}

		if released < physPageSize {
			// If this happens, it means that we may have attempted to release part
			// of a physical page, but the likely effect of that is that it released
			// the whole physical page, some of which may have still been in-use.
			// This could lead to memory corruption. Throw.
			throw("released less than one physical page of memory")
		}

		// On some platforms we may see crit as zero if the time it takes to scavenge
		// memory is less than the minimum granularity of its clock (e.g. Windows).
		// In this case, just assume scavenging takes 10µs per regular physical page
		// (determined empirically), and conservatively ignore the impact of huge pages
		// on timing.
		//
		// We shouldn't ever see a crit value less than zero unless there's a bug of
		// some kind, either on our side or in the platform we're running on, but be
		// defensive in that case as well.
		const approxCritNSPerPhysicalPage = 10e3
		if crit <= 0 {
			crit = approxCritNSPerPhysicalPage * float64(released/physPageSize)
		}

		// Multiply the critical time by 1 + the ratio of the costs of using
		// scavenged memory vs. scavenging memory. This forces us to pay down
		// the cost of reusing this memory eagerly by sleeping for a longer period
		// of time and scavenging less frequently. More concretely, we avoid situations
		// where we end up scavenging so often that we hurt allocation performance
		// because of the additional overheads of using scavenged memory.
		crit *= 1 + scavengeCostRatio

		// If we spent more than 10 ms (for example, if the OS scheduled us away, or someone
		// put their machine to sleep) in the critical section, bound the time we use to
		// calculate at 10 ms to avoid letting the sleep time get arbitrarily high.
		const maxCrit = 10e6
		if crit > maxCrit {
			crit = maxCrit
		}

		// Compute the amount of time to sleep, assuming we want to use at most
		// scavengePercent of CPU time. Take into account scheduling overheads
		// that may extend the length of our sleep by multiplying by how far
		// off we are from the ideal ratio. For example, if we're sleeping too
		// much, then scavengeEWMA < idealFraction, so we'll adjust the sleep time
		// down.
		adjust := scavengeEWMA / idealFraction
		sleepTime := int64(adjust * crit / (scavengePercent / 100.0))

		// Go to sleep.
		slept := scavengeSleep(sleepTime)

		// Compute the new ratio.
		fraction := crit / (crit + float64(slept))

		// Set a lower bound on the fraction.
		// Due to OS-related anomalies we may "sleep" for an inordinate amount
		// of time. Let's avoid letting the ratio get out of hand by bounding
		// the fraction we use in our EWMA.
		const minFraction = 1.0 / 1000
		if fraction < minFraction {
			fraction = minFraction
		}

		// Update scavengeEWMA by merging in the new crit/slept ratio.
		const alpha = 0.5
		scavengeEWMA = alpha*fraction + (1-alpha)*scavengeEWMA
	}
}

// scavenge scavenges nbytes worth of free pages, starting with the
// highest address first. Successive calls continue from where it left
// off until the heap is exhausted. Call resetScavengeAddr to bring it
// back to the top of the heap.
//
// Returns the amount of memory scavenged in bytes.
//
// If locked == false, s.mheapLock must not be locked. If locked == true,
// s.mheapLock must be locked.
//
// Must run on the system stack because scavengeOne must run on the
// system stack.
//
//go:systemstack
func (s *pageAlloc) scavenge(nbytes uintptr, locked bool) uintptr {
	released := uintptr(0)
	for released < nbytes {
		r := s.scavengeOne(nbytes-released, locked)
		if r == 0 {
			// Nothing left to scavenge! Give up.
			break
		}
		released += r
	}
	return released
}

// printScavTrace prints a scavenge trace line to standard error.
//
// released should be the amount of memory released since the last time this
// was called, and forced indicates whether the scavenge was forced by the
// application.
func printScavTrace(released uintptr, forced bool) {
	printlock()
	print("scav ",
		released>>10, " KiB work, ",
		atomic.Load64(&memstats.heap_released)>>10, " KiB total, ",
		(atomic.Load64(&memstats.heap_inuse)*100)/heapRetained(), "% util",
	)
	if forced {
		print(" (forced)")
	}
	println()
	printunlock()
}
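
// For reference, a trace line printed under GODEBUG=scavtrace=1 looks like
// the following (illustrative values):
//
//	scav 1024 KiB work, 537600 KiB total, 99% util
//
// where "work" is memory released since the last line, "total" is
// heap_released overall, and "util" is heap_inuse as a percentage of
// retained memory. Forced scavenges append " (forced)".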

// resetScavengeAddr sets the scavenge start address to the top of the heap's
// address space. This should be called each time the scavenger's pacing
// changes.
//
// s.mheapLock must be held.
func (s *pageAlloc) resetScavengeAddr() {
	released := atomic.Loaduintptr(&s.scavReleased)
	if debug.scavtrace > 0 {
		printScavTrace(released, false)
	}
	// Subtract from scavReleased instead of just setting it to zero because
	// the scavenger could have increased scavReleased concurrently with the
	// load above, and we may miss an update by just blindly zeroing the field.
	atomic.Xadduintptr(&s.scavReleased, -released)
	s.scavAddr = chunkBase(s.end) - 1
}

// scavengeOne starts from s.scavAddr and walks down the heap until it finds
// a contiguous run of pages to scavenge. It will try to scavenge at most
// max bytes at once, but may scavenge more to avoid breaking huge pages. Once
// it scavenges some memory it returns how much it scavenged and updates s.scavAddr
// appropriately. s.scavAddr must be reset manually and externally.
//
// Should it exhaust the heap, it will return 0 and set s.scavAddr to minScavAddr.
//
// If locked == false, s.mheapLock must not be locked.
// If locked == true, s.mheapLock must be locked.
//
// Must be run on the system stack because it either acquires the heap lock
// or executes with the heap lock acquired.
//
//go:systemstack
func (s *pageAlloc) scavengeOne(max uintptr, locked bool) uintptr {
	// Calculate the maximum number of pages to scavenge.
	//
	// This should be alignUp(max, pageSize) / pageSize but max can and will
	// be ^uintptr(0), so we need to be very careful not to overflow here.
	// Rather than use alignUp, calculate the number of pages rounded down
	// first, then add back one if necessary.
	maxPages := max / pageSize
	if max%pageSize != 0 {
		maxPages++
	}

	// Calculate the minimum number of pages we can scavenge.
	//
	// Because we can only scavenge whole physical pages, we must
	// ensure that we scavenge at least minPages each time, aligned
	// to minPages*pageSize.
	minPages := physPageSize / pageSize
	if minPages < 1 {
		minPages = 1
	}
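
	// As a worked example (illustrative configuration, not a requirement):
	// with pageSize = 8 KiB and physPageSize = 64 KiB, minPages = 8, so any
	// candidate must be at least 8 runtime pages in size and aligned to 8
	// pages. The background scavenger passes max = physPageSize, so
	// maxPages = 8 as well, and each call releases one physical page unless
	// a huge page would otherwise be broken.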

	// Helpers for locking and unlocking only if locked == false.
	lockHeap := func() {
		if !locked {
			lock(s.mheapLock)
		}
	}
	unlockHeap := func() {
		if !locked {
			unlock(s.mheapLock)
		}
	}

	lockHeap()
	ci := chunkIndex(s.scavAddr)
	if ci < s.start {
		unlockHeap()
		return 0
	}

	// Check the chunk containing the scav addr, starting at the addr
	// and see if there are any free and unscavenged pages.
	//
	// Only check this if s.scavAddr is covered by any address range
	// in s.inUse, so that we know our check of the summary is safe.
	if s.inUse.contains(s.scavAddr) && s.summary[len(s.summary)-1][ci].max() >= uint(minPages) {
		// We only bother looking for a candidate if there are at least
		// minPages free pages at all. It's important that we only
		// continue if the summary says we can because that's how
		// we can tell if parts of the address space are unused.
		// See the comment on s.chunks in mpagealloc.go.
		base, npages := s.chunkOf(ci).findScavengeCandidate(chunkPageIndex(s.scavAddr), minPages, maxPages)

		// If we found something, scavenge it and return!
		if npages != 0 {
			s.scavengeRangeLocked(ci, base, npages)
			unlockHeap()
			return uintptr(npages) * pageSize
		}
	}

	// getInUseRange returns the highest range in the
	// intersection of [0, addr] and s.inUse.
	//
	// s.mheapLock must be held.
	getInUseRange := func(addr uintptr) addrRange {
		top := s.inUse.findSucc(addr)
		if top == 0 {
			return addrRange{}
		}
		r := s.inUse.ranges[top-1]
		// addr is inclusive, so treat it as such when
		// updating the limit, which is exclusive.
		if r.limit > addr+1 {
			r.limit = addr + 1
		}
		return r
	}

	// Slow path: iterate optimistically over the in-use address space
	// looking for any free and unscavenged page. If we think we see something,
	// lock and verify it!
	//
	// We iterate over the address space by taking ranges from inUse.
newRange:
	for {
		r := getInUseRange(s.scavAddr)
		if r.size() == 0 {
			break
		}
		unlockHeap()

		// Iterate over all of the chunks described by r.
		// Note that r.limit is the exclusive upper bound, but what
		// we want is the top chunk instead, inclusive, so subtract 1.
		bot, top := chunkIndex(r.base), chunkIndex(r.limit-1)
		for i := top; i >= bot; i-- {
			// If this chunk is totally in-use or has no unscavenged pages, don't bother
			// doing a more sophisticated check.
			//
			// Note we're accessing the summary and the chunks without a lock, but
			// that's fine. We're being optimistic anyway.

			// Check quickly if there are enough free pages at all.
			if s.summary[len(s.summary)-1][i].max() < uint(minPages) {
				continue
			}

			// Run over the chunk looking harder for a candidate. Again, we could
			// race with a lot of different pieces of code, but we're just being
			// optimistic. Make sure we load the l2 pointer atomically though, to
			// avoid races with heap growth. It may or may not be possible to also
			// see a nil pointer in this case if we do race with heap growth, but
			// just defensively ignore the nils. This operation is optimistic anyway.
			l2 := (*[1 << pallocChunksL2Bits]pallocData)(atomic.Loadp(unsafe.Pointer(&s.chunks[i.l1()])))
			if l2 == nil || !l2[i.l2()].hasScavengeCandidate(minPages) {
				continue
			}

			// We found a candidate, so let's lock and verify it.
			lockHeap()

			// Find, verify, and scavenge if we can.
			chunk := s.chunkOf(i)
			base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
			if npages > 0 {
				// We found memory to scavenge! Mark the bits and report that up.
				// scavengeRangeLocked will update scavAddr for us, also.
				s.scavengeRangeLocked(i, base, npages)
				unlockHeap()
				return uintptr(npages) * pageSize
			}

			// We were fooled, so this chunk has nothing for us after all.
			// Take this opportunity to move scavAddr down past everything
			// we just searched so that future calls skip it, then go get
			// a new range.
			s.scavAddr = chunkBase(i-1) + pallocChunkPages*pageSize - 1
			continue newRange
		}
		lockHeap()

		// Move the scavenger down the heap, past everything we just searched.
		// Since we don't check if scavAddr moved while we let go of the heap lock,
		// it's possible that it moved down and we're moving it up here. This
		// raciness could result in us searching parts of the heap unnecessarily.
		// TODO(mknyszek): Remove this racy behavior through explicit address
		// space reservations, which are difficult to do with just scavAddr.
		s.scavAddr = r.base - 1
	}
	// We reached the end of the in-use address space and couldn't find anything,
	// so signal that there's nothing left to scavenge.
	s.scavAddr = minScavAddr
	unlockHeap()

	return 0
}

// scavengeRangeLocked scavenges the given region of memory.
//
// s.mheapLock must be held.
func (s *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) {
	s.chunkOf(ci).scavenged.setRange(base, npages)

	// Compute the full address for the start of the range.
	addr := chunkBase(ci) + uintptr(base)*pageSize

	// Update the scav pointer.
	s.scavAddr = addr - 1

	// Only perform the actual scavenging if we're not in a test.
	// It's dangerous to do so otherwise.
	if s.test {
		return
	}
	sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)

	// Update global accounting only when not in test, otherwise
	// the runtime's accounting will be wrong.
	mSysStatInc(&memstats.heap_released, uintptr(npages)*pageSize)
}

// fillAligned returns x but with all zeroes in m-aligned
// groups of m bits set to 1 if any bit in the group is non-zero.
//
// For example, fillAligned(0x0100a3, 8) == 0xff00ff.
//
// Note that if m == 1, this is a no-op.
//
// m must be a power of 2 <= maxPagesPerPhysPage.
func fillAligned(x uint64, m uint) uint64 {
	apply := func(x uint64, c uint64) uint64 {
		// The technique used here is derived from
		// https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
		// and extended for more than just bytes (like nibbles
		// and uint16s) by using an appropriate constant.
		//
		// To summarize the technique, quoting from that page:
		// "[It] works by first zeroing the high bits of the [8]
		// bytes in the word. Subsequently, it adds a number that
		// will result in an overflow to the high bit of a byte if
		// any of the low bits were initially set. Next the high
		// bits of the original word are ORed with these values;
		// thus, the high bit of a byte is set iff any bit in the
		// byte was set. Finally, we determine if any of these high
		// bits are zero by ORing with ones everywhere except the
		// high bits and inverting the result."
		return ^((((x & c) + c) | x) | c)
	}
	// Transform x to contain a 1 bit at the top of each m-aligned
	// group of m zero bits.
	switch m {
	case 1:
		return x
	case 2:
		x = apply(x, 0x5555555555555555)
	case 4:
		x = apply(x, 0x7777777777777777)
	case 8:
		x = apply(x, 0x7f7f7f7f7f7f7f7f)
	case 16:
		x = apply(x, 0x7fff7fff7fff7fff)
	case 32:
		x = apply(x, 0x7fffffff7fffffff)
	case 64: // == maxPagesPerPhysPage
		x = apply(x, 0x7fffffffffffffff)
	default:
		throw("bad m value")
	}
	// Now, the top bit of each m-aligned group in x is set
	// iff that group was all zero in the original x.

	// From each group of m bits subtract 1.
	// Because we know only the top bits of each
	// m-aligned group are set, we know this will
	// set each group to have all the bits set except
	// the top bit, so just OR with the original
	// result to set all the bits, then invert so that
	// originally-zero groups come out as all zeroes and
	// every other group comes out as all ones.
	return ^((x - (x >> (m - 1))) | x)
}
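
// To make fillAligned concrete, here is a trace of the documented example,
// fillAligned(0x0100a3, 8), viewing x one byte (m = 8) at a time from the
// least significant byte up:
//
//	x      = 0x00000000000100a3 -> bytes a3 00 01 00 00 00 00 00
//	result = 0x0000000000ff00ff -> bytes ff 00 ff 00 00 00 00 00
//
// Every byte containing at least one set bit becomes 0xff; all-zero bytes
// stay 0x00.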

// hasScavengeCandidate returns true if there's any min-page-aligned groups of
// min pages of free-and-unscavenged memory in the region represented by this
// pallocData.
//
// min must be a non-zero power of 2 <= maxPagesPerPhysPage.
func (m *pallocData) hasScavengeCandidate(min uintptr) bool {
	if min&(min-1) != 0 || min == 0 {
		print("runtime: min = ", min, "\n")
		throw("min must be a non-zero power of 2")
	} else if min > maxPagesPerPhysPage {
		print("runtime: min = ", min, "\n")
		throw("min too large")
	}

	// The goal of this search is to see if the chunk contains any free and unscavenged memory.
	for i := len(m.scavenged) - 1; i >= 0; i-- {
		// 1s are scavenged OR non-free => 0s are unscavenged AND free
		//
		// TODO(mknyszek): Consider splitting up fillAligned into two
		// functions, since here we technically could get by with just
		// the first half of its computation. It'll save a few instructions
		// but adds some additional code complexity.
		x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))

		// If any min-aligned group is all zero (free and unscavenged),
		// then x won't be all ones, and we have a candidate.
		if x != ^uint64(0) {
			return true
		}
	}
	return false
}

// findScavengeCandidate returns a start index and a size for this pallocData
// segment which represents a contiguous region of free and unscavenged memory.
//
// searchIdx indicates the page index within this chunk to start the search, but
// note that findScavengeCandidate searches backwards through the pallocData. As
// a result, it will return the highest scavenge candidate in address order.
//
// min indicates a hard minimum size and alignment for runs of pages. That is,
// findScavengeCandidate will not return a region smaller than min pages in size,
// or that is min pages or greater in size but not aligned to min. min must be
// a non-zero power of 2 <= maxPagesPerPhysPage.
//
// max is a hint for how big of a region is desired. If max >= pallocChunkPages, then
// findScavengeCandidate effectively returns entire free and unscavenged regions.
// If max < pallocChunkPages, it may truncate the returned region such that size is
// max. However, findScavengeCandidate may still return a larger region if, for
// example, it chooses to preserve huge pages, or if max is not aligned to min (it
// will round up). That is, even if max is small, the returned size is not guaranteed
// to be equal to max. max is allowed to be less than min, in which case it is treated
// as if max == min.
func (m *pallocData) findScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
	if min&(min-1) != 0 || min == 0 {
		print("runtime: min = ", min, "\n")
		throw("min must be a non-zero power of 2")
	} else if min > maxPagesPerPhysPage {
		print("runtime: min = ", min, "\n")
		throw("min too large")
	}
	// max may not be min-aligned, so we might accidentally truncate to
	// a max value which causes us to return a non-min-aligned value.
	// To prevent this, align max up to a multiple of min (which is always
	// a power of 2). This also prevents max from ever being less than
	// min, unless it's zero, so handle that explicitly.
	if max == 0 {
		max = min
	} else {
		max = alignUp(max, min)
	}

	i := int(searchIdx / 64)
	// Start by quickly skipping over blocks of non-free or scavenged pages.
	for ; i >= 0; i-- {
		// 1s are scavenged OR non-free => 0s are unscavenged AND free
		x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
		if x != ^uint64(0) {
			break
		}
	}
	if i < 0 {
		// Failed to find any free/unscavenged pages.
		return 0, 0
	}
	// We have something in the 64-bit chunk at i, but it could
	// extend further. Loop until we find the extent of it.

	// 1s are scavenged OR non-free => 0s are unscavenged AND free
	x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
	z1 := uint(sys.LeadingZeros64(^x))
	run, end := uint(0), uint(i)*64+(64-z1)
	if x<<z1 != 0 {
		// After shifting out z1 bits, we still have 1s,
		// so the run ends inside this word.
		run = uint(sys.LeadingZeros64(x << z1))
	} else {
		// After shifting out z1 bits, we have no more 1s.
		// This means the run extends to the bottom of the
		// word so it may extend into further words.
		run = 64 - z1
		for j := i - 1; j >= 0; j-- {
			x := fillAligned(m.scavenged[j]|m.pallocBits[j], uint(min))
			run += uint(sys.LeadingZeros64(x))
			if x != 0 {
				// The run stopped in this word.
				break
			}
		}
	}

	// Split the run we found if it's larger than max, but hold on to
	// our original length, since we may need it later.
	size := run
	if size > uint(max) {
		size = uint(max)
	}
	start := end - size

	// Each huge page is guaranteed to fit in a single palloc chunk.
	//
	// TODO(mknyszek): Support larger huge page sizes.
	// TODO(mknyszek): Consider taking pages-per-huge-page as a parameter
	// so we can write tests for this.
	if physHugePageSize > pageSize && physHugePageSize > physPageSize {
		// We have huge pages, so let's ensure we don't break one by scavenging
		// over a huge page boundary. If the range [start, start+size) overlaps with
		// a free-and-unscavenged huge page, we want to grow the region we scavenge
		// to include that huge page.

		// Compute the huge page boundary above our candidate.
		pagesPerHugePage := uintptr(physHugePageSize / pageSize)
		hugePageAbove := uint(alignUp(uintptr(start), pagesPerHugePage))

		// If that boundary is within our current candidate, then we may be breaking
		// a huge page.
		if hugePageAbove <= end {
			// Compute the huge page boundary below our candidate.
			hugePageBelow := uint(alignDown(uintptr(start), pagesPerHugePage))

			if hugePageBelow >= end-run {
				// We're in danger of breaking apart a huge page since start+size crosses
				// a huge page boundary and the full run we found extends at least down
				// to that huge page's lower boundary. Include the entire huge page in
				// our candidate by rounding start down to the huge page boundary.
				size = size + (start - hugePageBelow)
				start = hugePageBelow
			}
		}
	}
	return start, size
}
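
// A worked example of the huge-page preservation above, with illustrative
// numbers (assuming physHugePageSize = 2 MiB and pageSize = 8 KiB, so
// pagesPerHugePage = 256): suppose the full free-and-unscavenged run spans
// pages [100, 600) (run = 500, end = 600) and max truncates the candidate to
// size = 100, giving [start, end) = [500, 600). Then hugePageAbove =
// alignUp(500, 256) = 512 <= 600, so the candidate straddles a huge page
// boundary, and hugePageBelow = alignDown(500, 256) = 256 >= end-run = 100,
// so the whole huge page [256, 512) is free and unscavenged. The candidate
// is therefore grown to [256, 600) (start = 256, size = 344) so that the
// huge page is scavenged as a unit rather than broken apart.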