// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

//var Fadd64 = fadd64
//var Fsub64 = fsub64
//var Fmul64 = fmul64
//var Fdiv64 = fdiv64
//var F64to32 = f64to32
//var F32to64 = f32to64
//var Fcmp64 = fcmp64
//var Fintto64 = fintto64
//var F64toint = f64toint

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var FuncPC = funcPC

var Fastlog2 = fastlog2

var Atoi = atoi
var Atoi32 = atoi32

var Nanotime = nanotime
var NetpollBreak = netpollBreak
var Usleep = usleep

var PhysPageSize = physPageSize
var PhysHugePageSize = physHugePageSize

var NetpollGenericInit = netpollGenericInit

var ParseRelease = parseRelease

var Memmove = memmove
var MemclrNoHeapPointers = memclrNoHeapPointers

const PreemptMSupported = preemptMSupported

type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}

func Netpoll(delta int64) {
	systemstack(func() {
		netpoll(delta)
	})
}

func GCMask(x interface{}) (ret []byte) {
	return nil
}

func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions and spurious emptiness
	// can lead to underutilization (both runnable Gs and idle Ps coexist
	// for arbitrary long time).
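	//
	// The scheme below: gs[0] is queued before both goroutines pass the
	// two-way spin barrier on ready, and the main goroutine then queues
	// gs[1] before removing anything, so the runq holds at least one G
	// whenever the spawned goroutine calls runqempty. A true result from
	// runqempty therefore means it spuriously reported an empty queue.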
	done := make(chan bool, 1)
	_p_ := new(p)
	gs := make([]g, 2)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(_p_, &gs[0], next0)
		go func(done chan bool, p *p, ready *uint32, next0, next1 bool) {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}(done, _p_, ready, next0, next1)
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(_p_, &gs[1], next1)
		runqget(_p_)
		<-done
		runqget(_p_)
	}
}

var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash

func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

var HashLoad = &hashLoad

// entry point for testing
//func GostringW(w []uint16) (s string) {
//	s = gostringw(&w[0])
//	return
//}

type Uintreg sys.Uintreg

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

//var BigEndian = sys.BigEndian

// For benchmarking.

func BenchSetType(n int, x interface{}) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}

const PtrSize = sys.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
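//
// An illustrative use (not taken from this file): a test driver could call
//
//	SetTracebackEnv("system")
//
// early on, after which a later debug.SetTraceback("all") cannot reduce
// the traceback detail below the "system" level.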
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse)

	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}

func Fastrand() uint32          { return fastrand() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }

type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(profBufReadMode(mode))
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}

// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		slow.HeapReleased = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state.get() != mSpanInUse {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees. readmemstats_m flushed the cached stats, so
		// these are up-to-date.
		var smallFree uint64
		slow.Frees = mheap_.nlargefree
		for i := range mheap_.nsmallfree {
			slow.Frees += mheap_.nsmallfree[i]
			bySize[i].Frees = mheap_.nsmallfree[i]
			bySize[i].Mallocs += mheap_.nsmallfree[i]
			smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
		}
		slow.Frees += memstats.tinyallocs
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + mheap_.largefree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			pg := mheap_.pages.chunkOf(i).scavenged.popcntRange(0, pallocChunkPages)
			slow.HeapReleased += uint64(pg) * pageSize
		}
		for _, p := range allp {
			pg := sys.OnesCount64(p.pcache.scav)
			slow.HeapReleased += uint64(pg) * pageSize
		}

		// Unused space in the current arena also counts as released space.
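		// (The range mheap_.curArena.base up to mheap_.curArena.end has
		// been reserved for heap growth but not yet handed out by the
		// allocator, so the slow accounting treats it like scavenged
		// memory.)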
		slow.HeapReleased += uint64(mheap_.curArena.end - mheap_.curArena.base)

		getg().m.mallocing--
	})

	startTheWorld()
	return
}

// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

// deadlock is locked twice and never unlocked; blockOnSystemStackInternal
// blocks on it forever.
var deadlock mutex

type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}

const RuntimeHmapSize = unsafe.Sizeof(hmap{})

func MapBucketsCount(m map[int]int) int {
	h := *(**hmap)(unsafe.Pointer(&m))
	return 1 << h.B
}

func MapBucketsPointerIsNil(m map[int]int) bool {
	h := *(**hmap)(unsafe.Pointer(&m))
	return h.buckets == nil
}

func LockOSCounts() (external, internal uint32) {
	g := getg()
	if g.m.lockedExt+g.m.lockedInt == 0 {
		if g.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if g.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return g.m.lockedExt, g.m.lockedInt
}

//go:noinline
func TracebackSystemstack(stk []uintptr, i int) int {
	if i == 0 {
		return callersRaw(stk)
	}
	n := 0
	systemstack(func() {
		n = TracebackSystemstack(stk, i-1)
	})
	return n
}

func KeepNArenaHints(n int) {
	hint := mheap_.arenaHints
	for i := 1; i < n; i++ {
		hint = hint.next
		if hint == nil {
			return
		}
	}
	hint.next = nil
}

// MapNextArenaHint reserves a page at the next arena growth hint,
// preventing the arena from growing there, and returns the range of
// addresses that are no longer viable.
func MapNextArenaHint() (start, end uintptr) {
	hint := mheap_.arenaHints
	addr := hint.addr
	if hint.down {
		start, end = addr-heapArenaBytes, addr
		addr -= physPageSize
	} else {
		start, end = addr, addr+heapArenaBytes
	}
	sysReserve(unsafe.Pointer(addr), physPageSize)
	return
}

func GetNextArenaHint() uintptr {
	return mheap_.arenaHints.addr
}

type G = g

func Getg() *G {
	return getg()
}

//go:noinline
func PanicForTesting(b []byte, i int) byte {
	return unexportedPanicForTesting(b, i)
}

//go:noinline
func unexportedPanicForTesting(b []byte, i int) byte {
	return b[i]
}

func G0StackOverflow() {
	systemstack(func() {
		stackOverflow(nil)
	})
}

func stackOverflow(x *byte) {
	var buf [256]byte
	stackOverflow(&buf[0])
}

func MapTombstoneCheck(m map[int]int) {
	// Make sure emptyOne and emptyRest are distributed correctly.
	// We should have a series of filled and emptyOne cells, followed by
	// a series of emptyRest cells.
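	//
	// The two passes over each bucket chain below implement that check:
	// the first counts the cells that are not emptyRest (n), and the
	// second verifies that every emptyRest cell comes after those n cells
	// and that cell n-1, the last one before the emptyRest run, is not an
	// emptyOne cell (such a cell should have been marked emptyRest).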
	h := *(**hmap)(unsafe.Pointer(&m))
	i := interface{}(m)
	t := *(**maptype)(unsafe.Pointer(&i))

	for x := 0; x < 1<<h.B; x++ {
		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.bucketsize)))
		n := 0
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if b.tophash[i] != emptyRest {
					n++
				}
			}
		}
		k := 0
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if k < n && b.tophash[i] == emptyRest {
					panic("early emptyRest")
				}
				if k >= n && b.tophash[i] != emptyRest {
					panic("late non-emptyRest")
				}
				if k == n-1 && b.tophash[i] == emptyOne {
					panic("last non-emptyRest entry is emptyOne")
				}
				k++
			}
		}
	}
}

func RunGetgThreadSwitchTest() {
	// Test that getg works correctly with thread switch.
	// With gccgo, if we generate getg inlined, the backend
	// may cache the address of the TLS variable, which
	// will become invalid after a thread switch. This test
	// checks that the bad caching doesn't happen.

	ch := make(chan int)
	go func(ch chan int) {
		ch <- 5
		LockOSThread()
	}(ch)

	g1 := getg()

	// Block on a receive. This is likely to get us a thread
	// switch. If we yield to the sender goroutine, it will
	// lock the thread, forcing us to resume on a different
	// thread.
	<-ch

	g2 := getg()
	if g1 != g2 {
		panic("g1 != g2")
	}

	// Also test getg after some control flow, as the
	// backend is sensitive to control flow.
	g3 := getg()
	if g1 != g3 {
		panic("g1 != g3")
	}
}

const (
	PageSize         = pageSize
	PallocChunkPages = pallocChunkPages
	PageAlloc64Bit   = pageAlloc64Bit
	PallocSumBytes   = pallocSumBytes
)

// Expose pallocSum for testing.
type PallocSum pallocSum

func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
func (m PallocSum) End() uint                      { return pallocSum(m).end() }

// Expose pallocBits for testing.
type PallocBits pallocBits

func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
	return (*pallocBits)(b).find(npages, searchIdx)
}
func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }

// SummarizeSlow is a slow but more obviously correct implementation
// of (*pallocBits).summarize. Used for testing.
func SummarizeSlow(b *PallocBits) PallocSum {
	var start, max, end uint

	const N = uint(len(b)) * 64
	for start < N && (*pageBits)(b).get(start) == 0 {
		start++
	}
	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
		end++
	}
	run := uint(0)
	for i := uint(0); i < N; i++ {
		if (*pageBits)(b).get(i) == 0 {
			run++
		} else {
			run = 0
		}
		if run > max {
			max = run
		}
	}
	return PackPallocSum(start, max, end)
}

// Expose non-trivial helpers for testing.
func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }

// Given two PallocBits, returns a set of bit ranges where
// they differ.
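//
// For example (illustrative): if a has only bits 0 through 2 set and b has
// only bits 1 through 3 set, the two differ at bits 0 and 3, so the result
// is []BitRange{{0, 1}, {3, 1}}.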
func DiffPallocBits(a, b *PallocBits) []BitRange {
	ba := (*pageBits)(a)
	bb := (*pageBits)(b)

	var d []BitRange
	base, size := uint(0), uint(0)
	for i := uint(0); i < uint(len(ba))*64; i++ {
		if ba.get(i) != bb.get(i) {
			if size == 0 {
				base = i
			}
			size++
		} else {
			if size != 0 {
				d = append(d, BitRange{base, size})
			}
			size = 0
		}
	}
	if size != 0 {
		d = append(d, BitRange{base, size})
	}
	return d
}

// StringifyPallocBits gets the bits in the bit range r from b,
// and returns a string containing the bits as ASCII 0 and 1
// characters.
func StringifyPallocBits(b *PallocBits, r BitRange) string {
	str := ""
	for j := r.I; j < r.I+r.N; j++ {
		if (*pageBits)(b).get(j) != 0 {
			str += "1"
		} else {
			str += "0"
		}
	}
	return str
}

// Expose pallocData for testing.
type PallocData pallocData

func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
}
func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
func (d *PallocData) ScavengedSetRange(i, n uint) {
	(*pallocData)(d).scavenged.setRange(i, n)
}
func (d *PallocData) PallocBits() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).pallocBits)
}
func (d *PallocData) Scavenged() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).scavenged)
}

// Expose fillAligned for testing.
func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }

// Expose pageCache for testing.
type PageCache pageCache

const PageCachePages = pageCachePages

func NewPageCache(base uintptr, cache, scav uint64) PageCache {
	return PageCache(pageCache{base: base, cache: cache, scav: scav})
}
func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
	return (*pageCache)(c).alloc(npages)
}
func (c *PageCache) Flush(s *PageAlloc) {
	(*pageCache)(c).flush((*pageAlloc)(s))
}

// Expose chunk index type.
type ChunkIdx chunkIdx

// Expose pageAlloc for testing. Note that because pageAlloc is
// not in the heap, neither is PageAlloc.
type PageAlloc pageAlloc

func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
	return (*pageAlloc)(p).alloc(npages)
}
func (p *PageAlloc) AllocToCache() PageCache {
	return PageCache((*pageAlloc)(p).allocToCache())
}
func (p *PageAlloc) Free(base, npages uintptr) {
	(*pageAlloc)(p).free(base, npages)
}
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}
func (p *PageAlloc) Scavenge(nbytes uintptr, locked bool) (r uintptr) {
	systemstack(func() {
		r = (*pageAlloc)(p).scavenge(nbytes, locked)
	})
	return
}
func (p *PageAlloc) InUse() []AddrRange {
	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
	for _, r := range p.inUse.ranges {
		ranges = append(ranges, AddrRange{
			Base:  r.base,
			Limit: r.limit,
		})
	}
	return ranges
}

// Returns nil if the PallocData's L2 is missing.
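// (pageAlloc.chunks is a sparse two-level structure; an L2 block is only
// mapped once the page allocator has grown over an address in that chunk's
// range, so a nil result here simply means the chunk was never touched.)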
func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
	ci := chunkIdx(i)
	l2 := (*pageAlloc)(p).chunks[ci.l1()]
	if l2 == nil {
		return nil
	}
	return (*PallocData)(&l2[ci.l2()])
}

// AddrRange represents a range over addresses.
// Specifically, it represents the range [Base, Limit).
type AddrRange struct {
	Base, Limit uintptr
}

// BitRange represents a range over a bitmap.
type BitRange struct {
	I, N uint // bit index and length in bits
}

// NewPageAlloc creates a new page allocator for testing and
// initializes it with the scav and chunks maps. Each key in these maps
// represents a chunk index and each value is a series of bit ranges to
// set within each bitmap's chunk.
//
// The initialization of the pageAlloc preserves the invariant that if a
// scavenged bit is set the alloc bit is necessarily unset, so some
// of the bits described by scav may be cleared in the final bitmap if
// ranges in chunks overlap with them.
//
// scav is optional, and if nil, the scavenged bitmap will be cleared
// (as opposed to all 1s, which it usually is). Furthermore, every
// chunk index in scav must appear in chunks; ones that do not are
// ignored.
func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
	p := new(pageAlloc)

	// We've got an entry, so initialize the pageAlloc.
	p.init(new(mutex), nil)
	p.test = true

	for i, init := range chunks {
		addr := chunkBase(chunkIdx(i))

		// Mark the chunk's existence in the pageAlloc.
		p.grow(addr, pallocChunkBytes)

		// Initialize the bitmap and update pageAlloc metadata.
		chunk := p.chunkOf(chunkIndex(addr))

		// Clear all the scavenged bits which grow set.
		chunk.scavenged.clearRange(0, pallocChunkPages)

		// Apply scavenge state if applicable.
		if scav != nil {
			if scvg, ok := scav[i]; ok {
				for _, s := range scvg {
					// Ignore the case of s.N == 0. setRange doesn't handle
					// it and it's a no-op anyway.
					if s.N != 0 {
						chunk.scavenged.setRange(s.I, s.N)
					}
				}
			}
		}
		p.resetScavengeAddr()

		// Apply alloc state.
		for _, s := range init {
			// Ignore the case of s.N == 0. allocRange doesn't handle
			// it and it's a no-op anyway.
			if s.N != 0 {
				chunk.allocRange(s.I, s.N)
			}
		}

		// Update heap metadata for the allocRange calls above.
		p.update(addr, pallocChunkPages, false, false)
	}
	return (*PageAlloc)(p)
}

// FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
// is called the pageAlloc may no longer be used. The object itself will be
// collected by the garbage collector once it is no longer live.
func FreePageAlloc(pp *PageAlloc) {
	p := (*pageAlloc)(pp)

	// Free all the mapped space for the summary levels.
	if pageAlloc64Bit != 0 {
		for l := 0; l < summaryLevels; l++ {
			sysFree(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes, nil)
		}
	} else {
		resSize := uintptr(0)
		for _, s := range p.summary {
			resSize += uintptr(cap(s)) * pallocSumBytes
		}
		sysFree(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize), nil)
	}

	// Free the mapped space for chunks.
	for i := range p.chunks {
		if x := p.chunks[i]; x != nil {
			p.chunks[i] = nil
			// This memory comes from sysAlloc and will always be page-aligned.
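			// (Each non-nil entry is an L2 block that was mapped by its own
			// sysAlloc call during grow, so each is unmapped individually.)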
			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), nil)
		}
	}
}

// BaseChunkIdx is a convenient chunkIdx value which works on both
// 64 bit and 32 bit platforms, allowing the tests to share code
// between the two.
//
// On AIX, the arenaBaseOffset is 0x0a00000000000000. However, this
// constant can't be used here because it is negative and will cause
// a constant overflow.
//
// This should not be higher than 0x100*pallocChunkBytes to support
// mips and mipsle, which only have 31-bit address spaces.
var BaseChunkIdx = ChunkIdx(chunkIndex(((0xc000*pageAlloc64Bit + 0x100*pageAlloc32Bit) * pallocChunkBytes) + 0x0a00000000000000*sys.GoosAix))

// PageBase returns an address given a chunk index and a page index
// relative to that chunk.
func PageBase(c ChunkIdx, pageIdx uint) uintptr {
	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
}

type BitsMismatch struct {
	Base      uintptr
	Got, Want uint64
}

func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
	ok = true

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		getg().m.mallocing++

		// Lock so that we can safely access the bitmap.
		lock(&mheap_.lock)
	chunkLoop:
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.chunkOf(i)
			for j := 0; j < pallocChunkPages/64; j++ {
				// Run over each 64-bit bitmap section and ensure
				// scavenged is being cleared properly on allocation.
				// If a used bit and scavenged bit are both set, that's
				// an error, and could indicate a larger problem, or
				// an accounting problem.
				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
				got := chunk.scavenged[j]
				if want != got {
					ok = false
					if n >= len(mismatches) {
						break chunkLoop
					}
					mismatches[n] = BitsMismatch{
						Base: chunkBase(i) + uintptr(j)*64*pageSize,
						Got:  got,
						Want: want,
					}
					n++
				}
			}
		}
		unlock(&mheap_.lock)

		getg().m.mallocing--
	})
	return
}

func PageCachePagesLeaked() (leaked uintptr) {
	stopTheWorld("PageCachePagesLeaked")

	// Walk over destroyed Ps and look for unflushed caches.
	deadp := allp[len(allp):cap(allp)]
	for _, p := range deadp {
		// Since we're going past len(allp) we may see nil Ps.
		// Just ignore them.
		if p != nil {
			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
		}
	}

	startTheWorld()
	return
}

var Semacquire = semacquire
var Semrelease1 = semrelease1

func SemNwait(addr *uint32) uint32 {
	root := semroot(addr)
	return atomic.Load(&root.nwait)
}

// MapHashCheck computes the hash of the key k for the map m, twice.
// Method 1 uses the built-in hasher for the map.
// Method 2 uses the typehash function (the one used by reflect).
// Returns the two hash values, which should always be equal.
func MapHashCheck(m interface{}, k interface{}) (uintptr, uintptr) {
	// Unpack m.
	mt := (*maptype)(unsafe.Pointer(efaceOf(&m)._type))
	mh := (*hmap)(efaceOf(&m).data)

	// Unpack k.
	kt := efaceOf(&k)._type
	var p unsafe.Pointer
	if isDirectIface(kt) {
		q := efaceOf(&k).data
		p = unsafe.Pointer(&q)
	} else {
		p = efaceOf(&k).data
	}

	// Compute the hash functions.
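	// mt.hasher is the compiler-generated hash function recorded in the
	// maptype, while typehash is the generic implementation that reflect
	// uses; tests built on this helper rely on the two agreeing for every
	// key.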
	x := mt.hasher(noescape(p), uintptr(mh.hash0))
	y := typehash(kt, noescape(p), uintptr(mh.hash0))
	return x, y
}

var Pusestackmaps = &usestackmaps
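
// The snippet below is an illustrative sketch, not part of the runtime: it
// shows how a test in package runtime_test might drive the page-allocator
// helpers exported above. The test name and the dot-import are assumptions.
//
//	import (
//		"testing"
//
//		. "runtime"
//	)
//
//	func TestPageAllocAllocSketch(t *testing.T) {
//		// One chunk at BaseChunkIdx with every page initially free.
//		b := NewPageAlloc(map[ChunkIdx][]BitRange{
//			BaseChunkIdx: {},
//		}, nil)
//		defer FreePageAlloc(b)
//
//		// The first one-page allocation should come from the start of
//		// the chunk.
//		addr, _ := b.Alloc(1)
//		if want := PageBase(BaseChunkIdx, 0); addr != want {
//			t.Errorf("Alloc(1) = %#x, want %#x", addr, want)
//		}
//	}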