1// Copyright 2014 The Go Authors. All rights reserved. 2// Use of this source code is governed by a BSD-style 3// license that can be found in the LICENSE file. 4 5// Memory allocator. 6// 7// This was originally based on tcmalloc, but has diverged quite a bit. 8// http://goog-perftools.sourceforge.net/doc/tcmalloc.html 9 10// The main allocator works in runs of pages. 11// Small allocation sizes (up to and including 32 kB) are 12// rounded to one of about 70 size classes, each of which 13// has its own free set of objects of exactly that size. 14// Any free page of memory can be split into a set of objects 15// of one size class, which are then managed using a free bitmap. 16// 17// The allocator's data structures are: 18// 19// fixalloc: a free-list allocator for fixed-size off-heap objects, 20// used to manage storage used by the allocator. 21// mheap: the malloc heap, managed at page (8192-byte) granularity. 22// mspan: a run of pages managed by the mheap. 23// mcentral: collects all spans of a given size class. 24// mcache: a per-P cache of mspans with free space. 25// mstats: allocation statistics. 26// 27// Allocating a small object proceeds up a hierarchy of caches: 28// 29// 1. Round the size up to one of the small size classes 30// and look in the corresponding mspan in this P's mcache. 31// Scan the mspan's free bitmap to find a free slot. 32// If there is a free slot, allocate it. 33// This can all be done without acquiring a lock. 34// 35// 2. If the mspan has no free slots, obtain a new mspan 36// from the mcentral's list of mspans of the required size 37// class that have free space. 38// Obtaining a whole span amortizes the cost of locking 39// the mcentral. 40// 41// 3. If the mcentral's mspan list is empty, obtain a run 42// of pages from the mheap to use for the mspan. 43// 44// 4. If the mheap is empty or has no page runs large enough, 45// allocate a new group of pages (at least 1MB) from the 46// operating system. Allocating a large run of pages 47// amortizes the cost of talking to the operating system. 48// 49// Sweeping an mspan and freeing objects on it proceeds up a similar 50// hierarchy: 51// 52// 1. If the mspan is being swept in response to allocation, it 53// is returned to the mcache to satisfy the allocation. 54// 55// 2. Otherwise, if the mspan still has allocated objects in it, 56// it is placed on the mcentral free list for the mspan's size 57// class. 58// 59// 3. Otherwise, if all objects in the mspan are free, the mspan 60// is now "idle", so it is returned to the mheap and no longer 61// has a size class. 62// This may coalesce it with adjacent idle mspans. 63// 64// 4. If an mspan remains idle for long enough, return its pages 65// to the operating system. 66// 67// Allocating and freeing a large object uses the mheap 68// directly, bypassing the mcache and mcentral. 69// 70// Free object slots in an mspan are zeroed only if mspan.needzero is 71// false. If needzero is true, objects are zeroed as they are 72// allocated. There are various benefits to delaying zeroing this way: 73// 74// 1. Stack frame allocation can avoid zeroing altogether. 75// 76// 2. It exhibits better temporal locality, since the program is 77// probably about to write to the memory. 78// 79// 3. We don't zero pages that never get reused. 80 81package runtime 82 83import ( 84 "runtime/internal/sys" 85 "unsafe" 86) 87 88// C function to get the end of the program's memory. 89func getEnd() uintptr 90 91// For gccgo, use go:linkname to rename compiler-called functions to 92// themselves, so that the compiler will export them. 93// 94//go:linkname newobject runtime.newobject 95 96// Functions called by C code. 97//go:linkname mallocgc runtime.mallocgc 98 99const ( 100 debugMalloc = false 101 102 maxTinySize = _TinySize 103 tinySizeClass = _TinySizeClass 104 maxSmallSize = _MaxSmallSize 105 106 pageShift = _PageShift 107 pageSize = _PageSize 108 pageMask = _PageMask 109 // By construction, single page spans of the smallest object class 110 // have the most objects per span. 111 maxObjsPerSpan = pageSize / 8 112 113 mSpanInUse = _MSpanInUse 114 115 concurrentSweep = _ConcurrentSweep 116 117 _PageSize = 1 << _PageShift 118 _PageMask = _PageSize - 1 119 120 // _64bit = 1 on 64-bit systems, 0 on 32-bit systems 121 _64bit = 1 << (^uintptr(0) >> 63) / 2 122 123 // Tiny allocator parameters, see "Tiny allocator" comment in malloc.go. 124 _TinySize = 16 125 _TinySizeClass = int8(2) 126 127 _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc 128 _MaxMHeapList = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap. 129 _HeapAllocChunk = 1 << 20 // Chunk size for heap growth 130 131 // Per-P, per order stack segment cache size. 132 _StackCacheSize = 32 * 1024 133 134 // Number of orders that get caching. Order 0 is FixedStack 135 // and each successive order is twice as large. 136 // We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks 137 // will be allocated directly. 138 // Since FixedStack is different on different systems, we 139 // must vary NumStackOrders to keep the same maximum cached size. 140 // OS | FixedStack | NumStackOrders 141 // -----------------+------------+--------------- 142 // linux/darwin/bsd | 2KB | 4 143 // windows/32 | 4KB | 3 144 // windows/64 | 8KB | 2 145 // plan9 | 4KB | 3 146 _NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9 147 148 // Number of bits in page to span calculations (4k pages). 149 // On Windows 64-bit we limit the arena to 32GB or 35 bits. 150 // Windows counts memory used by page table into committed memory 151 // of the process, so we can't reserve too much memory. 152 // See https://golang.org/issue/5402 and https://golang.org/issue/5236. 153 // On other 64-bit platforms, we limit the arena to 512GB, or 39 bits. 154 // On 32-bit, we don't bother limiting anything, so we use the full 32-bit address. 155 // The only exception is mips32 which only has access to low 2GB of virtual memory. 156 // On Darwin/arm64, we cannot reserve more than ~5GB of virtual memory, 157 // but as most devices have less than 4GB of physical memory anyway, we 158 // try to be conservative here, and only ask for a 2GB heap. 159 _MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*(32-(sys.GoarchMips+sys.GoarchMipsle)) 160 _MHeapMap_Bits = _MHeapMap_TotalBits - _PageShift 161 162 // _MaxMem is the maximum heap arena size minus 1. 163 // 164 // On 32-bit, this is also the maximum heap pointer value, 165 // since the arena starts at address 0. 166 _MaxMem = 1<<_MHeapMap_TotalBits - 1 167 168 // Max number of threads to run garbage collection. 169 // 2, 3, and 4 are all plausible maximums depending 170 // on the hardware details of the machine. The garbage 171 // collector scales well to 32 cpus. 172 _MaxGcproc = 32 173 174 // minLegalPointer is the smallest possible legal pointer. 175 // This is the smallest possible architectural page size, 176 // since we assume that the first page is never mapped. 177 // 178 // This should agree with minZeroPage in the compiler. 179 minLegalPointer uintptr = 4096 180) 181 182// physPageSize is the size in bytes of the OS's physical pages. 183// Mapping and unmapping operations must be done at multiples of 184// physPageSize. 185// 186// This must be set by the OS init code (typically in osinit) before 187// mallocinit. 188var physPageSize uintptr 189 190// OS-defined helpers: 191// 192// sysAlloc obtains a large chunk of zeroed memory from the 193// operating system, typically on the order of a hundred kilobytes 194// or a megabyte. 195// NOTE: sysAlloc returns OS-aligned memory, but the heap allocator 196// may use larger alignment, so the caller must be careful to realign the 197// memory obtained by sysAlloc. 198// 199// SysUnused notifies the operating system that the contents 200// of the memory region are no longer needed and can be reused 201// for other purposes. 202// SysUsed notifies the operating system that the contents 203// of the memory region are needed again. 204// 205// SysFree returns it unconditionally; this is only used if 206// an out-of-memory error has been detected midway through 207// an allocation. It is okay if SysFree is a no-op. 208// 209// SysReserve reserves address space without allocating memory. 210// If the pointer passed to it is non-nil, the caller wants the 211// reservation there, but SysReserve can still choose another 212// location if that one is unavailable. On some systems and in some 213// cases SysReserve will simply check that the address space is 214// available and not actually reserve it. If SysReserve returns 215// non-nil, it sets *reserved to true if the address space is 216// reserved, false if it has merely been checked. 217// NOTE: SysReserve returns OS-aligned memory, but the heap allocator 218// may use larger alignment, so the caller must be careful to realign the 219// memory obtained by sysAlloc. 220// 221// SysMap maps previously reserved address space for use. 222// The reserved argument is true if the address space was really 223// reserved, not merely checked. 224// 225// SysFault marks a (already sysAlloc'd) region to fault 226// if accessed. Used only for debugging the runtime. 227 228func mallocinit() { 229 if class_to_size[_TinySizeClass] != _TinySize { 230 throw("bad TinySizeClass") 231 } 232 233 // Not used for gccgo. 234 // testdefersizes() 235 236 // Copy class sizes out for statistics table. 237 for i := range class_to_size { 238 memstats.by_size[i].size = uint32(class_to_size[i]) 239 } 240 241 // Check physPageSize. 242 if physPageSize == 0 { 243 // The OS init code failed to fetch the physical page size. 244 throw("failed to get system page size") 245 } 246 if physPageSize < minPhysPageSize { 247 print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n") 248 throw("bad system page size") 249 } 250 if physPageSize&(physPageSize-1) != 0 { 251 print("system page size (", physPageSize, ") must be a power of 2\n") 252 throw("bad system page size") 253 } 254 255 // The auxiliary regions start at p and are laid out in the 256 // following order: spans, bitmap, arena. 257 var p, pSize uintptr 258 var reserved bool 259 260 // The spans array holds one *mspan per _PageSize of arena. 261 var spansSize uintptr = (_MaxMem + 1) / _PageSize * sys.PtrSize 262 spansSize = round(spansSize, _PageSize) 263 // The bitmap holds 2 bits per word of arena. 264 var bitmapSize uintptr = (_MaxMem + 1) / (sys.PtrSize * 8 / 2) 265 bitmapSize = round(bitmapSize, _PageSize) 266 267 // Set up the allocation arena, a contiguous area of memory where 268 // allocated data will be found. 269 if sys.PtrSize == 8 { 270 // On a 64-bit machine, allocate from a single contiguous reservation. 271 // 512 GB (MaxMem) should be big enough for now. 272 // 273 // The code will work with the reservation at any address, but ask 274 // SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f). 275 // Allocating a 512 GB region takes away 39 bits, and the amd64 276 // doesn't let us choose the top 17 bits, so that leaves the 9 bits 277 // in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means 278 // that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df. 279 // In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid 280 // UTF-8 sequences, and they are otherwise as far away from 281 // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0 282 // addresses. An earlier attempt to use 0x11f8 caused out of memory errors 283 // on OS X during thread allocations. 0x00c0 causes conflicts with 284 // AddressSanitizer which reserves all memory up to 0x0100. 285 // These choices are both for debuggability and to reduce the 286 // odds of a conservative garbage collector (as is still used in gccgo) 287 // not collecting memory because some non-pointer block of memory 288 // had a bit pattern that matched a memory address. 289 // 290 // Actually we reserve 544 GB (because the bitmap ends up being 32 GB) 291 // but it hardly matters: e0 00 is not valid UTF-8 either. 292 // 293 // If this fails we fall back to the 32 bit memory mechanism 294 // 295 // However, on arm64, we ignore all this advice above and slam the 296 // allocation at 0x40 << 32 because when using 4k pages with 3-level 297 // translation buffers, the user address space is limited to 39 bits 298 // On darwin/arm64, the address space is even smaller. 299 // On AIX, mmap adresses range starts at 0x0700000000000000 for 64-bit 300 // processes. The new address space allocator starts at 0x0A00000000000000. 301 arenaSize := round(_MaxMem, _PageSize) 302 pSize = bitmapSize + spansSize + arenaSize + _PageSize 303 for i := 0; i <= 0x7f; i++ { 304 switch { 305 case GOARCH == "arm64" && GOOS == "darwin": 306 p = uintptr(i)<<40 | uintptrMask&(0x0013<<28) 307 case GOARCH == "arm64": 308 p = uintptr(i)<<40 | uintptrMask&(0x0040<<32) 309 case GOOS == "aix": 310 if i == 0 { 311 p = uintptrMask&(1<<42) | uintptrMask&(0xa0<<52) 312 } else { 313 p = uintptr(i)<<42 | uintptrMask&(0x70<<52) 314 } 315 default: 316 p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32) 317 } 318 p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved)) 319 if p != 0 { 320 break 321 } 322 } 323 } 324 325 if p == 0 { 326 // On a 32-bit machine, we can't typically get away 327 // with a giant virtual address space reservation. 328 // Instead we map the memory information bitmap 329 // immediately after the data segment, large enough 330 // to handle the entire 4GB address space (256 MB), 331 // along with a reservation for an initial arena. 332 // When that gets used up, we'll start asking the kernel 333 // for any memory anywhere. 334 335 // We want to start the arena low, but if we're linked 336 // against C code, it's possible global constructors 337 // have called malloc and adjusted the process' brk. 338 // Query the brk so we can avoid trying to map the 339 // arena over it (which will cause the kernel to put 340 // the arena somewhere else, likely at a high 341 // address). 342 procBrk := sbrk0() 343 344 // If we fail to allocate, try again with a smaller arena. 345 // This is necessary on Android L where we share a process 346 // with ART, which reserves virtual memory aggressively. 347 // In the worst case, fall back to a 0-sized initial arena, 348 // in the hope that subsequent reservations will succeed. 349 arenaSizes := [...]uintptr{ 350 512 << 20, 351 256 << 20, 352 128 << 20, 353 0, 354 } 355 356 for _, arenaSize := range &arenaSizes { 357 // SysReserve treats the address we ask for, end, as a hint, 358 // not as an absolute requirement. If we ask for the end 359 // of the data segment but the operating system requires 360 // a little more space before we can start allocating, it will 361 // give out a slightly higher pointer. Except QEMU, which 362 // is buggy, as usual: it won't adjust the pointer upward. 363 // So adjust it upward a little bit ourselves: 1/4 MB to get 364 // away from the running binary image and then round up 365 // to a MB boundary. 366 p = round(getEnd()+(1<<18), 1<<20) 367 pSize = bitmapSize + spansSize + arenaSize + _PageSize 368 if p <= procBrk && procBrk < p+pSize { 369 // Move the start above the brk, 370 // leaving some room for future brk 371 // expansion. 372 p = round(procBrk+(1<<20), 1<<20) 373 } 374 p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved)) 375 if p != 0 { 376 break 377 } 378 } 379 if p == 0 { 380 throw("runtime: cannot reserve arena virtual address space") 381 } 382 } 383 384 // PageSize can be larger than OS definition of page size, 385 // so SysReserve can give us a PageSize-unaligned pointer. 386 // To overcome this we ask for PageSize more and round up the pointer. 387 p1 := round(p, _PageSize) 388 pSize -= p1 - p 389 390 spansStart := p1 391 p1 += spansSize 392 mheap_.bitmap = p1 + bitmapSize 393 p1 += bitmapSize 394 if sys.PtrSize == 4 { 395 // Set arena_start such that we can accept memory 396 // reservations located anywhere in the 4GB virtual space. 397 mheap_.arena_start = 0 398 } else { 399 mheap_.arena_start = p1 400 } 401 mheap_.arena_end = p + pSize 402 mheap_.arena_used = p1 403 mheap_.arena_alloc = p1 404 mheap_.arena_reserved = reserved 405 406 if mheap_.arena_start&(_PageSize-1) != 0 { 407 println("bad pagesize", hex(p), hex(p1), hex(spansSize), hex(bitmapSize), hex(_PageSize), "start", hex(mheap_.arena_start)) 408 throw("misrounded allocation in mallocinit") 409 } 410 411 // Initialize the rest of the allocator. 412 mheap_.init(spansStart, spansSize) 413 _g_ := getg() 414 _g_.m.mcache = allocmcache() 415} 416 417// sysAlloc allocates the next n bytes from the heap arena. The 418// returned pointer is always _PageSize aligned and between 419// h.arena_start and h.arena_end. sysAlloc returns nil on failure. 420// There is no corresponding free function. 421func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer { 422 // strandLimit is the maximum number of bytes to strand from 423 // the current arena block. If we would need to strand more 424 // than this, we fall back to sysAlloc'ing just enough for 425 // this allocation. 426 const strandLimit = 16 << 20 427 428 if n > h.arena_end-h.arena_alloc { 429 // If we haven't grown the arena to _MaxMem yet, try 430 // to reserve some more address space. 431 p_size := round(n+_PageSize, 256<<20) 432 new_end := h.arena_end + p_size // Careful: can overflow 433 if h.arena_end <= new_end && new_end-h.arena_start-1 <= _MaxMem { 434 // TODO: It would be bad if part of the arena 435 // is reserved and part is not. 436 var reserved bool 437 p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved)) 438 if p == 0 { 439 // TODO: Try smaller reservation 440 // growths in case we're in a crowded 441 // 32-bit address space. 442 goto reservationFailed 443 } 444 // p can be just about anywhere in the address 445 // space, including before arena_end. 446 if p == h.arena_end { 447 // The new block is contiguous with 448 // the current block. Extend the 449 // current arena block. 450 h.arena_end = new_end 451 h.arena_reserved = reserved 452 } else if h.arena_start <= p && p+p_size-h.arena_start-1 <= _MaxMem && h.arena_end-h.arena_alloc < strandLimit { 453 // We were able to reserve more memory 454 // within the arena space, but it's 455 // not contiguous with our previous 456 // reservation. It could be before or 457 // after our current arena_used. 458 // 459 // Keep everything page-aligned. 460 // Our pages are bigger than hardware pages. 461 h.arena_end = p + p_size 462 p = round(p, _PageSize) 463 h.arena_alloc = p 464 h.arena_reserved = reserved 465 } else { 466 // We got a mapping, but either 467 // 468 // 1) It's not in the arena, so we 469 // can't use it. (This should never 470 // happen on 32-bit.) 471 // 472 // 2) We would need to discard too 473 // much of our current arena block to 474 // use it. 475 // 476 // We haven't added this allocation to 477 // the stats, so subtract it from a 478 // fake stat (but avoid underflow). 479 // 480 // We'll fall back to a small sysAlloc. 481 stat := uint64(p_size) 482 sysFree(unsafe.Pointer(p), p_size, &stat) 483 } 484 } 485 } 486 487 if n <= h.arena_end-h.arena_alloc { 488 // Keep taking from our reservation. 489 p := h.arena_alloc 490 sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys) 491 h.arena_alloc += n 492 if h.arena_alloc > h.arena_used { 493 h.setArenaUsed(h.arena_alloc, true) 494 } 495 496 if p&(_PageSize-1) != 0 { 497 throw("misrounded allocation in MHeap_SysAlloc") 498 } 499 return unsafe.Pointer(p) 500 } 501 502reservationFailed: 503 // If using 64-bit, our reservation is all we have. 504 if sys.PtrSize != 4 { 505 return nil 506 } 507 508 // On 32-bit, once the reservation is gone we can 509 // try to get memory at a location chosen by the OS. 510 p_size := round(n, _PageSize) + _PageSize 511 p := uintptr(sysAlloc(p_size, &memstats.heap_sys)) 512 if p == 0 { 513 return nil 514 } 515 516 if p < h.arena_start || p+p_size-h.arena_start > _MaxMem { 517 // This shouldn't be possible because _MaxMem is the 518 // whole address space on 32-bit. 519 top := uint64(h.arena_start) + _MaxMem 520 print("runtime: memory allocated by OS (", hex(p), ") not in usable range [", hex(h.arena_start), ",", hex(top), ")\n") 521 sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys) 522 return nil 523 } 524 525 p += -p & (_PageSize - 1) 526 if p+n > h.arena_used { 527 h.setArenaUsed(p+n, true) 528 } 529 530 if p&(_PageSize-1) != 0 { 531 throw("misrounded allocation in MHeap_SysAlloc") 532 } 533 return unsafe.Pointer(p) 534} 535 536// base address for all 0-byte allocations 537var zerobase uintptr 538 539// nextFreeFast returns the next free object if one is quickly available. 540// Otherwise it returns 0. 541func nextFreeFast(s *mspan) gclinkptr { 542 theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache? 543 if theBit < 64 { 544 result := s.freeindex + uintptr(theBit) 545 if result < s.nelems { 546 freeidx := result + 1 547 if freeidx%64 == 0 && freeidx != s.nelems { 548 return 0 549 } 550 s.allocCache >>= uint(theBit + 1) 551 s.freeindex = freeidx 552 s.allocCount++ 553 return gclinkptr(result*s.elemsize + s.base()) 554 } 555 } 556 return 0 557} 558 559// nextFree returns the next free object from the cached span if one is available. 560// Otherwise it refills the cache with a span with an available object and 561// returns that object along with a flag indicating that this was a heavy 562// weight allocation. If it is a heavy weight allocation the caller must 563// determine whether a new GC cycle needs to be started or if the GC is active 564// whether this goroutine needs to assist the GC. 565func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) { 566 s = c.alloc[spc] 567 shouldhelpgc = false 568 freeIndex := s.nextFreeIndex() 569 if freeIndex == s.nelems { 570 // The span is full. 571 if uintptr(s.allocCount) != s.nelems { 572 println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems) 573 throw("s.allocCount != s.nelems && freeIndex == s.nelems") 574 } 575 systemstack(func() { 576 c.refill(spc) 577 }) 578 shouldhelpgc = true 579 s = c.alloc[spc] 580 581 freeIndex = s.nextFreeIndex() 582 } 583 584 if freeIndex >= s.nelems { 585 throw("freeIndex is not valid") 586 } 587 588 v = gclinkptr(freeIndex*s.elemsize + s.base()) 589 s.allocCount++ 590 if uintptr(s.allocCount) > s.nelems { 591 println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems) 592 throw("s.allocCount > s.nelems") 593 } 594 return 595} 596 597// Allocate an object of size bytes. 598// Small objects are allocated from the per-P cache's free lists. 599// Large objects (> 32 kB) are allocated straight from the heap. 600func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { 601 if gcphase == _GCmarktermination { 602 throw("mallocgc called with gcphase == _GCmarktermination") 603 } 604 605 if size == 0 { 606 return unsafe.Pointer(&zerobase) 607 } 608 609 if debug.sbrk != 0 { 610 align := uintptr(16) 611 if typ != nil { 612 align = uintptr(typ.align) 613 } 614 return persistentalloc(size, align, &memstats.other_sys) 615 } 616 617 // When using gccgo, when a cgo or SWIG function has an 618 // interface return type and the function returns a 619 // non-pointer, memory allocation occurs after syscall.Cgocall 620 // but before syscall.CgocallDone. Treat this allocation as a 621 // callback. 622 incallback := false 623 if gomcache() == nil && getg().m.ncgo > 0 { 624 exitsyscall(0) 625 incallback = true 626 } 627 628 // assistG is the G to charge for this allocation, or nil if 629 // GC is not currently active. 630 var assistG *g 631 if gcBlackenEnabled != 0 { 632 // Charge the current user G for this allocation. 633 assistG = getg() 634 if assistG.m.curg != nil { 635 assistG = assistG.m.curg 636 } 637 // Charge the allocation against the G. We'll account 638 // for internal fragmentation at the end of mallocgc. 639 assistG.gcAssistBytes -= int64(size) 640 641 if assistG.gcAssistBytes < 0 { 642 // This G is in debt. Assist the GC to correct 643 // this before allocating. This must happen 644 // before disabling preemption. 645 gcAssistAlloc(assistG) 646 } 647 } 648 649 // Set mp.mallocing to keep from being preempted by GC. 650 mp := acquirem() 651 if mp.mallocing != 0 { 652 throw("malloc deadlock") 653 } 654 if mp.gsignal == getg() { 655 throw("malloc during signal") 656 } 657 mp.mallocing = 1 658 659 shouldhelpgc := false 660 dataSize := size 661 c := gomcache() 662 var x unsafe.Pointer 663 noscan := typ == nil || typ.kind&kindNoPointers != 0 664 if size <= maxSmallSize { 665 if noscan && size < maxTinySize { 666 // Tiny allocator. 667 // 668 // Tiny allocator combines several tiny allocation requests 669 // into a single memory block. The resulting memory block 670 // is freed when all subobjects are unreachable. The subobjects 671 // must be noscan (don't have pointers), this ensures that 672 // the amount of potentially wasted memory is bounded. 673 // 674 // Size of the memory block used for combining (maxTinySize) is tunable. 675 // Current setting is 16 bytes, which relates to 2x worst case memory 676 // wastage (when all but one subobjects are unreachable). 677 // 8 bytes would result in no wastage at all, but provides less 678 // opportunities for combining. 679 // 32 bytes provides more opportunities for combining, 680 // but can lead to 4x worst case wastage. 681 // The best case winning is 8x regardless of block size. 682 // 683 // Objects obtained from tiny allocator must not be freed explicitly. 684 // So when an object will be freed explicitly, we ensure that 685 // its size >= maxTinySize. 686 // 687 // SetFinalizer has a special case for objects potentially coming 688 // from tiny allocator, it such case it allows to set finalizers 689 // for an inner byte of a memory block. 690 // 691 // The main targets of tiny allocator are small strings and 692 // standalone escaping variables. On a json benchmark 693 // the allocator reduces number of allocations by ~12% and 694 // reduces heap size by ~20%. 695 off := c.tinyoffset 696 // Align tiny pointer for required (conservative) alignment. 697 if size&7 == 0 { 698 off = round(off, 8) 699 } else if size&3 == 0 { 700 off = round(off, 4) 701 } else if size&1 == 0 { 702 off = round(off, 2) 703 } 704 if off+size <= maxTinySize && c.tiny != 0 { 705 // The object fits into existing tiny block. 706 x = unsafe.Pointer(c.tiny + off) 707 c.tinyoffset = off + size 708 c.local_tinyallocs++ 709 mp.mallocing = 0 710 releasem(mp) 711 if incallback { 712 entersyscall(0) 713 } 714 return x 715 } 716 // Allocate a new maxTinySize block. 717 span := c.alloc[tinySpanClass] 718 v := nextFreeFast(span) 719 if v == 0 { 720 v, _, shouldhelpgc = c.nextFree(tinySpanClass) 721 } 722 x = unsafe.Pointer(v) 723 (*[2]uint64)(x)[0] = 0 724 (*[2]uint64)(x)[1] = 0 725 // See if we need to replace the existing tiny block with the new one 726 // based on amount of remaining free space. 727 if size < c.tinyoffset || c.tiny == 0 { 728 c.tiny = uintptr(x) 729 c.tinyoffset = size 730 } 731 size = maxTinySize 732 } else { 733 var sizeclass uint8 734 if size <= smallSizeMax-8 { 735 sizeclass = size_to_class8[(size+smallSizeDiv-1)/smallSizeDiv] 736 } else { 737 sizeclass = size_to_class128[(size-smallSizeMax+largeSizeDiv-1)/largeSizeDiv] 738 } 739 size = uintptr(class_to_size[sizeclass]) 740 spc := makeSpanClass(sizeclass, noscan) 741 span := c.alloc[spc] 742 v := nextFreeFast(span) 743 if v == 0 { 744 v, span, shouldhelpgc = c.nextFree(spc) 745 } 746 x = unsafe.Pointer(v) 747 if needzero && span.needzero != 0 { 748 memclrNoHeapPointers(unsafe.Pointer(v), size) 749 } 750 } 751 } else { 752 var s *mspan 753 shouldhelpgc = true 754 systemstack(func() { 755 s = largeAlloc(size, needzero, noscan) 756 }) 757 s.freeindex = 1 758 s.allocCount = 1 759 x = unsafe.Pointer(s.base()) 760 size = s.elemsize 761 } 762 763 var scanSize uintptr 764 if !noscan { 765 heapBitsSetType(uintptr(x), size, dataSize, typ) 766 if dataSize > typ.size { 767 // Array allocation. If there are any 768 // pointers, GC has to scan to the last 769 // element. 770 if typ.ptrdata != 0 { 771 scanSize = dataSize - typ.size + typ.ptrdata 772 } 773 } else { 774 scanSize = typ.ptrdata 775 } 776 c.local_scan += scanSize 777 } 778 779 // Ensure that the stores above that initialize x to 780 // type-safe memory and set the heap bits occur before 781 // the caller can make x observable to the garbage 782 // collector. Otherwise, on weakly ordered machines, 783 // the garbage collector could follow a pointer to x, 784 // but see uninitialized memory or stale heap bits. 785 publicationBarrier() 786 787 // Allocate black during GC. 788 // All slots hold nil so no scanning is needed. 789 // This may be racing with GC so do it atomically if there can be 790 // a race marking the bit. 791 if gcphase != _GCoff { 792 gcmarknewobject(uintptr(x), size, scanSize) 793 } 794 795 if raceenabled { 796 racemalloc(x, size) 797 } 798 799 if msanenabled { 800 msanmalloc(x, size) 801 } 802 803 mp.mallocing = 0 804 releasem(mp) 805 806 if debug.allocfreetrace != 0 { 807 tracealloc(x, size, typ) 808 } 809 810 if rate := MemProfileRate; rate > 0 { 811 if size < uintptr(rate) && int32(size) < c.next_sample { 812 c.next_sample -= int32(size) 813 } else { 814 mp := acquirem() 815 profilealloc(mp, x, size) 816 releasem(mp) 817 } 818 } 819 820 if assistG != nil { 821 // Account for internal fragmentation in the assist 822 // debt now that we know it. 823 assistG.gcAssistBytes -= int64(size - dataSize) 824 } 825 826 if shouldhelpgc { 827 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 828 gcStart(gcBackgroundMode, t) 829 } 830 } 831 832 // Check preemption, since unlike gc we don't check on every call. 833 if getg().preempt { 834 checkPreempt() 835 } 836 837 if incallback { 838 entersyscall(0) 839 } 840 841 return x 842} 843 844func largeAlloc(size uintptr, needzero bool, noscan bool) *mspan { 845 // print("largeAlloc size=", size, "\n") 846 847 if size+_PageSize < size { 848 throw("out of memory") 849 } 850 npages := size >> _PageShift 851 if size&_PageMask != 0 { 852 npages++ 853 } 854 855 // Deduct credit for this span allocation and sweep if 856 // necessary. mHeap_Alloc will also sweep npages, so this only 857 // pays the debt down to npage pages. 858 deductSweepCredit(npages*_PageSize, npages) 859 860 s := mheap_.alloc(npages, makeSpanClass(0, noscan), true, needzero) 861 if s == nil { 862 throw("out of memory") 863 } 864 s.limit = s.base() + size 865 heapBitsForSpan(s.base()).initSpan(s) 866 return s 867} 868 869// implementation of new builtin 870// compiler (both frontend and SSA backend) knows the signature 871// of this function 872func newobject(typ *_type) unsafe.Pointer { 873 return mallocgc(typ.size, typ, true) 874} 875 876//go:linkname reflect_unsafe_New reflect.unsafe_New 877func reflect_unsafe_New(typ *_type) unsafe.Pointer { 878 return newobject(typ) 879} 880 881// newarray allocates an array of n elements of type typ. 882func newarray(typ *_type, n int) unsafe.Pointer { 883 if n == 1 { 884 return mallocgc(typ.size, typ, true) 885 } 886 if n < 0 || uintptr(n) > maxSliceCap(typ.size) { 887 panic(plainError("runtime: allocation size out of range")) 888 } 889 return mallocgc(typ.size*uintptr(n), typ, true) 890} 891 892//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray 893func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer { 894 return newarray(typ, n) 895} 896 897func profilealloc(mp *m, x unsafe.Pointer, size uintptr) { 898 mp.mcache.next_sample = nextSample() 899 mProf_Malloc(x, size) 900} 901 902// nextSample returns the next sampling point for heap profiling. The goal is 903// to sample allocations on average every MemProfileRate bytes, but with a 904// completely random distribution over the allocation timeline; this 905// corresponds to a Poisson process with parameter MemProfileRate. In Poisson 906// processes, the distance between two samples follows the exponential 907// distribution (exp(MemProfileRate)), so the best return value is a random 908// number taken from an exponential distribution whose mean is MemProfileRate. 909func nextSample() int32 { 910 if GOOS == "plan9" { 911 // Plan 9 doesn't support floating point in note handler. 912 if g := getg(); g == g.m.gsignal { 913 return nextSampleNoFP() 914 } 915 } 916 917 return fastexprand(MemProfileRate) 918} 919 920// fastexprand returns a random number from an exponential distribution with 921// the specified mean. 922func fastexprand(mean int) int32 { 923 // Avoid overflow. Maximum possible step is 924 // -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean. 925 switch { 926 case mean > 0x7000000: 927 mean = 0x7000000 928 case mean == 0: 929 return 0 930 } 931 932 // Take a random sample of the exponential distribution exp(-mean*x). 933 // The probability distribution function is mean*exp(-mean*x), so the CDF is 934 // p = 1 - exp(-mean*x), so 935 // q = 1 - p == exp(-mean*x) 936 // log_e(q) = -mean*x 937 // -log_e(q)/mean = x 938 // x = -log_e(q) * mean 939 // x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency 940 const randomBitCount = 26 941 q := fastrand()%(1<<randomBitCount) + 1 942 qlog := fastlog2(float64(q)) - randomBitCount 943 if qlog > 0 { 944 qlog = 0 945 } 946 const minusLog2 = -0.6931471805599453 // -ln(2) 947 return int32(qlog*(minusLog2*float64(mean))) + 1 948} 949 950// nextSampleNoFP is similar to nextSample, but uses older, 951// simpler code to avoid floating point. 952func nextSampleNoFP() int32 { 953 // Set first allocation sample size. 954 rate := MemProfileRate 955 if rate > 0x3fffffff { // make 2*rate not overflow 956 rate = 0x3fffffff 957 } 958 if rate != 0 { 959 return int32(fastrand() % uint32(2*rate)) 960 } 961 return 0 962} 963 964type persistentAlloc struct { 965 base *notInHeap 966 off uintptr 967} 968 969var globalAlloc struct { 970 mutex 971 persistentAlloc 972} 973 974// Wrapper around sysAlloc that can allocate small chunks. 975// There is no associated free operation. 976// Intended for things like function/type/debug-related persistent data. 977// If align is 0, uses default align (currently 8). 978// The returned memory will be zeroed. 979// 980// Consider marking persistentalloc'd types go:notinheap. 981func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer { 982 var p *notInHeap 983 systemstack(func() { 984 p = persistentalloc1(size, align, sysStat) 985 }) 986 return unsafe.Pointer(p) 987} 988 989// Must run on system stack because stack growth can (re)invoke it. 990// See issue 9174. 991//go:systemstack 992func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap { 993 const ( 994 chunk = 256 << 10 995 maxBlock = 64 << 10 // VM reservation granularity is 64K on windows 996 ) 997 998 if size == 0 { 999 throw("persistentalloc: size == 0") 1000 } 1001 if align != 0 { 1002 if align&(align-1) != 0 { 1003 throw("persistentalloc: align is not a power of 2") 1004 } 1005 if align > _PageSize { 1006 throw("persistentalloc: align is too large") 1007 } 1008 } else { 1009 align = 8 1010 } 1011 1012 if size >= maxBlock { 1013 return (*notInHeap)(sysAlloc(size, sysStat)) 1014 } 1015 1016 mp := acquirem() 1017 var persistent *persistentAlloc 1018 if mp != nil && mp.p != 0 { 1019 persistent = &mp.p.ptr().palloc 1020 } else { 1021 lock(&globalAlloc.mutex) 1022 persistent = &globalAlloc.persistentAlloc 1023 } 1024 persistent.off = round(persistent.off, align) 1025 if persistent.off+size > chunk || persistent.base == nil { 1026 persistent.base = (*notInHeap)(sysAlloc(chunk, &memstats.other_sys)) 1027 if persistent.base == nil { 1028 if persistent == &globalAlloc.persistentAlloc { 1029 unlock(&globalAlloc.mutex) 1030 } 1031 throw("runtime: cannot allocate memory") 1032 } 1033 persistent.off = 0 1034 } 1035 p := persistent.base.add(persistent.off) 1036 persistent.off += size 1037 releasem(mp) 1038 if persistent == &globalAlloc.persistentAlloc { 1039 unlock(&globalAlloc.mutex) 1040 } 1041 1042 if sysStat != &memstats.other_sys { 1043 mSysStatInc(sysStat, size) 1044 mSysStatDec(&memstats.other_sys, size) 1045 } 1046 return p 1047} 1048 1049// notInHeap is off-heap memory allocated by a lower-level allocator 1050// like sysAlloc or persistentAlloc. 1051// 1052// In general, it's better to use real types marked as go:notinheap, 1053// but this serves as a generic type for situations where that isn't 1054// possible (like in the allocators). 1055// 1056// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc? 1057// 1058//go:notinheap 1059type notInHeap struct{} 1060 1061func (p *notInHeap) add(bytes uintptr) *notInHeap { 1062 return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes)) 1063} 1064