1// Copyright 2014 The Go Authors. All rights reserved. 2// Use of this source code is governed by a BSD-style 3// license that can be found in the LICENSE file. 4 5// Memory allocator. 6// 7// This was originally based on tcmalloc, but has diverged quite a bit. 8// http://goog-perftools.sourceforge.net/doc/tcmalloc.html 9 10// The main allocator works in runs of pages. 11// Small allocation sizes (up to and including 32 kB) are 12// rounded to one of about 70 size classes, each of which 13// has its own free set of objects of exactly that size. 14// Any free page of memory can be split into a set of objects 15// of one size class, which are then managed using a free bitmap. 16// 17// The allocator's data structures are: 18// 19// fixalloc: a free-list allocator for fixed-size off-heap objects, 20// used to manage storage used by the allocator. 21// mheap: the malloc heap, managed at page (8192-byte) granularity. 22// mspan: a run of in-use pages managed by the mheap. 23// mcentral: collects all spans of a given size class. 24// mcache: a per-P cache of mspans with free space. 25// mstats: allocation statistics. 26// 27// Allocating a small object proceeds up a hierarchy of caches: 28// 29// 1. Round the size up to one of the small size classes 30// and look in the corresponding mspan in this P's mcache. 31// Scan the mspan's free bitmap to find a free slot. 32// If there is a free slot, allocate it. 33// This can all be done without acquiring a lock. 34// 35// 2. If the mspan has no free slots, obtain a new mspan 36// from the mcentral's list of mspans of the required size 37// class that have free space. 38// Obtaining a whole span amortizes the cost of locking 39// the mcentral. 40// 41// 3. If the mcentral's mspan list is empty, obtain a run 42// of pages from the mheap to use for the mspan. 43// 44// 4. If the mheap is empty or has no page runs large enough, 45// allocate a new group of pages (at least 1MB) from the 46// operating system. Allocating a large run of pages 47// amortizes the cost of talking to the operating system. 48// 49// Sweeping an mspan and freeing objects on it proceeds up a similar 50// hierarchy: 51// 52// 1. If the mspan is being swept in response to allocation, it 53// is returned to the mcache to satisfy the allocation. 54// 55// 2. Otherwise, if the mspan still has allocated objects in it, 56// it is placed on the mcentral free list for the mspan's size 57// class. 58// 59// 3. Otherwise, if all objects in the mspan are free, the mspan's 60// pages are returned to the mheap and the mspan is now dead. 61// 62// Allocating and freeing a large object uses the mheap 63// directly, bypassing the mcache and mcentral. 64// 65// If mspan.needzero is false, then free object slots in the mspan are 66// already zeroed. Otherwise if needzero is true, objects are zeroed as 67// they are allocated. There are various benefits to delaying zeroing 68// this way: 69// 70// 1. Stack frame allocation can avoid zeroing altogether. 71// 72// 2. It exhibits better temporal locality, since the program is 73// probably about to write to the memory. 74// 75// 3. We don't zero pages that never get reused. 76 77// Virtual memory layout 78// 79// The heap consists of a set of arenas, which are 64MB on 64-bit and 80// 4MB on 32-bit (heapArenaBytes). Each arena's start address is also 81// aligned to the arena size. 82// 83// Each arena has an associated heapArena object that stores the 84// metadata for that arena: the heap bitmap for all words in the arena 85// and the span map for all pages in the arena. heapArena objects are 86// themselves allocated off-heap. 87// 88// Since arenas are aligned, the address space can be viewed as a 89// series of arena frames. The arena map (mheap_.arenas) maps from 90// arena frame number to *heapArena, or nil for parts of the address 91// space not backed by the Go heap. The arena map is structured as a 92// two-level array consisting of a "L1" arena map and many "L2" arena 93// maps; however, since arenas are large, on many architectures, the 94// arena map consists of a single, large L2 map. 95// 96// The arena map covers the entire possible address space, allowing 97// the Go heap to use any part of the address space. The allocator 98// attempts to keep arenas contiguous so that large spans (and hence 99// large objects) can cross arenas. 100 101package runtime 102 103import ( 104 "runtime/internal/atomic" 105 "runtime/internal/math" 106 "runtime/internal/sys" 107 "unsafe" 108) 109 110// C function to get the end of the program's memory. 111func getEnd() uintptr 112 113// For gccgo, use go:linkname to export compiler-called functions. 114// 115//go:linkname newobject 116 117// Functions called by C code. 118//go:linkname mallocgc 119 120const ( 121 debugMalloc = false 122 123 maxTinySize = _TinySize 124 tinySizeClass = _TinySizeClass 125 maxSmallSize = _MaxSmallSize 126 127 pageShift = _PageShift 128 pageSize = _PageSize 129 pageMask = _PageMask 130 // By construction, single page spans of the smallest object class 131 // have the most objects per span. 132 maxObjsPerSpan = pageSize / 8 133 134 concurrentSweep = _ConcurrentSweep 135 136 _PageSize = 1 << _PageShift 137 _PageMask = _PageSize - 1 138 139 // _64bit = 1 on 64-bit systems, 0 on 32-bit systems 140 _64bit = 1 << (^uintptr(0) >> 63) / 2 141 142 // Tiny allocator parameters, see "Tiny allocator" comment in malloc.go. 143 _TinySize = 16 144 _TinySizeClass = int8(2) 145 146 _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc 147 148 // Per-P, per order stack segment cache size. 149 _StackCacheSize = 32 * 1024 150 151 // Number of orders that get caching. Order 0 is FixedStack 152 // and each successive order is twice as large. 153 // We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks 154 // will be allocated directly. 155 // Since FixedStack is different on different systems, we 156 // must vary NumStackOrders to keep the same maximum cached size. 157 // OS | FixedStack | NumStackOrders 158 // -----------------+------------+--------------- 159 // linux/darwin/bsd | 2KB | 4 160 // windows/32 | 4KB | 3 161 // windows/64 | 8KB | 2 162 // plan9 | 4KB | 3 163 _NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9 164 165 // heapAddrBits is the number of bits in a heap address. On 166 // amd64, addresses are sign-extended beyond heapAddrBits. On 167 // other arches, they are zero-extended. 168 // 169 // On most 64-bit platforms, we limit this to 48 bits based on a 170 // combination of hardware and OS limitations. 171 // 172 // amd64 hardware limits addresses to 48 bits, sign-extended 173 // to 64 bits. Addresses where the top 16 bits are not either 174 // all 0 or all 1 are "non-canonical" and invalid. Because of 175 // these "negative" addresses, we offset addresses by 1<<47 176 // (arenaBaseOffset) on amd64 before computing indexes into 177 // the heap arenas index. In 2017, amd64 hardware added 178 // support for 57 bit addresses; however, currently only Linux 179 // supports this extension and the kernel will never choose an 180 // address above 1<<47 unless mmap is called with a hint 181 // address above 1<<47 (which we never do). 182 // 183 // arm64 hardware (as of ARMv8) limits user addresses to 48 184 // bits, in the range [0, 1<<48). 185 // 186 // ppc64, mips64, and s390x support arbitrary 64 bit addresses 187 // in hardware. On Linux, Go leans on stricter OS limits. Based 188 // on Linux's processor.h, the user address space is limited as 189 // follows on 64-bit architectures: 190 // 191 // Architecture Name Maximum Value (exclusive) 192 // --------------------------------------------------------------------- 193 // amd64 TASK_SIZE_MAX 0x007ffffffff000 (47 bit addresses) 194 // arm64 TASK_SIZE_64 0x01000000000000 (48 bit addresses) 195 // ppc64{,le} TASK_SIZE_USER64 0x00400000000000 (46 bit addresses) 196 // mips64{,le} TASK_SIZE64 0x00010000000000 (40 bit addresses) 197 // s390x TASK_SIZE 1<<64 (64 bit addresses) 198 // 199 // These limits may increase over time, but are currently at 200 // most 48 bits except on s390x. On all architectures, Linux 201 // starts placing mmap'd regions at addresses that are 202 // significantly below 48 bits, so even if it's possible to 203 // exceed Go's 48 bit limit, it's extremely unlikely in 204 // practice. 205 // 206 // On 32-bit platforms, we accept the full 32-bit address 207 // space because doing so is cheap. 208 // mips32 only has access to the low 2GB of virtual memory, so 209 // we further limit it to 31 bits. 210 // 211 // On ios/arm64, although 64-bit pointers are presumably 212 // available, pointers are truncated to 33 bits. Furthermore, 213 // only the top 4 GiB of the address space are actually available 214 // to the application, but we allow the whole 33 bits anyway for 215 // simplicity. 216 // TODO(mknyszek): Consider limiting it to 32 bits and using 217 // arenaBaseOffset to offset into the top 4 GiB. 218 // 219 // WebAssembly currently has a limit of 4GB linear memory. 220 heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*sys.GoosIos*sys.GoarchArm64 221 222 // maxAlloc is the maximum size of an allocation. On 64-bit, 223 // it's theoretically possible to allocate 1<<heapAddrBits bytes. On 224 // 32-bit, however, this is one less than 1<<32 because the 225 // number of bytes in the address space doesn't actually fit 226 // in a uintptr. 227 maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1 228 229 // The number of bits in a heap address, the size of heap 230 // arenas, and the L1 and L2 arena map sizes are related by 231 // 232 // (1 << addr bits) = arena size * L1 entries * L2 entries 233 // 234 // Currently, we balance these as follows: 235 // 236 // Platform Addr bits Arena size L1 entries L2 entries 237 // -------------- --------- ---------- ---------- ----------- 238 // */64-bit 48 64MB 1 4M (32MB) 239 // windows/64-bit 48 4MB 64 1M (8MB) 240 // */32-bit 32 4MB 1 1024 (4KB) 241 // */mips(le) 31 4MB 1 512 (2KB) 242 243 // heapArenaBytes is the size of a heap arena. The heap 244 // consists of mappings of size heapArenaBytes, aligned to 245 // heapArenaBytes. The initial heap mapping is one arena. 246 // 247 // This is currently 64MB on 64-bit non-Windows and 4MB on 248 // 32-bit and on Windows. We use smaller arenas on Windows 249 // because all committed memory is charged to the process, 250 // even if it's not touched. Hence, for processes with small 251 // heaps, the mapped arena space needs to be commensurate. 252 // This is particularly important with the race detector, 253 // since it significantly amplifies the cost of committed 254 // memory. 255 heapArenaBytes = 1 << logHeapArenaBytes 256 257 // logHeapArenaBytes is log_2 of heapArenaBytes. For clarity, 258 // prefer using heapArenaBytes where possible (we need the 259 // constant to compute some other constants). 260 logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoarchWasm)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (2+20)*sys.GoarchWasm 261 262 // heapArenaBitmapBytes is the size of each heap arena's bitmap. 263 heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2) 264 265 pagesPerArena = heapArenaBytes / pageSize 266 267 // arenaL1Bits is the number of bits of the arena number 268 // covered by the first level arena map. 269 // 270 // This number should be small, since the first level arena 271 // map requires PtrSize*(1<<arenaL1Bits) of space in the 272 // binary's BSS. It can be zero, in which case the first level 273 // index is effectively unused. There is a performance benefit 274 // to this, since the generated code can be more efficient, 275 // but comes at the cost of having a large L2 mapping. 276 // 277 // We use the L1 map on 64-bit Windows because the arena size 278 // is small, but the address space is still 48 bits, and 279 // there's a high cost to having a large L2. 280 arenaL1Bits = 6 * (_64bit * sys.GoosWindows) 281 282 // arenaL2Bits is the number of bits of the arena number 283 // covered by the second level arena index. 284 // 285 // The size of each arena map allocation is proportional to 286 // 1<<arenaL2Bits, so it's important that this not be too 287 // large. 48 bits leads to 32MB arena index allocations, which 288 // is about the practical threshold. 289 arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits 290 291 // arenaL1Shift is the number of bits to shift an arena frame 292 // number by to compute an index into the first level arena map. 293 arenaL1Shift = arenaL2Bits 294 295 // arenaBits is the total bits in a combined arena map index. 296 // This is split between the index into the L1 arena map and 297 // the L2 arena map. 298 arenaBits = arenaL1Bits + arenaL2Bits 299 300 // arenaBaseOffset is the pointer value that corresponds to 301 // index 0 in the heap arena map. 302 // 303 // On amd64, the address space is 48 bits, sign extended to 64 304 // bits. This offset lets us handle "negative" addresses (or 305 // high addresses if viewed as unsigned). 306 // 307 // On aix/ppc64, this offset allows to keep the heapAddrBits to 308 // 48. Otherwize, it would be 60 in order to handle mmap addresses 309 // (in range 0x0a00000000000000 - 0x0afffffffffffff). But in this 310 // case, the memory reserved in (s *pageAlloc).init for chunks 311 // is causing important slowdowns. 312 // 313 // On other platforms, the user address space is contiguous 314 // and starts at 0, so no offset is necessary. 315 arenaBaseOffset = 0xffff800000000000*sys.GoarchAmd64 + 0x0a00000000000000*sys.GoosAix*sys.GoarchPpc64 316 // A typed version of this constant that will make it into DWARF (for viewcore). 317 arenaBaseOffsetUintptr = uintptr(arenaBaseOffset) 318 319 // Max number of threads to run garbage collection. 320 // 2, 3, and 4 are all plausible maximums depending 321 // on the hardware details of the machine. The garbage 322 // collector scales well to 32 cpus. 323 _MaxGcproc = 32 324 325 // minLegalPointer is the smallest possible legal pointer. 326 // This is the smallest possible architectural page size, 327 // since we assume that the first page is never mapped. 328 // 329 // This should agree with minZeroPage in the compiler. 330 minLegalPointer uintptr = 4096 331) 332 333// physPageSize is the size in bytes of the OS's physical pages. 334// Mapping and unmapping operations must be done at multiples of 335// physPageSize. 336// 337// This must be set by the OS init code (typically in osinit) before 338// mallocinit. 339var physPageSize uintptr 340 341// physHugePageSize is the size in bytes of the OS's default physical huge 342// page size whose allocation is opaque to the application. It is assumed 343// and verified to be a power of two. 344// 345// If set, this must be set by the OS init code (typically in osinit) before 346// mallocinit. However, setting it at all is optional, and leaving the default 347// value is always safe (though potentially less efficient). 348// 349// Since physHugePageSize is always assumed to be a power of two, 350// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift. 351// The purpose of physHugePageShift is to avoid doing divisions in 352// performance critical functions. 353var ( 354 physHugePageSize uintptr 355 physHugePageShift uint 356) 357 358// OS memory management abstraction layer 359// 360// Regions of the address space managed by the runtime may be in one of four 361// states at any given time: 362// 1) None - Unreserved and unmapped, the default state of any region. 363// 2) Reserved - Owned by the runtime, but accessing it would cause a fault. 364// Does not count against the process' memory footprint. 365// 3) Prepared - Reserved, intended not to be backed by physical memory (though 366// an OS may implement this lazily). Can transition efficiently to 367// Ready. Accessing memory in such a region is undefined (may 368// fault, may give back unexpected zeroes, etc.). 369// 4) Ready - may be accessed safely. 370// 371// This set of states is more than is strictly necessary to support all the 372// currently supported platforms. One could get by with just None, Reserved, and 373// Ready. However, the Prepared state gives us flexibility for performance 374// purposes. For example, on POSIX-y operating systems, Reserved is usually a 375// private anonymous mmap'd region with PROT_NONE set, and to transition 376// to Ready would require setting PROT_READ|PROT_WRITE. However the 377// underspecification of Prepared lets us use just MADV_FREE to transition from 378// Ready to Prepared. Thus with the Prepared state we can set the permission 379// bits just once early on, we can efficiently tell the OS that it's free to 380// take pages away from us when we don't strictly need them. 381// 382// For each OS there is a common set of helpers defined that transition 383// memory regions between these states. The helpers are as follows: 384// 385// sysAlloc transitions an OS-chosen region of memory from None to Ready. 386// More specifically, it obtains a large chunk of zeroed memory from the 387// operating system, typically on the order of a hundred kilobytes 388// or a megabyte. This memory is always immediately available for use. 389// 390// sysFree transitions a memory region from any state to None. Therefore, it 391// returns memory unconditionally. It is used if an out-of-memory error has been 392// detected midway through an allocation or to carve out an aligned section of 393// the address space. It is okay if sysFree is a no-op only if sysReserve always 394// returns a memory region aligned to the heap allocator's alignment 395// restrictions. 396// 397// sysReserve transitions a memory region from None to Reserved. It reserves 398// address space in such a way that it would cause a fatal fault upon access 399// (either via permissions or not committing the memory). Such a reservation is 400// thus never backed by physical memory. 401// If the pointer passed to it is non-nil, the caller wants the 402// reservation there, but sysReserve can still choose another 403// location if that one is unavailable. 404// NOTE: sysReserve returns OS-aligned memory, but the heap allocator 405// may use larger alignment, so the caller must be careful to realign the 406// memory obtained by sysReserve. 407// 408// sysMap transitions a memory region from Reserved to Prepared. It ensures the 409// memory region can be efficiently transitioned to Ready. 410// 411// sysUsed transitions a memory region from Prepared to Ready. It notifies the 412// operating system that the memory region is needed and ensures that the region 413// may be safely accessed. This is typically a no-op on systems that don't have 414// an explicit commit step and hard over-commit limits, but is critical on 415// Windows, for example. 416// 417// sysUnused transitions a memory region from Ready to Prepared. It notifies the 418// operating system that the physical pages backing this memory region are no 419// longer needed and can be reused for other purposes. The contents of a 420// sysUnused memory region are considered forfeit and the region must not be 421// accessed again until sysUsed is called. 422// 423// sysFault transitions a memory region from Ready or Prepared to Reserved. It 424// marks a region such that it will always fault if accessed. Used only for 425// debugging the runtime. 426 427func mallocinit() { 428 if class_to_size[_TinySizeClass] != _TinySize { 429 throw("bad TinySizeClass") 430 } 431 432 // Not used for gccgo. 433 // testdefersizes() 434 435 if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 { 436 // heapBits expects modular arithmetic on bitmap 437 // addresses to work. 438 throw("heapArenaBitmapBytes not a power of 2") 439 } 440 441 // Copy class sizes out for statistics table. 442 for i := range class_to_size { 443 memstats.by_size[i].size = uint32(class_to_size[i]) 444 } 445 446 // Check physPageSize. 447 if physPageSize == 0 { 448 // The OS init code failed to fetch the physical page size. 449 throw("failed to get system page size") 450 } 451 if physPageSize > maxPhysPageSize { 452 print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n") 453 throw("bad system page size") 454 } 455 if physPageSize < minPhysPageSize { 456 print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n") 457 throw("bad system page size") 458 } 459 if physPageSize&(physPageSize-1) != 0 { 460 print("system page size (", physPageSize, ") must be a power of 2\n") 461 throw("bad system page size") 462 } 463 if physHugePageSize&(physHugePageSize-1) != 0 { 464 print("system huge page size (", physHugePageSize, ") must be a power of 2\n") 465 throw("bad system huge page size") 466 } 467 if physHugePageSize > maxPhysHugePageSize { 468 // physHugePageSize is greater than the maximum supported huge page size. 469 // Don't throw here, like in the other cases, since a system configured 470 // in this way isn't wrong, we just don't have the code to support them. 471 // Instead, silently set the huge page size to zero. 472 physHugePageSize = 0 473 } 474 if physHugePageSize != 0 { 475 // Since physHugePageSize is a power of 2, it suffices to increase 476 // physHugePageShift until 1<<physHugePageShift == physHugePageSize. 477 for 1<<physHugePageShift != physHugePageSize { 478 physHugePageShift++ 479 } 480 } 481 if pagesPerArena%pagesPerSpanRoot != 0 { 482 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n") 483 throw("bad pagesPerSpanRoot") 484 } 485 if pagesPerArena%pagesPerReclaimerChunk != 0 { 486 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n") 487 throw("bad pagesPerReclaimerChunk") 488 } 489 490 // Initialize the heap. 491 mheap_.init() 492 mcache0 = allocmcache() 493 lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas) 494 lockInit(&proflock, lockRankProf) 495 lockInit(&globalAlloc.mutex, lockRankGlobalAlloc) 496 497 // Create initial arena growth hints. 498 if sys.PtrSize == 8 { 499 // On a 64-bit machine, we pick the following hints 500 // because: 501 // 502 // 1. Starting from the middle of the address space 503 // makes it easier to grow out a contiguous range 504 // without running in to some other mapping. 505 // 506 // 2. This makes Go heap addresses more easily 507 // recognizable when debugging. 508 // 509 // 3. Stack scanning in gccgo is still conservative, 510 // so it's important that addresses be distinguishable 511 // from other data. 512 // 513 // Starting at 0x00c0 means that the valid memory addresses 514 // will begin 0x00c0, 0x00c1, ... 515 // In little-endian, that's c0 00, c1 00, ... None of those are valid 516 // UTF-8 sequences, and they are otherwise as far away from 517 // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0 518 // addresses. An earlier attempt to use 0x11f8 caused out of memory errors 519 // on OS X during thread allocations. 0x00c0 causes conflicts with 520 // AddressSanitizer which reserves all memory up to 0x0100. 521 // These choices reduce the odds of a conservative garbage collector 522 // not collecting memory because some non-pointer block of memory 523 // had a bit pattern that matched a memory address. 524 // 525 // However, on arm64, we ignore all this advice above and slam the 526 // allocation at 0x40 << 32 because when using 4k pages with 3-level 527 // translation buffers, the user address space is limited to 39 bits 528 // On ios/arm64, the address space is even smaller. 529 // 530 // On AIX, mmaps starts at 0x0A00000000000000 for 64-bit. 531 // processes. 532 for i := 0x7f; i >= 0; i-- { 533 var p uintptr 534 switch { 535 case raceenabled: 536 // The TSAN runtime requires the heap 537 // to be in the range [0x00c000000000, 538 // 0x00e000000000). 539 p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32) 540 if p >= uintptrMask&0x00e000000000 { 541 continue 542 } 543 case GOARCH == "arm64" && GOOS == "ios": 544 p = uintptr(i)<<40 | uintptrMask&(0x0013<<28) 545 case GOARCH == "arm64": 546 p = uintptr(i)<<40 | uintptrMask&(0x0040<<32) 547 case GOOS == "aix": 548 if i == 0 { 549 // We don't use addresses directly after 0x0A00000000000000 550 // to avoid collisions with others mmaps done by non-go programs. 551 continue 552 } 553 p = uintptr(i)<<40 | uintptrMask&(0xa0<<52) 554 default: 555 p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32) 556 } 557 hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc()) 558 hint.addr = p 559 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint 560 } 561 } else { 562 // On a 32-bit machine, we're much more concerned 563 // about keeping the usable heap contiguous. 564 // Hence: 565 // 566 // 1. We reserve space for all heapArenas up front so 567 // they don't get interleaved with the heap. They're 568 // ~258MB, so this isn't too bad. (We could reserve a 569 // smaller amount of space up front if this is a 570 // problem.) 571 // 572 // 2. We hint the heap to start right above the end of 573 // the binary so we have the best chance of keeping it 574 // contiguous. 575 // 576 // 3. We try to stake out a reasonably large initial 577 // heap reservation. 578 579 const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{}) 580 meta := uintptr(sysReserve(nil, arenaMetaSize)) 581 if meta != 0 { 582 mheap_.heapArenaAlloc.init(meta, arenaMetaSize) 583 } 584 585 // We want to start the arena low, but if we're linked 586 // against C code, it's possible global constructors 587 // have called malloc and adjusted the process' brk. 588 // Query the brk so we can avoid trying to map the 589 // region over it (which will cause the kernel to put 590 // the region somewhere else, likely at a high 591 // address). 592 procBrk := sbrk0() 593 594 // If we ask for the end of the data segment but the 595 // operating system requires a little more space 596 // before we can start allocating, it will give out a 597 // slightly higher pointer. Except QEMU, which is 598 // buggy, as usual: it won't adjust the pointer 599 // upward. So adjust it upward a little bit ourselves: 600 // 1/4 MB to get away from the running binary image. 601 p := getEnd() 602 if p < procBrk { 603 p = procBrk 604 } 605 if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end { 606 p = mheap_.heapArenaAlloc.end 607 } 608 p = alignUp(p+(256<<10), heapArenaBytes) 609 // Because we're worried about fragmentation on 610 // 32-bit, we try to make a large initial reservation. 611 arenaSizes := [...]uintptr{ 612 512 << 20, 613 256 << 20, 614 128 << 20, 615 } 616 for _, arenaSize := range &arenaSizes { 617 a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes) 618 if a != nil { 619 mheap_.arena.init(uintptr(a), size) 620 p = mheap_.arena.end // For hint below 621 break 622 } 623 } 624 hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc()) 625 hint.addr = p 626 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint 627 } 628} 629 630// sysAlloc allocates heap arena space for at least n bytes. The 631// returned pointer is always heapArenaBytes-aligned and backed by 632// h.arenas metadata. The returned size is always a multiple of 633// heapArenaBytes. sysAlloc returns nil on failure. 634// There is no corresponding free function. 635// 636// sysAlloc returns a memory region in the Prepared state. This region must 637// be transitioned to Ready before use. 638// 639// h must be locked. 640func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) { 641 assertLockHeld(&h.lock) 642 643 n = alignUp(n, heapArenaBytes) 644 645 // First, try the arena pre-reservation. 646 v = h.arena.alloc(n, heapArenaBytes, &memstats.heap_sys) 647 if v != nil { 648 size = n 649 goto mapped 650 } 651 652 // Try to grow the heap at a hint address. 653 for h.arenaHints != nil { 654 hint := h.arenaHints 655 p := hint.addr 656 if hint.down { 657 p -= n 658 } 659 if p+n < p { 660 // We can't use this, so don't ask. 661 v = nil 662 } else if arenaIndex(p+n-1) >= 1<<arenaBits { 663 // Outside addressable heap. Can't use. 664 v = nil 665 } else { 666 v = sysReserve(unsafe.Pointer(p), n) 667 } 668 if p == uintptr(v) { 669 // Success. Update the hint. 670 if !hint.down { 671 p += n 672 } 673 hint.addr = p 674 size = n 675 break 676 } 677 // Failed. Discard this hint and try the next. 678 // 679 // TODO: This would be cleaner if sysReserve could be 680 // told to only return the requested address. In 681 // particular, this is already how Windows behaves, so 682 // it would simplify things there. 683 if v != nil { 684 sysFree(v, n, nil) 685 } 686 h.arenaHints = hint.next 687 h.arenaHintAlloc.free(unsafe.Pointer(hint)) 688 } 689 690 if size == 0 { 691 if raceenabled { 692 // The race detector assumes the heap lives in 693 // [0x00c000000000, 0x00e000000000), but we 694 // just ran out of hints in this region. Give 695 // a nice failure. 696 throw("too many address space collisions for -race mode") 697 } 698 699 // All of the hints failed, so we'll take any 700 // (sufficiently aligned) address the kernel will give 701 // us. 702 v, size = sysReserveAligned(nil, n, heapArenaBytes) 703 if v == nil { 704 return nil, 0 705 } 706 707 // Create new hints for extending this region. 708 hint := (*arenaHint)(h.arenaHintAlloc.alloc()) 709 hint.addr, hint.down = uintptr(v), true 710 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint 711 hint = (*arenaHint)(h.arenaHintAlloc.alloc()) 712 hint.addr = uintptr(v) + size 713 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint 714 } 715 716 // Check for bad pointers or pointers we can't use. 717 { 718 var bad string 719 p := uintptr(v) 720 if p+size < p { 721 bad = "region exceeds uintptr range" 722 } else if arenaIndex(p) >= 1<<arenaBits { 723 bad = "base outside usable address space" 724 } else if arenaIndex(p+size-1) >= 1<<arenaBits { 725 bad = "end outside usable address space" 726 } 727 if bad != "" { 728 // This should be impossible on most architectures, 729 // but it would be really confusing to debug. 730 print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n") 731 throw("memory reservation exceeds address space limit") 732 } 733 } 734 735 if uintptr(v)&(heapArenaBytes-1) != 0 { 736 throw("misrounded allocation in sysAlloc") 737 } 738 739 // Transition from Reserved to Prepared. 740 sysMap(v, size, &memstats.heap_sys) 741 742mapped: 743 // Create arena metadata. 744 for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ { 745 l2 := h.arenas[ri.l1()] 746 if l2 == nil { 747 // Allocate an L2 arena map. 748 l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil)) 749 if l2 == nil { 750 throw("out of memory allocating heap arena map") 751 } 752 atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2)) 753 } 754 755 if l2[ri.l2()] != nil { 756 throw("arena already initialized") 757 } 758 var r *heapArena 759 r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys)) 760 if r == nil { 761 r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys)) 762 if r == nil { 763 throw("out of memory allocating heap arena metadata") 764 } 765 } 766 767 // Add the arena to the arenas list. 768 if len(h.allArenas) == cap(h.allArenas) { 769 size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize 770 if size == 0 { 771 size = physPageSize 772 } 773 newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gcMiscSys)) 774 if newArray == nil { 775 throw("out of memory allocating allArenas") 776 } 777 oldSlice := h.allArenas 778 *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)} 779 copy(h.allArenas, oldSlice) 780 // Do not free the old backing array because 781 // there may be concurrent readers. Since we 782 // double the array each time, this can lead 783 // to at most 2x waste. 784 } 785 h.allArenas = h.allArenas[:len(h.allArenas)+1] 786 h.allArenas[len(h.allArenas)-1] = ri 787 788 // Store atomically just in case an object from the 789 // new heap arena becomes visible before the heap lock 790 // is released (which shouldn't happen, but there's 791 // little downside to this). 792 atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r)) 793 } 794 795 // Tell the race detector about the new heap memory. 796 if raceenabled { 797 racemapshadow(v, size) 798 } 799 800 return 801} 802 803// sysReserveAligned is like sysReserve, but the returned pointer is 804// aligned to align bytes. It may reserve either n or n+align bytes, 805// so it returns the size that was reserved. 806func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) { 807 // Since the alignment is rather large in uses of this 808 // function, we're not likely to get it by chance, so we ask 809 // for a larger region and remove the parts we don't need. 810 retries := 0 811retry: 812 p := uintptr(sysReserve(v, size+align)) 813 switch { 814 case p == 0: 815 return nil, 0 816 case p&(align-1) == 0: 817 // We got lucky and got an aligned region, so we can 818 // use the whole thing. 819 return unsafe.Pointer(p), size + align 820 case GOOS == "windows": 821 // On Windows we can't release pieces of a 822 // reservation, so we release the whole thing and 823 // re-reserve the aligned sub-region. This may race, 824 // so we may have to try again. 825 sysFree(unsafe.Pointer(p), size+align, nil) 826 p = alignUp(p, align) 827 p2 := sysReserve(unsafe.Pointer(p), size) 828 if p != uintptr(p2) { 829 // Must have raced. Try again. 830 sysFree(p2, size, nil) 831 if retries++; retries == 100 { 832 throw("failed to allocate aligned heap memory; too many retries") 833 } 834 goto retry 835 } 836 // Success. 837 return p2, size 838 default: 839 // Trim off the unaligned parts. 840 pAligned := alignUp(p, align) 841 sysFree(unsafe.Pointer(p), pAligned-p, nil) 842 end := pAligned + size 843 endLen := (p + size + align) - end 844 if endLen > 0 { 845 sysFree(unsafe.Pointer(end), endLen, nil) 846 } 847 return unsafe.Pointer(pAligned), size 848 } 849} 850 851// base address for all 0-byte allocations 852var zerobase uintptr 853 854// nextFreeFast returns the next free object if one is quickly available. 855// Otherwise it returns 0. 856func nextFreeFast(s *mspan) gclinkptr { 857 theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache? 858 if theBit < 64 { 859 result := s.freeindex + uintptr(theBit) 860 if result < s.nelems { 861 freeidx := result + 1 862 if freeidx%64 == 0 && freeidx != s.nelems { 863 return 0 864 } 865 s.allocCache >>= uint(theBit + 1) 866 s.freeindex = freeidx 867 s.allocCount++ 868 return gclinkptr(result*s.elemsize + s.base()) 869 } 870 } 871 return 0 872} 873 874// nextFree returns the next free object from the cached span if one is available. 875// Otherwise it refills the cache with a span with an available object and 876// returns that object along with a flag indicating that this was a heavy 877// weight allocation. If it is a heavy weight allocation the caller must 878// determine whether a new GC cycle needs to be started or if the GC is active 879// whether this goroutine needs to assist the GC. 880// 881// Must run in a non-preemptible context since otherwise the owner of 882// c could change. 883func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) { 884 s = c.alloc[spc] 885 shouldhelpgc = false 886 freeIndex := s.nextFreeIndex() 887 if freeIndex == s.nelems { 888 // The span is full. 889 if uintptr(s.allocCount) != s.nelems { 890 println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems) 891 throw("s.allocCount != s.nelems && freeIndex == s.nelems") 892 } 893 c.refill(spc) 894 shouldhelpgc = true 895 s = c.alloc[spc] 896 897 freeIndex = s.nextFreeIndex() 898 } 899 900 if freeIndex >= s.nelems { 901 throw("freeIndex is not valid") 902 } 903 904 v = gclinkptr(freeIndex*s.elemsize + s.base()) 905 s.allocCount++ 906 if uintptr(s.allocCount) > s.nelems { 907 println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems) 908 throw("s.allocCount > s.nelems") 909 } 910 return 911} 912 913// Allocate an object of size bytes. 914// Small objects are allocated from the per-P cache's free lists. 915// Large objects (> 32 kB) are allocated straight from the heap. 916func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { 917 if gcphase == _GCmarktermination { 918 throw("mallocgc called with gcphase == _GCmarktermination") 919 } 920 921 if size == 0 { 922 return unsafe.Pointer(&zerobase) 923 } 924 925 if debug.malloc { 926 if debug.sbrk != 0 { 927 align := uintptr(16) 928 if typ != nil { 929 // TODO(austin): This should be just 930 // align = uintptr(typ.align) 931 // but that's only 4 on 32-bit platforms, 932 // even if there's a uint64 field in typ (see #599). 933 // This causes 64-bit atomic accesses to panic. 934 // Hence, we use stricter alignment that matches 935 // the normal allocator better. 936 if size&7 == 0 { 937 align = 8 938 } else if size&3 == 0 { 939 align = 4 940 } else if size&1 == 0 { 941 align = 2 942 } else { 943 align = 1 944 } 945 } 946 return persistentalloc(size, align, &memstats.other_sys) 947 } 948 949 if inittrace.active && inittrace.id == getg().goid { 950 // Init functions are executed sequentially in a single Go routine. 951 inittrace.allocs += 1 952 } 953 } 954 955 // When using gccgo, when a cgo or SWIG function has an 956 // interface return type and the function returns a 957 // non-pointer, memory allocation occurs after syscall.Cgocall 958 // but before syscall.CgocallDone. Treat this allocation as a 959 // callback. 960 incallback := false 961 if gp := getg(); gp.m.p == 0 && gp.m.ncgo > 0 { 962 exitsyscall() 963 incallback = true 964 } 965 966 // assistG is the G to charge for this allocation, or nil if 967 // GC is not currently active. 968 var assistG *g 969 if gcBlackenEnabled != 0 { 970 // Charge the current user G for this allocation. 971 assistG = getg() 972 if assistG.m.curg != nil { 973 assistG = assistG.m.curg 974 } 975 // Charge the allocation against the G. We'll account 976 // for internal fragmentation at the end of mallocgc. 977 assistG.gcAssistBytes -= int64(size) 978 979 if assistG.gcAssistBytes < 0 { 980 // This G is in debt. Assist the GC to correct 981 // this before allocating. This must happen 982 // before disabling preemption. 983 gcAssistAlloc(assistG) 984 } 985 } 986 987 // Set mp.mallocing to keep from being preempted by GC. 988 mp := acquirem() 989 if mp.mallocing != 0 { 990 throw("malloc deadlock") 991 } 992 if mp.gsignal == getg() { 993 throw("malloc during signal") 994 } 995 mp.mallocing = 1 996 997 shouldhelpgc := false 998 dataSize := size 999 c := getMCache() 1000 if c == nil { 1001 throw("mallocgc called without a P or outside bootstrapping") 1002 } 1003 var span *mspan 1004 var x unsafe.Pointer 1005 noscan := typ == nil || typ.ptrdata == 0 1006 if size <= maxSmallSize { 1007 if noscan && size < maxTinySize { 1008 // Tiny allocator. 1009 // 1010 // Tiny allocator combines several tiny allocation requests 1011 // into a single memory block. The resulting memory block 1012 // is freed when all subobjects are unreachable. The subobjects 1013 // must be noscan (don't have pointers), this ensures that 1014 // the amount of potentially wasted memory is bounded. 1015 // 1016 // Size of the memory block used for combining (maxTinySize) is tunable. 1017 // Current setting is 16 bytes, which relates to 2x worst case memory 1018 // wastage (when all but one subobjects are unreachable). 1019 // 8 bytes would result in no wastage at all, but provides less 1020 // opportunities for combining. 1021 // 32 bytes provides more opportunities for combining, 1022 // but can lead to 4x worst case wastage. 1023 // The best case winning is 8x regardless of block size. 1024 // 1025 // Objects obtained from tiny allocator must not be freed explicitly. 1026 // So when an object will be freed explicitly, we ensure that 1027 // its size >= maxTinySize. 1028 // 1029 // SetFinalizer has a special case for objects potentially coming 1030 // from tiny allocator, it such case it allows to set finalizers 1031 // for an inner byte of a memory block. 1032 // 1033 // The main targets of tiny allocator are small strings and 1034 // standalone escaping variables. On a json benchmark 1035 // the allocator reduces number of allocations by ~12% and 1036 // reduces heap size by ~20%. 1037 off := c.tinyoffset 1038 // Align tiny pointer for required (conservative) alignment. 1039 if size&7 == 0 { 1040 off = alignUp(off, 8) 1041 } else if sys.PtrSize == 4 && size == 12 { 1042 // Conservatively align 12-byte objects to 8 bytes on 32-bit 1043 // systems so that objects whose first field is a 64-bit 1044 // value is aligned to 8 bytes and does not cause a fault on 1045 // atomic access. See issue 37262. 1046 // TODO(mknyszek): Remove this workaround if/when issue 36606 1047 // is resolved. 1048 off = alignUp(off, 8) 1049 } else if size&3 == 0 { 1050 off = alignUp(off, 4) 1051 } else if size&1 == 0 { 1052 off = alignUp(off, 2) 1053 } 1054 if off+size <= maxTinySize && c.tiny != 0 { 1055 // The object fits into existing tiny block. 1056 x = unsafe.Pointer(c.tiny + off) 1057 c.tinyoffset = off + size 1058 c.tinyAllocs++ 1059 mp.mallocing = 0 1060 releasem(mp) 1061 if incallback { 1062 entersyscall() 1063 } 1064 return x 1065 } 1066 // Allocate a new maxTinySize block. 1067 span = c.alloc[tinySpanClass] 1068 v := nextFreeFast(span) 1069 if v == 0 { 1070 v, span, shouldhelpgc = c.nextFree(tinySpanClass) 1071 } 1072 x = unsafe.Pointer(v) 1073 (*[2]uint64)(x)[0] = 0 1074 (*[2]uint64)(x)[1] = 0 1075 // See if we need to replace the existing tiny block with the new one 1076 // based on amount of remaining free space. 1077 if size < c.tinyoffset || c.tiny == 0 { 1078 c.tiny = uintptr(x) 1079 c.tinyoffset = size 1080 } 1081 size = maxTinySize 1082 } else { 1083 var sizeclass uint8 1084 if size <= smallSizeMax-8 { 1085 sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)] 1086 } else { 1087 sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)] 1088 } 1089 size = uintptr(class_to_size[sizeclass]) 1090 spc := makeSpanClass(sizeclass, noscan) 1091 span = c.alloc[spc] 1092 v := nextFreeFast(span) 1093 if v == 0 { 1094 v, span, shouldhelpgc = c.nextFree(spc) 1095 } 1096 x = unsafe.Pointer(v) 1097 if needzero && span.needzero != 0 { 1098 memclrNoHeapPointers(unsafe.Pointer(v), size) 1099 } 1100 } 1101 } else { 1102 shouldhelpgc = true 1103 span = c.allocLarge(size, needzero, noscan) 1104 span.freeindex = 1 1105 span.allocCount = 1 1106 x = unsafe.Pointer(span.base()) 1107 size = span.elemsize 1108 } 1109 1110 var scanSize uintptr 1111 if !noscan { 1112 heapBitsSetType(uintptr(x), size, dataSize, typ) 1113 if dataSize > typ.size { 1114 // Array allocation. If there are any 1115 // pointers, GC has to scan to the last 1116 // element. 1117 if typ.ptrdata != 0 { 1118 scanSize = dataSize - typ.size + typ.ptrdata 1119 } 1120 } else { 1121 scanSize = typ.ptrdata 1122 } 1123 c.scanAlloc += scanSize 1124 } 1125 1126 // Ensure that the stores above that initialize x to 1127 // type-safe memory and set the heap bits occur before 1128 // the caller can make x observable to the garbage 1129 // collector. Otherwise, on weakly ordered machines, 1130 // the garbage collector could follow a pointer to x, 1131 // but see uninitialized memory or stale heap bits. 1132 publicationBarrier() 1133 1134 // Allocate black during GC. 1135 // All slots hold nil so no scanning is needed. 1136 // This may be racing with GC so do it atomically if there can be 1137 // a race marking the bit. 1138 if gcphase != _GCoff { 1139 gcmarknewobject(span, uintptr(x), size, scanSize) 1140 } 1141 1142 if raceenabled { 1143 racemalloc(x, size) 1144 } 1145 1146 if msanenabled { 1147 msanmalloc(x, size) 1148 } 1149 1150 mp.mallocing = 0 1151 releasem(mp) 1152 1153 if debug.malloc { 1154 if debug.allocfreetrace != 0 { 1155 tracealloc(x, size, typ) 1156 } 1157 1158 if inittrace.active && inittrace.id == getg().goid { 1159 // Init functions are executed sequentially in a single Go routine. 1160 inittrace.bytes += uint64(size) 1161 } 1162 } 1163 1164 if rate := MemProfileRate; rate > 0 { 1165 if rate != 1 && size < c.nextSample { 1166 c.nextSample -= size 1167 } else { 1168 mp := acquirem() 1169 profilealloc(mp, x, size) 1170 releasem(mp) 1171 } 1172 } 1173 1174 if assistG != nil { 1175 // Account for internal fragmentation in the assist 1176 // debt now that we know it. 1177 assistG.gcAssistBytes -= int64(size - dataSize) 1178 } 1179 1180 if shouldhelpgc { 1181 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 1182 gcStart(t) 1183 } 1184 } 1185 1186 // Check preemption, since unlike gc we don't check on every call. 1187 if getg().preempt { 1188 checkPreempt() 1189 } 1190 1191 if incallback { 1192 entersyscall() 1193 } 1194 1195 return x 1196} 1197 1198// implementation of new builtin 1199// compiler (both frontend and SSA backend) knows the signature 1200// of this function 1201func newobject(typ *_type) unsafe.Pointer { 1202 return mallocgc(typ.size, typ, true) 1203} 1204 1205//go:linkname reflect_unsafe_New reflect.unsafe__New 1206func reflect_unsafe_New(typ *_type) unsafe.Pointer { 1207 return mallocgc(typ.size, typ, true) 1208} 1209 1210//go:linkname reflectlite_unsafe_New internal_1reflectlite.unsafe__New 1211func reflectlite_unsafe_New(typ *_type) unsafe.Pointer { 1212 return mallocgc(typ.size, typ, true) 1213} 1214 1215// newarray allocates an array of n elements of type typ. 1216func newarray(typ *_type, n int) unsafe.Pointer { 1217 if n == 1 { 1218 return mallocgc(typ.size, typ, true) 1219 } 1220 mem, overflow := math.MulUintptr(typ.size, uintptr(n)) 1221 if overflow || mem > maxAlloc || n < 0 { 1222 panic(plainError("runtime: allocation size out of range")) 1223 } 1224 return mallocgc(mem, typ, true) 1225} 1226 1227//go:linkname reflect_unsafe_NewArray reflect.unsafe__NewArray 1228func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer { 1229 return newarray(typ, n) 1230} 1231 1232func profilealloc(mp *m, x unsafe.Pointer, size uintptr) { 1233 c := getMCache() 1234 if c == nil { 1235 throw("profilealloc called without a P or outside bootstrapping") 1236 } 1237 c.nextSample = nextSample() 1238 mProf_Malloc(x, size) 1239} 1240 1241// nextSample returns the next sampling point for heap profiling. The goal is 1242// to sample allocations on average every MemProfileRate bytes, but with a 1243// completely random distribution over the allocation timeline; this 1244// corresponds to a Poisson process with parameter MemProfileRate. In Poisson 1245// processes, the distance between two samples follows the exponential 1246// distribution (exp(MemProfileRate)), so the best return value is a random 1247// number taken from an exponential distribution whose mean is MemProfileRate. 1248func nextSample() uintptr { 1249 if MemProfileRate == 1 { 1250 // Callers assign our return value to 1251 // mcache.next_sample, but next_sample is not used 1252 // when the rate is 1. So avoid the math below and 1253 // just return something. 1254 return 0 1255 } 1256 if GOOS == "plan9" { 1257 // Plan 9 doesn't support floating point in note handler. 1258 if g := getg(); g == g.m.gsignal { 1259 return nextSampleNoFP() 1260 } 1261 } 1262 1263 return uintptr(fastexprand(MemProfileRate)) 1264} 1265 1266// fastexprand returns a random number from an exponential distribution with 1267// the specified mean. 1268func fastexprand(mean int) int32 { 1269 // Avoid overflow. Maximum possible step is 1270 // -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean. 1271 switch { 1272 case mean > 0x7000000: 1273 mean = 0x7000000 1274 case mean == 0: 1275 return 0 1276 } 1277 1278 // Take a random sample of the exponential distribution exp(-mean*x). 1279 // The probability distribution function is mean*exp(-mean*x), so the CDF is 1280 // p = 1 - exp(-mean*x), so 1281 // q = 1 - p == exp(-mean*x) 1282 // log_e(q) = -mean*x 1283 // -log_e(q)/mean = x 1284 // x = -log_e(q) * mean 1285 // x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency 1286 const randomBitCount = 26 1287 q := fastrand()%(1<<randomBitCount) + 1 1288 qlog := fastlog2(float64(q)) - randomBitCount 1289 if qlog > 0 { 1290 qlog = 0 1291 } 1292 const minusLog2 = -0.6931471805599453 // -ln(2) 1293 return int32(qlog*(minusLog2*float64(mean))) + 1 1294} 1295 1296// nextSampleNoFP is similar to nextSample, but uses older, 1297// simpler code to avoid floating point. 1298func nextSampleNoFP() uintptr { 1299 // Set first allocation sample size. 1300 rate := MemProfileRate 1301 if rate > 0x3fffffff { // make 2*rate not overflow 1302 rate = 0x3fffffff 1303 } 1304 if rate != 0 { 1305 return uintptr(fastrand() % uint32(2*rate)) 1306 } 1307 return 0 1308} 1309 1310type persistentAlloc struct { 1311 base *notInHeap 1312 off uintptr 1313} 1314 1315var globalAlloc struct { 1316 mutex 1317 persistentAlloc 1318} 1319 1320// persistentChunkSize is the number of bytes we allocate when we grow 1321// a persistentAlloc. 1322const persistentChunkSize = 256 << 10 1323 1324// persistentChunks is a list of all the persistent chunks we have 1325// allocated. The list is maintained through the first word in the 1326// persistent chunk. This is updated atomically. 1327var persistentChunks *notInHeap 1328 1329// Wrapper around sysAlloc that can allocate small chunks. 1330// There is no associated free operation. 1331// Intended for things like function/type/debug-related persistent data. 1332// If align is 0, uses default align (currently 8). 1333// The returned memory will be zeroed. 1334// 1335// Consider marking persistentalloc'd types go:notinheap. 1336func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer { 1337 var p *notInHeap 1338 systemstack(func() { 1339 p = persistentalloc1(size, align, sysStat) 1340 }) 1341 return unsafe.Pointer(p) 1342} 1343 1344// Must run on system stack because stack growth can (re)invoke it. 1345// See issue 9174. 1346//go:systemstack 1347func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap { 1348 const ( 1349 maxBlock = 64 << 10 // VM reservation granularity is 64K on windows 1350 ) 1351 1352 if size == 0 { 1353 throw("persistentalloc: size == 0") 1354 } 1355 if align != 0 { 1356 if align&(align-1) != 0 { 1357 throw("persistentalloc: align is not a power of 2") 1358 } 1359 if align > _PageSize { 1360 throw("persistentalloc: align is too large") 1361 } 1362 } else { 1363 align = 8 1364 } 1365 1366 if size >= maxBlock { 1367 return (*notInHeap)(sysAlloc(size, sysStat)) 1368 } 1369 1370 mp := acquirem() 1371 var persistent *persistentAlloc 1372 if mp != nil && mp.p != 0 { 1373 persistent = &mp.p.ptr().palloc 1374 } else { 1375 lock(&globalAlloc.mutex) 1376 persistent = &globalAlloc.persistentAlloc 1377 } 1378 persistent.off = alignUp(persistent.off, align) 1379 if persistent.off+size > persistentChunkSize || persistent.base == nil { 1380 persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys)) 1381 if persistent.base == nil { 1382 if persistent == &globalAlloc.persistentAlloc { 1383 unlock(&globalAlloc.mutex) 1384 } 1385 throw("runtime: cannot allocate memory") 1386 } 1387 1388 // Add the new chunk to the persistentChunks list. 1389 for { 1390 chunks := uintptr(unsafe.Pointer(persistentChunks)) 1391 *(*uintptr)(unsafe.Pointer(persistent.base)) = chunks 1392 if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) { 1393 break 1394 } 1395 } 1396 persistent.off = alignUp(sys.PtrSize, align) 1397 } 1398 p := persistent.base.add(persistent.off) 1399 persistent.off += size 1400 releasem(mp) 1401 if persistent == &globalAlloc.persistentAlloc { 1402 unlock(&globalAlloc.mutex) 1403 } 1404 1405 if sysStat != &memstats.other_sys { 1406 sysStat.add(int64(size)) 1407 memstats.other_sys.add(-int64(size)) 1408 } 1409 return p 1410} 1411 1412// inPersistentAlloc reports whether p points to memory allocated by 1413// persistentalloc. This must be nosplit because it is called by the 1414// cgo checker code, which is called by the write barrier code. 1415//go:nosplit 1416func inPersistentAlloc(p uintptr) bool { 1417 chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks))) 1418 for chunk != 0 { 1419 if p >= chunk && p < chunk+persistentChunkSize { 1420 return true 1421 } 1422 chunk = *(*uintptr)(unsafe.Pointer(chunk)) 1423 } 1424 return false 1425} 1426 1427// linearAlloc is a simple linear allocator that pre-reserves a region 1428// of memory and then maps that region into the Ready state as needed. The 1429// caller is responsible for locking. 1430type linearAlloc struct { 1431 next uintptr // next free byte 1432 mapped uintptr // one byte past end of mapped space 1433 end uintptr // end of reserved space 1434} 1435 1436func (l *linearAlloc) init(base, size uintptr) { 1437 if base+size < base { 1438 // Chop off the last byte. The runtime isn't prepared 1439 // to deal with situations where the bounds could overflow. 1440 // Leave that memory reserved, though, so we don't map it 1441 // later. 1442 size -= 1 1443 } 1444 l.next, l.mapped = base, base 1445 l.end = base + size 1446} 1447 1448func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer { 1449 p := alignUp(l.next, align) 1450 if p+size > l.end { 1451 return nil 1452 } 1453 l.next = p + size 1454 if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped { 1455 // Transition from Reserved to Prepared to Ready. 1456 sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat) 1457 sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped) 1458 l.mapped = pEnd 1459 } 1460 return unsafe.Pointer(p) 1461} 1462 1463// notInHeap is off-heap memory allocated by a lower-level allocator 1464// like sysAlloc or persistentAlloc. 1465// 1466// In general, it's better to use real types marked as go:notinheap, 1467// but this serves as a generic type for situations where that isn't 1468// possible (like in the allocators). 1469// 1470// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc? 1471// 1472//go:notinheap 1473type notInHeap struct{} 1474 1475func (p *notInHeap) add(bytes uintptr) *notInHeap { 1476 return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes)) 1477} 1478