// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator.
//
// This was originally based on tcmalloc, but has diverged quite a bit.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free set of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using a free bitmap.
//
// The allocator's data structures are:
//
// fixalloc: a free-list allocator for fixed-size off-heap objects,
//	used to manage storage used by the allocator.
// mheap: the malloc heap, managed at page (8192-byte) granularity.
// mspan: a run of in-use pages managed by the mheap.
// mcentral: collects all spans of a given size class.
// mcache: a per-P cache of mspans with free space.
// mstats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
// 1. Round the size up to one of the small size classes
//    and look in the corresponding mspan in this P's mcache.
//    Scan the mspan's free bitmap to find a free slot.
//    If there is a free slot, allocate it.
//    This can all be done without acquiring a lock.
//
// 2. If the mspan has no free slots, obtain a new mspan
//    from the mcentral's list of mspans of the required size
//    class that have free space.
//    Obtaining a whole span amortizes the cost of locking
//    the mcentral.
//
// 3. If the mcentral's mspan list is empty, obtain a run
//    of pages from the mheap to use for the mspan.
//
// 4. If the mheap is empty or has no page runs large enough,
//    allocate a new group of pages (at least 1MB) from the
//    operating system. Allocating a large run of pages
//    amortizes the cost of talking to the operating system.
//
// Sweeping an mspan and freeing objects on it proceeds up a similar
// hierarchy:
//
// 1. If the mspan is being swept in response to allocation, it
//    is returned to the mcache to satisfy the allocation.
//
// 2. Otherwise, if the mspan still has allocated objects in it,
//    it is placed on the mcentral free list for the mspan's size
//    class.
//
// 3. Otherwise, if all objects in the mspan are free, the mspan's
//    pages are returned to the mheap and the mspan is now dead.
//
// Allocating and freeing a large object uses the mheap
// directly, bypassing the mcache and mcentral.
//
// If mspan.needzero is false, then free object slots in the mspan are
// already zeroed. Otherwise if needzero is true, objects are zeroed as
// they are allocated. There are various benefits to delaying zeroing
// this way:
//
// 1. Stack frame allocation can avoid zeroing altogether.
//
// 2. It exhibits better temporal locality, since the program is
//    probably about to write to the memory.
//
// 3. We don't zero pages that never get reused.

// Virtual memory layout
//
// The heap consists of a set of arenas, which are 64MB on 64-bit and
// 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
// aligned to the arena size.
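// For example (illustrative only, not tied to a particular platform's
// constants): with 64MB arenas, an arena starting at 0x000000c000000000
// covers the address range [0xc000000000, 0xc004000000).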
//
// Each arena has an associated heapArena object that stores the
// metadata for that arena: the heap bitmap for all words in the arena
// and the span map for all pages in the arena. heapArena objects are
// themselves allocated off-heap.
//
// Since arenas are aligned, the address space can be viewed as a
// series of arena frames. The arena map (mheap_.arenas) maps from
// arena frame number to *heapArena, or nil for parts of the address
// space not backed by the Go heap. The arena map is structured as a
// two-level array consisting of a "L1" arena map and many "L2" arena
// maps; however, since arenas are large, on many architectures, the
// arena map consists of a single, large L2 map.
//
// The arena map covers the entire possible address space, allowing
// the Go heap to use any part of the address space. The allocator
// attempts to keep arenas contiguous so that large spans (and hence
// large objects) can cross arenas.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)

// C function to get the end of the program's memory.
func getEnd() uintptr

// For gccgo, use go:linkname to export compiler-called functions.
//
//go:linkname newobject

// Functions called by C code.
//go:linkname mallocgc

const (
	debugMalloc = false

	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize
	pageMask  = _PageMask
	// By construction, single page spans of the smallest object class
	// have the most objects per span.
	maxObjsPerSpan = pageSize / 8

	concurrentSweep = _ConcurrentSweep

	_PageSize = 1 << _PageShift
	_PageMask = _PageSize - 1

	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = int8(2)

	_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//   OS               | FixedStack | NumStackOrders
	//   -----------------+------------+---------------
	//   linux/darwin/bsd | 2KB        | 4
	//   windows/32       | 4KB        | 3
	//   windows/64       | 8KB        | 2
	//   plan9            | 4KB        | 3
	_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9

	// heapAddrBits is the number of bits in a heap address. On
	// amd64, addresses are sign-extended beyond heapAddrBits. On
	// other arches, they are zero-extended.
	//
	// On most 64-bit platforms, we limit this to 48 bits based on a
	// combination of hardware and OS limitations.
	//
	// amd64 hardware limits addresses to 48 bits, sign-extended
	// to 64 bits. Addresses where the top 16 bits are not either
	// all 0 or all 1 are "non-canonical" and invalid. Because of
	// these "negative" addresses, we offset addresses by 1<<47
	// (arenaBaseOffset) on amd64 before computing indexes into
	// the heap arenas index. In 2017, amd64 hardware added
	// support for 57 bit addresses; however, currently only Linux
	// supports this extension and the kernel will never choose an
	// address above 1<<47 unless mmap is called with a hint
	// address above 1<<47 (which we never do).
	//
	// arm64 hardware (as of ARMv8) limits user addresses to 48
	// bits, in the range [0, 1<<48).
	//
	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
	// in hardware. On Linux, Go leans on stricter OS limits. Based
	// on Linux's processor.h, the user address space is limited as
	// follows on 64-bit architectures:
	//
	// Architecture  Name              Maximum Value (exclusive)
	// ---------------------------------------------------------------------
	// amd64         TASK_SIZE_MAX     0x007ffffffff000 (47 bit addresses)
	// arm64         TASK_SIZE_64      0x01000000000000 (48 bit addresses)
	// ppc64{,le}    TASK_SIZE_USER64  0x00400000000000 (46 bit addresses)
	// mips64{,le}   TASK_SIZE64       0x00010000000000 (40 bit addresses)
	// s390x         TASK_SIZE         1<<64 (64 bit addresses)
	//
	// These limits may increase over time, but are currently at
	// most 48 bits except on s390x. On all architectures, Linux
	// starts placing mmap'd regions at addresses that are
	// significantly below 48 bits, so even if it's possible to
	// exceed Go's 48 bit limit, it's extremely unlikely in
	// practice.
	//
	// On 32-bit platforms, we accept the full 32-bit address
	// space because doing so is cheap.
	// mips32 only has access to the low 2GB of virtual memory, so
	// we further limit it to 31 bits.
	//
	// On darwin/arm64, although 64-bit pointers are presumably
	// available, pointers are truncated to 33 bits. Furthermore,
	// only the top 4 GiB of the address space are actually available
	// to the application, but we allow the whole 33 bits anyway for
	// simplicity.
	// TODO(mknyszek): Consider limiting it to 32 bits and using
	// arenaBaseOffset to offset into the top 4 GiB.
	//
	// WebAssembly currently has a limit of 4GB linear memory.
	heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosDarwin*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*sys.GoosDarwin*sys.GoarchArm64

	// maxAlloc is the maximum size of an allocation. On 64-bit,
	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
	// 32-bit, however, this is one less than 1<<32 because the
	// number of bytes in the address space doesn't actually fit
	// in a uintptr.
	maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1

	// The number of bits in a heap address, the size of heap
	// arenas, and the L1 and L2 arena map sizes are related by
	//
	//   (1 << addr bits) = arena size * L1 entries * L2 entries
	//
	// Currently, we balance these as follows:
	//
	//       Platform  Addr bits  Arena size  L1 entries   L2 entries
	// --------------  ---------  ----------  ----------  -----------
	//       */64-bit         48        64MB           1    4M (32MB)
	// windows/64-bit         48         4MB          64    1M  (8MB)
	//       */32-bit         32         4MB           1  1024  (4KB)
	//     */mips(le)         31         4MB           1   512  (2KB)

	// heapArenaBytes is the size of a heap arena. The heap
	// consists of mappings of size heapArenaBytes, aligned to
	// heapArenaBytes. The initial heap mapping is one arena.
	//
	// This is currently 64MB on 64-bit non-Windows and 4MB on
	// 32-bit and on Windows. We use smaller arenas on Windows
	// because all committed memory is charged to the process,
	// even if it's not touched. Hence, for processes with small
	// heaps, the mapped arena space needs to be commensurate.
	// This is particularly important with the race detector,
	// since it significantly amplifies the cost of committed
	// memory.
	heapArenaBytes = 1 << logHeapArenaBytes

	// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
	// prefer using heapArenaBytes where possible (we need the
	// constant to compute some other constants).
	logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoarchWasm)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (2+20)*sys.GoarchWasm

	// heapArenaBitmapBytes is the size of each heap arena's bitmap.
	heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)

	pagesPerArena = heapArenaBytes / pageSize

	// arenaL1Bits is the number of bits of the arena number
	// covered by the first level arena map.
	//
	// This number should be small, since the first level arena
	// map requires PtrSize*(1<<arenaL1Bits) of space in the
	// binary's BSS. It can be zero, in which case the first level
	// index is effectively unused. There is a performance benefit
	// to this, since the generated code can be more efficient,
	// but it comes at the cost of having a large L2 mapping.
	//
	// We use the L1 map on 64-bit Windows because the arena size
	// is small, but the address space is still 48 bits, and
	// there's a high cost to having a large L2.
	arenaL1Bits = 6 * (_64bit * sys.GoosWindows)

	// arenaL2Bits is the number of bits of the arena number
	// covered by the second level arena index.
	//
	// The size of each arena map allocation is proportional to
	// 1<<arenaL2Bits, so it's important that this not be too
	// large. 48 bits leads to 32MB arena index allocations, which
	// is about the practical threshold.
	arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits

	// arenaL1Shift is the number of bits to shift an arena frame
	// number by to compute an index into the first level arena map.
	arenaL1Shift = arenaL2Bits

	// arenaBits is the total bits in a combined arena map index.
	// This is split between the index into the L1 arena map and
	// the L2 arena map.
	arenaBits = arenaL1Bits + arenaL2Bits

	// arenaBaseOffset is the pointer value that corresponds to
	// index 0 in the heap arena map.
	//
	// On amd64, the address space is 48 bits, sign extended to 64
	// bits. This offset lets us handle "negative" addresses (or
	// high addresses if viewed as unsigned).
	//
	// On aix/ppc64, this offset allows keeping heapAddrBits at 48.
	// Otherwise, it would have to be 60 in order to handle mmap
	// addresses (in the range 0x0a00000000000000 - 0x0afffffffffffff),
	// and in that case the memory reserved in (s *pageAlloc).init for
	// chunks causes significant slowdowns.
	//
	// On other platforms, the user address space is contiguous
	// and starts at 0, so no offset is necessary.
	arenaBaseOffset = sys.GoarchAmd64*(1<<47) + (^0x0a00000000000000+1)&uintptrMask*sys.GoosAix

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32

	// minLegalPointer is the smallest possible legal pointer.
	// This is the smallest possible architectural page size,
	// since we assume that the first page is never mapped.
	//
	// This should agree with minZeroPage in the compiler.
	minLegalPointer uintptr = 4096
)

// physPageSize is the size in bytes of the OS's physical pages.
// Mapping and unmapping operations must be done at multiples of
// physPageSize.
//
// This must be set by the OS init code (typically in osinit) before
// mallocinit.
var physPageSize uintptr

// physHugePageSize is the size in bytes of the OS's default physical huge
// page size whose allocation is opaque to the application. It is assumed
// and verified to be a power of two.
//
// If set, this must be set by the OS init code (typically in osinit) before
// mallocinit. However, setting it at all is optional, and leaving the default
// value is always safe (though potentially less efficient).
//
// Since physHugePageSize is always assumed to be a power of two,
// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
// The purpose of physHugePageShift is to avoid doing divisions in
// performance critical functions.
var (
	physHugePageSize  uintptr
	physHugePageShift uint
)

// OS memory management abstraction layer
//
// Regions of the address space managed by the runtime may be in one of four
// states at any given time:
// 1) None - Unreserved and unmapped, the default state of any region.
// 2) Reserved - Owned by the runtime, but accessing it would cause a fault.
//               Does not count against the process' memory footprint.
// 3) Prepared - Reserved, intended not to be backed by physical memory (though
//               an OS may implement this lazily). Can transition efficiently to
//               Ready. Accessing memory in such a region is undefined (may
//               fault, may give back unexpected zeroes, etc.).
// 4) Ready - may be accessed safely.
//
// This set of states is more than is strictly necessary to support all the
// currently supported platforms. One could get by with just None, Reserved, and
// Ready. However, the Prepared state gives us flexibility for performance
// purposes. For example, on POSIX-y operating systems, Reserved is usually a
// private anonymous mmap'd region with PROT_NONE set, and to transition
// to Ready would require setting PROT_READ|PROT_WRITE. However the
// underspecification of Prepared lets us use just MADV_FREE to transition from
// Ready to Prepared. Thus with the Prepared state we can set the permission
// bits just once early on, and we can efficiently tell the OS that it's free to
// take pages away from us when we don't strictly need them.
//
// For each OS there is a common set of helpers defined that transition
// memory regions between these states. The helpers are as follows:
//
// sysAlloc transitions an OS-chosen region of memory from None to Ready.
// More specifically, it obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte. This memory is always immediately available for use.
//
// sysFree transitions a memory region from any state to None. Therefore, it
// returns memory unconditionally. It is used if an out-of-memory error has been
// detected midway through an allocation or to carve out an aligned section of
// the address space. sysFree may be a no-op only if sysReserve always
// returns a memory region aligned to the heap allocator's alignment
// restrictions.
//
// sysReserve transitions a memory region from None to Reserved. It reserves
// address space in such a way that it would cause a fatal fault upon access
// (either via permissions or not committing the memory). Such a reservation is
// thus never backed by physical memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but sysReserve can still choose another
// location if that one is unavailable.
// NOTE: sysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysReserve.
//
// sysMap transitions a memory region from Reserved to Prepared. It ensures the
// memory region can be efficiently transitioned to Ready.
//
// sysUsed transitions a memory region from Prepared to Ready. It notifies the
// operating system that the memory region is needed and ensures that the region
// may be safely accessed. This is typically a no-op on systems that don't have
// an explicit commit step and hard over-commit limits, but is critical on
// Windows, for example.
//
// sysUnused transitions a memory region from Ready to Prepared. It notifies the
// operating system that the physical pages backing this memory region are no
// longer needed and can be reused for other purposes. The contents of a
// sysUnused memory region are considered forfeit and the region must not be
// accessed again until sysUsed is called.
//
// sysFault transitions a memory region from Ready or Prepared to Reserved. It
// marks a region such that it will always fault if accessed. Used only for
// debugging the runtime.

func mallocinit() {
	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	// Not used for gccgo.
	// testdefersizes()

	if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
		// heapBits expects modular arithmetic on bitmap
		// addresses to work.
		throw("heapArenaBitmapBytes not a power of 2")
	}

	// Copy class sizes out for statistics table.
	for i := range class_to_size {
		memstats.by_size[i].size = uint32(class_to_size[i])
	}

	// Check physPageSize.
	if physPageSize == 0 {
		// The OS init code failed to fetch the physical page size.
		throw("failed to get system page size")
	}
	if physPageSize > maxPhysPageSize {
		print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize < minPhysPageSize {
		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize&(physPageSize-1) != 0 {
		print("system page size (", physPageSize, ") must be a power of 2\n")
		throw("bad system page size")
	}
	if physHugePageSize&(physHugePageSize-1) != 0 {
		print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
		throw("bad system huge page size")
	}
	if physHugePageSize > maxPhysHugePageSize {
		// physHugePageSize is greater than the maximum supported huge page size.
		// Don't throw here, like in the other cases, since a system configured
		// in this way isn't wrong; we just don't have the code to support it.
		// Instead, silently set the huge page size to zero.
		physHugePageSize = 0
	}
	if physHugePageSize != 0 {
		// Since physHugePageSize is a power of 2, it suffices to increase
		// physHugePageShift until 1<<physHugePageShift == physHugePageSize.
		for 1<<physHugePageShift != physHugePageSize {
			physHugePageShift++
		}
	}

	// Initialize the heap.
	mheap_.init()
	_g_ := getg()
	_g_.m.mcache = allocmcache()

	// Create initial arena growth hints.
	if sys.PtrSize == 8 {
		// On a 64-bit machine, we pick the following hints
		// because:
		//
		// 1. Starting from the middle of the address space
		// makes it easier to grow out a contiguous range
		// without running into some other mapping.
		//
		// 2. This makes Go heap addresses more easily
		// recognizable when debugging.
		//
		// 3. Stack scanning in gccgo is still conservative,
		// so it's important that addresses be distinguishable
		// from other data.
		//
		// Starting at 0x00c0 means that the valid memory addresses
		// will begin 0x00c0, 0x00c1, ...
		// In little-endian, that's c0 00, c1 00, ... None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer which reserves all memory up to 0x0100.
		// These choices reduce the odds of a conservative garbage collector
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On darwin/arm64, the address space is even smaller.
		//
		// On AIX, mmaps start at 0x0A00000000000000 for 64-bit
		// processes.
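		//
		// As an illustration (not an exhaustive list), on linux/amd64 the
		// loop below produces the hint addresses 0x7fc000000000,
		// 0x7ec000000000, ..., 0x00c000000000, i.e. 0xXXc000000000 for XX
		// from 0x7f down to 0.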
		for i := 0x7f; i >= 0; i-- {
			var p uintptr
			switch {
			case GOARCH == "arm64" && GOOS == "darwin":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			case GOOS == "aix":
				if i == 0 {
					// We don't use addresses directly after 0x0A00000000000000
					// to avoid collisions with other mmaps done by non-Go programs.
					continue
				}
				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
			case raceenabled:
				// The TSAN runtime requires the heap
				// to be in the range [0x00c000000000,
				// 0x00e000000000).
				p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
				if p >= uintptrMask&0x00e000000000 {
					continue
				}
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
			hint.addr = p
			hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		}
	} else {
		// On a 32-bit machine, we're much more concerned
		// about keeping the usable heap contiguous.
		// Hence:
		//
		// 1. We reserve space for all heapArenas up front so
		// they don't get interleaved with the heap. They're
		// ~258MB, so this isn't too bad. (We could reserve a
		// smaller amount of space up front if this is a
		// problem.)
		//
		// 2. We hint the heap to start right above the end of
		// the binary so we have the best chance of keeping it
		// contiguous.
		//
		// 3. We try to stake out a reasonably large initial
		// heap reservation.

		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
		meta := uintptr(sysReserve(nil, arenaMetaSize))
		if meta != 0 {
			mheap_.heapArenaAlloc.init(meta, arenaMetaSize)
		}

		// We want to start the arena low, but if we're linked
		// against C code, it's possible global constructors
		// have called malloc and adjusted the process' brk.
		// Query the brk so we can avoid trying to map the
		// region over it (which will cause the kernel to put
		// the region somewhere else, likely at a high
		// address).
		procBrk := sbrk0()

		// If we ask for the end of the data segment but the
		// operating system requires a little more space
		// before we can start allocating, it will give out a
		// slightly higher pointer. Except QEMU, which is
		// buggy, as usual: it won't adjust the pointer
		// upward. So adjust it upward a little bit ourselves:
		// 1/4 MB to get away from the running binary image.
		p := getEnd()
		if p < procBrk {
			p = procBrk
		}
		if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
			p = mheap_.heapArenaAlloc.end
		}
		p = alignUp(p+(256<<10), heapArenaBytes)
		// Because we're worried about fragmentation on
		// 32-bit, we try to make a large initial reservation.
		arenaSizes := [...]uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
		}
		for _, arenaSize := range &arenaSizes {
			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
			if a != nil {
				mheap_.arena.init(uintptr(a), size)
				p = uintptr(a) + size // For hint below
				break
			}
		}
		hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
		hint.addr = p
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
	}
}

// sysAlloc allocates heap arena space for at least n bytes. The
// returned pointer is always heapArenaBytes-aligned and backed by
// h.arenas metadata. The returned size is always a multiple of
// heapArenaBytes. sysAlloc returns nil on failure.
// There is no corresponding free function.
//
// sysAlloc returns a memory region in the Prepared state. This region must
// be transitioned to Ready before use.
//
// h must be locked.
func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
	n = alignUp(n, heapArenaBytes)

	// First, try the arena pre-reservation.
	v = h.arena.alloc(n, heapArenaBytes, &memstats.heap_sys)
	if v != nil {
		size = n
		goto mapped
	}

	// Try to grow the heap at a hint address.
	for h.arenaHints != nil {
		hint := h.arenaHints
		p := hint.addr
		if hint.down {
			p -= n
		}
		if p+n < p {
			// We can't use this, so don't ask.
			v = nil
		} else if arenaIndex(p+n-1) >= 1<<arenaBits {
			// Outside addressable heap. Can't use.
			v = nil
		} else {
			v = sysReserve(unsafe.Pointer(p), n)
		}
		if p == uintptr(v) {
			// Success. Update the hint.
			if !hint.down {
				p += n
			}
			hint.addr = p
			size = n
			break
		}
		// Failed. Discard this hint and try the next.
		//
		// TODO: This would be cleaner if sysReserve could be
		// told to only return the requested address. In
		// particular, this is already how Windows behaves, so
		// it would simplify things there.
		if v != nil {
			sysFree(v, n, nil)
		}
		h.arenaHints = hint.next
		h.arenaHintAlloc.free(unsafe.Pointer(hint))
	}

	if size == 0 {
		if raceenabled {
			// The race detector assumes the heap lives in
			// [0x00c000000000, 0x00e000000000), but we
			// just ran out of hints in this region. Give
			// a nice failure.
			throw("too many address space collisions for -race mode")
		}

		// All of the hints failed, so we'll take any
		// (sufficiently aligned) address the kernel will give
		// us.
		v, size = sysReserveAligned(nil, n, heapArenaBytes)
		if v == nil {
			return nil, 0
		}

		// Create new hints for extending this region.
		hint := (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr, hint.down = uintptr(v), true
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		hint = (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr = uintptr(v) + size
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
	}

	// Check for bad pointers or pointers we can't use.
	{
		var bad string
		p := uintptr(v)
		if p+size < p {
			bad = "region exceeds uintptr range"
		} else if arenaIndex(p) >= 1<<arenaBits {
			bad = "base outside usable address space"
		} else if arenaIndex(p+size-1) >= 1<<arenaBits {
			bad = "end outside usable address space"
		}
		if bad != "" {
			// This should be impossible on most architectures,
			// but it would be really confusing to debug.
			print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
			throw("memory reservation exceeds address space limit")
		}
	}

	if uintptr(v)&(heapArenaBytes-1) != 0 {
		throw("misrounded allocation in sysAlloc")
	}

	// Transition from Reserved to Prepared.
	sysMap(v, size, &memstats.heap_sys)

mapped:
	// Create arena metadata.
	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
		l2 := h.arenas[ri.l1()]
		if l2 == nil {
			// Allocate an L2 arena map.
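			//
			// The map is allocated off-heap with persistentalloc and
			// published with an atomic store (below) so that concurrent
			// readers of h.arenas never see a partially written entry.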
			l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil))
			if l2 == nil {
				throw("out of memory allocating heap arena map")
			}
			atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
		}

		if l2[ri.l2()] != nil {
			throw("arena already initialized")
		}
		var r *heapArena
		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gc_sys))
		if r == nil {
			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gc_sys))
			if r == nil {
				throw("out of memory allocating heap arena metadata")
			}
		}

		// Add the arena to the arenas list.
		if len(h.allArenas) == cap(h.allArenas) {
			size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
			if size == 0 {
				size = physPageSize
			}
			newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gc_sys))
			if newArray == nil {
				throw("out of memory allocating allArenas")
			}
			oldSlice := h.allArenas
			*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
			copy(h.allArenas, oldSlice)
			// Do not free the old backing array because
			// there may be concurrent readers. Since we
			// double the array each time, this can lead
			// to at most 2x waste.
		}
		h.allArenas = h.allArenas[:len(h.allArenas)+1]
		h.allArenas[len(h.allArenas)-1] = ri

		// Store atomically just in case an object from the
		// new heap arena becomes visible before the heap lock
		// is released (which shouldn't happen, but there's
		// little downside to this).
		atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
	}

	// Tell the race detector about the new heap memory.
	if raceenabled {
		racemapshadow(v, size)
	}

	return
}

// sysReserveAligned is like sysReserve, but the returned pointer is
// aligned to align bytes. It may reserve either size or size+align bytes,
// so it returns the size that was reserved.
func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
	// Since the alignment is rather large in uses of this
	// function, we're not likely to get it by chance, so we ask
	// for a larger region and remove the parts we don't need.
	retries := 0
retry:
	p := uintptr(sysReserve(v, size+align))
	switch {
	case p == 0:
		return nil, 0
	case p&(align-1) == 0:
		// We got lucky and got an aligned region, so we can
		// use the whole thing.
		return unsafe.Pointer(p), size + align
	case GOOS == "windows":
		// On Windows we can't release pieces of a
		// reservation, so we release the whole thing and
		// re-reserve the aligned sub-region. This may race,
		// so we may have to try again.
		sysFree(unsafe.Pointer(p), size+align, nil)
		p = alignUp(p, align)
		p2 := sysReserve(unsafe.Pointer(p), size)
		if p != uintptr(p2) {
			// Must have raced. Try again.
			sysFree(p2, size, nil)
			if retries++; retries == 100 {
				throw("failed to allocate aligned heap memory; too many retries")
			}
			goto retry
		}
		// Success.
		return p2, size
	default:
		// Trim off the unaligned parts.
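		//
		// We reserved size+align bytes above, so after rounding p up to
		// pAligned we can return the unneeded prefix [p, pAligned) and the
		// tail beyond pAligned+size to the OS and keep exactly size bytes.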
		pAligned := alignUp(p, align)
		sysFree(unsafe.Pointer(p), pAligned-p, nil)
		end := pAligned + size
		endLen := (p + size + align) - end
		if endLen > 0 {
			sysFree(unsafe.Pointer(end), endLen, nil)
		}
		return unsafe.Pointer(pAligned), size
	}
}

// base address for all 0-byte allocations
var zerobase uintptr

// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
func nextFreeFast(s *mspan) gclinkptr {
	theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
	if theBit < 64 {
		result := s.freeindex + uintptr(theBit)
		if result < s.nelems {
			freeidx := result + 1
			if freeidx%64 == 0 && freeidx != s.nelems {
				return 0
			}
			s.allocCache >>= uint(theBit + 1)
			s.freeindex = freeidx
			s.allocCount++
			return gclinkptr(result*s.elemsize + s.base())
		}
	}
	return 0
}

// nextFree returns the next free object from the cached span if one is available.
// Otherwise it refills the cache with a span with an available object and
// returns that object along with a flag indicating that this was a heavyweight
// allocation. If it was, the caller must determine whether a new GC cycle
// needs to be started or, if GC is active, whether this goroutine needs to
// assist the GC.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
	s = c.alloc[spc]
	shouldhelpgc = false
	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		// The span is full.
		if uintptr(s.allocCount) != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
		}
		c.refill(spc)
		shouldhelpgc = true
		s = c.alloc[spc]

		freeIndex = s.nextFreeIndex()
	}

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")
	}

	v = gclinkptr(freeIndex*s.elemsize + s.base())
	s.allocCount++
	if uintptr(s.allocCount) > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")
	}
	return
}

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	if debug.sbrk != 0 {
		align := uintptr(16)
		if typ != nil {
			// TODO(austin): This should be just
			//   align = uintptr(typ.align)
			// but that's only 4 on 32-bit platforms,
			// even if there's a uint64 field in typ (see #599).
			// This causes 64-bit atomic accesses to panic.
			// Hence, we use stricter alignment that matches
			// the normal allocator better.
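			//
			// For example (illustrative): a 24-byte request gets 8-byte
			// alignment below, a 12-byte request gets 4-byte alignment,
			// and a 10-byte request gets 2-byte alignment.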
			if size&7 == 0 {
				align = 8
			} else if size&3 == 0 {
				align = 4
			} else if size&1 == 0 {
				align = 2
			} else {
				align = 1
			}
		}
		return persistentalloc(size, align, &memstats.other_sys)
	}

	// When using gccgo, when a cgo or SWIG function has an
	// interface return type and the function returns a
	// non-pointer, memory allocation occurs after syscall.Cgocall
	// but before syscall.CgocallDone. Treat this allocation as a
	// callback.
	incallback := false
	if gomcache() == nil && getg().m.ncgo > 0 {
		exitsyscall()
		incallback = true
	}

	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	var assistG *g
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		assistG = getg()
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)
		}
	}

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := size
	c := gomcache()
	var x unsafe.Pointer
	noscan := typ == nil || typ.ptrdata == 0
	if size <= maxSmallSize {
		if noscan && size < maxTinySize {
			// Tiny allocator.
			//
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be noscan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (maxTinySize)
			// is tunable. The current setting is 16 bytes, which gives at
			// most 2x worst-case memory wastage (when all but one of the
			// subobjects are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst-case wastage.
			// The best-case saving is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed
			// explicitly. So when an object will be freed explicitly, we
			// ensure that its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in that case it allows setting
			// finalizers for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces the heap size by ~20%.
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
			if size&7 == 0 {
				off = alignUp(off, 8)
			} else if size&3 == 0 {
				off = alignUp(off, 4)
			} else if size&1 == 0 {
				off = alignUp(off, 2)
			}
			if off+size <= maxTinySize && c.tiny != 0 {
				// The object fits into existing tiny block.
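				//
				// For example (illustrative): with c.tinyoffset == 8, an
				// 8-byte noscan request stays 8-aligned and off+size == 16
				// <= maxTinySize, so it is carved out of the current block.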
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size
				c.local_tinyallocs++
				mp.mallocing = 0
				releasem(mp)
				if incallback {
					entersyscall()
				}
				return x
			}
			// Allocate a new maxTinySize block.
			span := c.alloc[tinySpanClass]
			v := nextFreeFast(span)
			if v == 0 {
				v, _, shouldhelpgc = c.nextFree(tinySpanClass)
			}
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
			if size < c.tinyoffset || c.tiny == 0 {
				c.tiny = uintptr(x)
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			var sizeclass uint8
			if size <= smallSizeMax-8 {
				sizeclass = size_to_class8[(size+smallSizeDiv-1)/smallSizeDiv]
			} else {
				sizeclass = size_to_class128[(size-smallSizeMax+largeSizeDiv-1)/largeSizeDiv]
			}
			size = uintptr(class_to_size[sizeclass])
			spc := makeSpanClass(sizeclass, noscan)
			span := c.alloc[spc]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(spc)
			}
			x = unsafe.Pointer(v)
			if needzero && span.needzero != 0 {
				memclrNoHeapPointers(unsafe.Pointer(v), size)
			}
		}
	} else {
		var s *mspan
		shouldhelpgc = true
		systemstack(func() {
			s = largeAlloc(size, needzero, noscan)
		})
		s.freeindex = 1
		s.allocCount = 1
		x = unsafe.Pointer(s.base())
		size = s.elemsize
	}

	var scanSize uintptr
	if !noscan {
		heapBitsSetType(uintptr(x), size, dataSize, typ)
		if dataSize > typ.size {
			// Array allocation. If there are any
			// pointers, GC has to scan to the last
			// element.
			if typ.ptrdata != 0 {
				scanSize = dataSize - typ.size + typ.ptrdata
			}
		} else {
			scanSize = typ.ptrdata
		}
		c.local_scan += scanSize
	}

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase != _GCoff {
		gcmarknewobject(uintptr(x), size, scanSize)
	}

	if raceenabled {
		racemalloc(x, size)
	}

	if msanenabled {
		msanmalloc(x, size)
	}

	mp.mallocing = 0
	releasem(mp)

	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	}

	if rate := MemProfileRate; rate > 0 {
		if rate != 1 && size < c.next_sample {
			c.next_sample -= size
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if assistG != nil {
		// Account for internal fragmentation in the assist
		// debt now that we know it.
		assistG.gcAssistBytes -= int64(size - dataSize)
	}

	if shouldhelpgc {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	// Check preemption, since unlike gc we don't check on every call.
	if getg().preempt {
		checkPreempt()
	}

	if incallback {
		entersyscall()
	}

	return x
}

func largeAlloc(size uintptr, needzero bool, noscan bool) *mspan {
	// print("largeAlloc size=", size, "\n")

	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}

	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npage pages.
	deductSweepCredit(npages*_PageSize, npages)

	s := mheap_.alloc(npages, makeSpanClass(0, noscan), needzero)
	if s == nil {
		throw("out of memory")
	}
	s.limit = s.base() + size
	heapBitsForAddr(s.base()).initSpan(s)
	return s
}

// implementation of new builtin
// compiler (both frontend and SSA backend) knows the signature
// of this function
func newobject(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

//go:linkname reflectlite_unsafe_New internal..z2freflectlite.unsafe_New
func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
	if n == 1 {
		return mallocgc(typ.size, typ, true)
	}
	mem, overflow := math.MulUintptr(typ.size, uintptr(n))
	if overflow || mem > maxAlloc || n < 0 {
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(mem, typ, true)
}

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	mp.mcache.next_sample = nextSample()
	mProf_Malloc(x, size)
}

// nextSample returns the next sampling point for heap profiling. The goal is
// to sample allocations on average every MemProfileRate bytes, but with a
// completely random distribution over the allocation timeline; this
// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
// processes, the distance between two samples follows the exponential
// distribution with mean MemProfileRate, so the best return value is a random
// number taken from an exponential distribution whose mean is MemProfileRate.
func nextSample() uintptr {
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if g := getg(); g == g.m.gsignal {
			return nextSampleNoFP()
		}
	}

	return uintptr(fastexprand(MemProfileRate))
}

// fastexprand returns a random number from an exponential distribution with
// the specified mean.
func fastexprand(mean int) int32 {
	// Avoid overflow. Maximum possible step is
	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
	switch {
	case mean > 0x7000000:
		mean = 0x7000000
	case mean == 0:
		return 0
	}

	// Take a random sample x of the exponential distribution with the given mean.
	// Its density is (1/mean)*exp(-x/mean), so the CDF is
	//   p = 1 - exp(-x/mean)
	// and for q = 1 - p:
	//   q = exp(-x/mean)
	//   log_e(q) = -x/mean
	//   x = -log_e(q) * mean
	//   x = log_2(q) * (-log_e(2)) * mean ; using log_2 for efficiency
	const randomBitCount = 26
	q := fastrand()%(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(mean))) + 1
}

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() uintptr {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return uintptr(fastrand() % uint32(2*rate))
	}
	return 0
}

type persistentAlloc struct {
	base *notInHeap
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// persistentChunkSize is the number of bytes we allocate when we grow
// a persistentAlloc.
const persistentChunkSize = 256 << 10

// persistentChunks is a list of all the persistent chunks we have
// allocated. The list is maintained through the first word in the
// persistent chunk. This is updated atomically.
var persistentChunks *notInHeap

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
// The returned memory will be zeroed.
//
// Consider marking persistentalloc'd types go:notinheap.
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	var p *notInHeap
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return unsafe.Pointer(p)
}

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
	const (
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return (*notInHeap)(sysAlloc(size, sysStat))
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = alignUp(persistent.off, align)
	if persistent.off+size > persistentChunkSize || persistent.base == nil {
		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}

		// Add the new chunk to the persistentChunks list.
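		//
		// The chunk's first word serves as the link to the previous list
		// head; the CAS loop below pushes the chunk onto the front of the
		// list so that concurrent readers in inPersistentAlloc always see
		// a consistent list.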
		for {
			chunks := uintptr(unsafe.Pointer(persistentChunks))
			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
				break
			}
		}
		persistent.off = alignUp(sys.PtrSize, align)
	}
	p := persistent.base.add(persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		mSysStatInc(sysStat, size)
		mSysStatDec(&memstats.other_sys, size)
	}
	return p
}

// inPersistentAlloc reports whether p points to memory allocated by
// persistentalloc. This must be nosplit because it is called by the
// cgo checker code, which is called by the write barrier code.
//go:nosplit
func inPersistentAlloc(p uintptr) bool {
	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
	for chunk != 0 {
		if p >= chunk && p < chunk+persistentChunkSize {
			return true
		}
		chunk = *(*uintptr)(unsafe.Pointer(chunk))
	}
	return false
}

// linearAlloc is a simple linear allocator that pre-reserves a region
// of memory and then maps that region into the Ready state as needed. The
// caller is responsible for locking.
type linearAlloc struct {
	next   uintptr // next free byte
	mapped uintptr // one byte past end of mapped space
	end    uintptr // end of reserved space
}

func (l *linearAlloc) init(base, size uintptr) {
	l.next, l.mapped = base, base
	l.end = base + size
}

func (l *linearAlloc) alloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	p := alignUp(l.next, align)
	if p+size > l.end {
		return nil
	}
	l.next = p + size
	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
		// Transition from Reserved to Prepared to Ready.
		sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
		sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
		l.mapped = pEnd
	}
	return unsafe.Pointer(p)
}

// notInHeap is off-heap memory allocated by a lower-level allocator
// like sysAlloc or persistentAlloc.
//
// In general, it's better to use real types marked as go:notinheap,
// but this serves as a generic type for situations where that isn't
// possible (like in the allocators).
//
// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
//
//go:notinheap
type notInHeap struct{}

func (p *notInHeap) add(bytes uintptr) *notInHeap {
	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}
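
// exampleArenaMapLookup is an illustrative sketch only; it is not part of
// the allocator and is not called by the runtime. It spells out how a heap
// address is translated into arena map indices, mirroring the "Virtual
// memory layout" comment at the top of this file and the indexing done in
// (*mheap).sysAlloc. It assumes arenaIndex and the l1/l2 arena index
// methods used above.
func exampleArenaMapLookup(h *mheap, p uintptr) *heapArena {
	ai := arenaIndex(p) // arena frame number for p
	l2 := h.arenas[ai.l1()]
	if l2 == nil {
		// No L2 map: this part of the address space is not backed
		// by the Go heap.
		return nil
	}
	return l2[ai.l2()]
}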