// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.
//
// TODO(rsc): double-check stats.

package runtime
#include <stddef.h>
#include <errno.h>
#include <stdlib.h>
#include "go-alloc.h"
#include "runtime.h"
#include "arch.h"
#include "malloc.h"
#include "interface.h"
#include "go-type.h"

// Map gccgo field names to gc field names.
// Eface aka __go_empty_interface.
#define type __type_descriptor
// Type aka __go_type_descriptor
#define kind __code
#define string __reflection
#define KindPtr GO_PTR
#define KindNoPointers GO_NO_POINTERS
#define kindMask GO_CODE_MASK

// GCCGO SPECIFIC CHANGE
//
// There is a long comment in runtime_mallocinit about where to put the heap
// on a 64-bit system. It makes assumptions that are not valid on linux/arm64
// -- it assumes user space can choose the lower 47 bits of a pointer, but on
// linux/arm64 we can only choose the lower 39 bits. This means the heap is
// roughly a quarter of the available address space and we cannot choose a bit
// pattern that all pointers will have -- luckily the GC is mostly precise
// these days so this doesn't matter all that much. The kernel (as of 3.13)
// will allocate address space starting either down from 0x7fffffffff or up
// from 0x2000000000, so we put the heap roughly in the middle of these two
// addresses to minimize the chance that a non-heap allocation will get in the
// way of the heap.
//
// This all means that there isn't much point in trying 256 different
// locations for the heap on such systems.
#ifdef __aarch64__
#define HeapBase(i) ((void*)(uintptr)(0x40ULL<<32))
#define HeapBaseOptions 1
#else
#define HeapBase(i) ((void*)(uintptr)(i<<40|0x00c0ULL<<32))
#define HeapBaseOptions 0x80
#endif
// END GCCGO SPECIFIC CHANGE

// Mark mheap as 'no pointers', it does not contain interesting pointers but occupies ~45K.
MHeap runtime_mheap;
MStats mstats;

int32	runtime_checking;

extern MStats mstats;	// defined in zruntime_def_$GOOS_$GOARCH.go

extern volatile intgo runtime_MemProfileRate
	__asm__ (GOSYM_PREFIX "runtime.MemProfileRate");

static MSpan* largealloc(uint32, uintptr*);
static void runtime_profilealloc(void *v, uintptr size);
static void settype(MSpan *s, void *v, uintptr typ);

// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
// If the block will be freed with runtime_free(), typ must be 0.
void*
runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
{
	M *m;
	G *g;
	int32 sizeclass;
	uintptr tinysize, size1;
	intgo rate;
	MCache *c;
	MSpan *s;
	MLink *v, *next;
	byte *tiny;
	bool incallback;

	if(size == 0) {
		// All 0-length allocations use this pointer.
		// The language does not require the allocations to
		// have distinct values.
		return &runtime_zerobase;
	}

	m = runtime_m();
	g = runtime_g();

	incallback = false;
	if(m->mcache == nil && g->ncgo > 0) {
		// For gccgo this case can occur when a cgo or SWIG function
		// has an interface return type and the function
		// returns a non-pointer, so memory allocation occurs
		// after syscall.Cgocall but before syscall.CgocallDone.
		// We treat it as a callback.
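		// (runtime_exitsyscall below reattaches this M to the
		// scheduler so that it again has an mcache to allocate
		// from; every return path in this function checks
		// incallback and calls runtime_entersyscall to restore
		// the callback's syscall state.)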
		runtime_exitsyscall();
		m = runtime_m();
		incallback = true;
		flag |= FlagNoInvokeGC;
	}

	if(runtime_gcwaiting() && g != m->g0 && m->locks == 0 && !(flag & FlagNoInvokeGC)) {
		runtime_gosched();
		m = runtime_m();
	}
	if(m->mallocing)
		runtime_throw("malloc/free - deadlock");
	// Disable preemption during settype.
	// We can not use m->mallocing for this, because settype calls mallocgc.
	m->locks++;
	m->mallocing = 1;

	if(DebugTypeAtBlockEnd)
		size += sizeof(uintptr);

	c = m->mcache;
	if(!runtime_debug.efence && size <= MaxSmallSize) {
		if((flag&(FlagNoScan|FlagNoGC)) == FlagNoScan && size < TinySize) {
			// Tiny allocator.
			//
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be FlagNoScan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (TinySize) is
			// tunable. The current setting is 16 bytes, which amounts to 2x
			// worst case memory wastage (when all but one of the subobjects
			// are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst case wastage.
			// The best case saving is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed
			// explicitly. So when an object is to be freed explicitly, we
			// ensure that its size >= TinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting a
			// finalizer for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces heap size by ~20%.

			tinysize = c->tinysize;
			if(size <= tinysize) {
				tiny = c->tiny;
				// Align tiny pointer for required (conservative) alignment.
				if((size&7) == 0)
					tiny = (byte*)ROUND((uintptr)tiny, 8);
				else if((size&3) == 0)
					tiny = (byte*)ROUND((uintptr)tiny, 4);
				else if((size&1) == 0)
					tiny = (byte*)ROUND((uintptr)tiny, 2);
				size1 = size + (tiny - c->tiny);
				if(size1 <= tinysize) {
					// The object fits into existing tiny block.
					v = (MLink*)tiny;
					c->tiny += size1;
					c->tinysize -= size1;
					m->mallocing = 0;
					m->locks--;
					if(incallback)
						runtime_entersyscall();
					return v;
				}
			}
			// Allocate a new TinySize block.
			s = c->alloc[TinySizeClass];
			if(s->freelist == nil)
				s = runtime_MCache_Refill(c, TinySizeClass);
			v = s->freelist;
			next = v->next;
			s->freelist = next;
			s->ref++;
			if(next != nil)	// prefetching nil leads to a DTLB miss
				PREFETCH(next);
			((uint64*)v)[0] = 0;
			((uint64*)v)[1] = 0;
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
			if(TinySize-size > tinysize) {
				c->tiny = (byte*)v + size;
				c->tinysize = TinySize - size;
			}
			size = TinySize;
			goto done;
		}
		// Allocate from mcache free lists.
		// Inlined version of SizeToClass().
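		// Illustrative example, assuming the usual generated size
		// class tables: for size = 24, (24+7)>>3 = 3, and
		// runtime_size_to_class8[3] names the 32-byte class, so the
		// request is rounded up from 24 to 32 bytes and served from
		// that class's free list.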
		if(size <= 1024-8)
			sizeclass = runtime_size_to_class8[(size+7)>>3];
		else
			sizeclass = runtime_size_to_class128[(size-1024+127) >> 7];
		size = runtime_class_to_size[sizeclass];
		s = c->alloc[sizeclass];
		if(s->freelist == nil)
			s = runtime_MCache_Refill(c, sizeclass);
		v = s->freelist;
		next = v->next;
		s->freelist = next;
		s->ref++;
		if(next != nil)	// prefetching nil leads to a DTLB miss
			PREFETCH(next);
		if(!(flag & FlagNoZero)) {
			v->next = nil;
			// block is zeroed iff second word is zero ...
			if(size > 2*sizeof(uintptr) && ((uintptr*)v)[1] != 0)
				runtime_memclr((byte*)v, size);
		}
	done:
		c->local_cachealloc += size;
	} else {
		// Allocate directly from heap.
		s = largealloc(flag, &size);
		v = (void*)(s->start << PageShift);
	}

	if(flag & FlagNoGC)
		runtime_marknogc(v);
	else if(!(flag & FlagNoScan))
		runtime_markscan(v);

	if(DebugTypeAtBlockEnd)
		*(uintptr*)((uintptr)v+size-sizeof(uintptr)) = typ;

	m->mallocing = 0;
	// TODO: save type even if FlagNoScan? Potentially expensive but might help
	// heap profiling/tracing.
	if(UseSpanType && !(flag & FlagNoScan) && typ != 0)
		settype(s, v, typ);

	if(runtime_debug.allocfreetrace)
		runtime_tracealloc(v, size, typ);

	if(!(flag & FlagNoProfiling) && (rate = runtime_MemProfileRate) > 0) {
		if(size < (uintptr)rate && size < (uintptr)(uint32)c->next_sample)
			c->next_sample -= size;
		else
			runtime_profilealloc(v, size);
	}

	m->locks--;

	if(!(flag & FlagNoInvokeGC) && mstats.heap_alloc >= mstats.next_gc)
		runtime_gc(0);

	if(incallback)
		runtime_entersyscall();

	return v;
}

static MSpan*
largealloc(uint32 flag, uintptr *sizep)
{
	uintptr npages, size;
	MSpan *s;
	void *v;

	// Allocate directly from heap.
	size = *sizep;
	if(size + PageSize < size)
		runtime_throw("out of memory");
	npages = size >> PageShift;
	if((size & PageMask) != 0)
		npages++;
	s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1, !(flag & FlagNoZero));
	if(s == nil)
		runtime_throw("out of memory");
	s->limit = (byte*)(s->start<<PageShift) + size;
	*sizep = npages<<PageShift;
	v = (void*)(s->start << PageShift);
	// setup for mark sweep
	runtime_markspan(v, 0, 0, true);
	return s;
}

static void
runtime_profilealloc(void *v, uintptr size)
{
	uintptr rate;
	int32 next;
	MCache *c;

	c = runtime_m()->mcache;
	rate = runtime_MemProfileRate;
	if(size < rate) {
		// pick next profile time
		// If you change this, also change allocmcache.
		if(rate > 0x3fffffff)	// make 2*rate not overflow
			rate = 0x3fffffff;
		next = runtime_fastrand1() % (2*rate);
		// Subtract the "remainder" of the current allocation.
		// Otherwise objects that are close in size to sampling rate
		// will be under-sampled, because we consistently discard this remainder.
		next -= (size - c->next_sample);
		if(next < 0)
			next = 0;
		c->next_sample = next;
	}
	runtime_MProf_Malloc(v, size);
}

void*
__go_alloc(uintptr size)
{
	return runtime_mallocgc(size, 0, FlagNoInvokeGC);
}

// Free the object whose base pointer is v.
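// Objects from the tiny allocator must never reach this function;
// the "freeing too small block" check inside enforces that.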
void
__go_free(void *v)
{
	M *m;
	int32 sizeclass;
	MSpan *s;
	MCache *c;
	uintptr size;

	if(v == nil)
		return;

	// If you change this also change mgc0.c:/^sweep,
	// which has a copy of the guts of free.

	m = runtime_m();
	if(m->mallocing)
		runtime_throw("malloc/free - deadlock");
	m->mallocing = 1;

	if(!runtime_mlookup(v, nil, nil, &s)) {
		runtime_printf("free %p: not an allocated block\n", v);
		runtime_throw("free runtime_mlookup");
	}
	size = s->elemsize;
	sizeclass = s->sizeclass;
	// Objects that are smaller than TinySize can be allocated using the
	// tiny allocator; if such an object were then combined with an object
	// that has a finalizer, we would crash.
	if(size < TinySize)
		runtime_throw("freeing too small block");

	if(runtime_debug.allocfreetrace)
		runtime_tracefree(v, size);

	// Ensure that the span is swept.
	// If we free into an unswept span, we will corrupt GC bitmaps.
	runtime_MSpan_EnsureSwept(s);

	if(s->specials != nil)
		runtime_freeallspecials(s, v, size);

	c = m->mcache;
	if(sizeclass == 0) {
		// Large object.
		s->needzero = 1;
		// Must mark v freed before calling unmarkspan and MHeap_Free:
		// they might coalesce v into other spans and change the bitmap further.
		runtime_markfreed(v);
		runtime_unmarkspan(v, 1<<PageShift);
		// NOTE(rsc,dvyukov): The original implementation of efence
		// in CL 22060046 used SysFree instead of SysFault, so that
		// the operating system would eventually give the memory
		// back to us again, so that an efence program could run
		// longer without running out of memory. Unfortunately,
		// calling SysFree here without any kind of adjustment of the
		// heap data structures means that when the memory does
		// come back to us, we have the wrong metadata for it, either in
		// the MSpan structures or in the garbage collection bitmap.
		// Using SysFault here means that the program will run out of
		// memory fairly quickly in efence mode, but at least it won't
		// have mysterious crashes due to confused memory reuse.
		// It should be possible to switch back to SysFree if we also
		// implement and then call some kind of MHeap_DeleteSpan.
		if(runtime_debug.efence)
			runtime_SysFault((void*)(s->start<<PageShift), size);
		else
			runtime_MHeap_Free(&runtime_mheap, s, 1);
		c->local_nlargefree++;
		c->local_largefree += size;
	} else {
		// Small object.
		if(size > 2*sizeof(uintptr))
			((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll;	// mark as "needs to be zeroed"
		else if(size > sizeof(uintptr))
			((uintptr*)v)[1] = 0;
		// Must mark v freed before calling MCache_Free:
		// it might coalesce v and other blocks into a bigger span
		// and change the bitmap further.
		c->local_nsmallfree[sizeclass]++;
		c->local_cachealloc -= size;
		if(c->alloc[sizeclass] == s) {
			// We own the span, so we can just add v to the freelist.
			runtime_markfreed(v);
			((MLink*)v)->next = s->freelist;
			s->freelist = v;
			s->ref--;
		} else {
			// Someone else owns this span. Add to free queue.
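			// (MCache_Free queues v on this mcache's local list and
			// eventually returns it to the span's MCentral; we must
			// not touch s->freelist directly, because we do not own
			// the span.)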
			runtime_MCache_Free(c, v, sizeclass, size);
		}
	}
	m->mallocing = 0;
}

int32
runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
{
	M *m;
	uintptr n, i;
	byte *p;
	MSpan *s;

	m = runtime_m();

	m->mcache->local_nlookup++;
	if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
		// purge cache stats to prevent overflow
		runtime_lock(&runtime_mheap);
		runtime_purgecachedstats(m->mcache);
		runtime_unlock(&runtime_mheap);
	}

	s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
	if(sp)
		*sp = s;
	if(s == nil) {
		runtime_checkfreed(v, 1);
		if(base)
			*base = nil;
		if(size)
			*size = 0;
		return 0;
	}

	p = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass == 0) {
		// Large object.
		if(base)
			*base = p;
		if(size)
			*size = s->npages<<PageShift;
		return 1;
	}

	n = s->elemsize;
	if(base) {
		i = ((byte*)v - p)/n;
		*base = p + i*n;
	}
	if(size)
		*size = n;

	return 1;
}

void
runtime_purgecachedstats(MCache *c)
{
	MHeap *h;
	int32 i;

	// Protected by either heap or GC lock.
	h = &runtime_mheap;
	mstats.heap_alloc += c->local_cachealloc;
	c->local_cachealloc = 0;
	mstats.nlookup += c->local_nlookup;
	c->local_nlookup = 0;
	h->largefree += c->local_largefree;
	c->local_largefree = 0;
	h->nlargefree += c->local_nlargefree;
	c->local_nlargefree = 0;
	for(i=0; i<(int32)nelem(c->local_nsmallfree); i++) {
		h->nsmallfree[i] += c->local_nsmallfree[i];
		c->local_nsmallfree[i] = 0;
	}
}

extern uintptr runtime_sizeof_C_MStats
	__asm__ (GOSYM_PREFIX "runtime.Sizeof_C_MStats");

// The size of the trailing by_size array differs between Go and C:
// NumSizeClasses was changed, but we cannot change the Go struct
// because of backward compatibility.
// sizeof_C_MStats is what C thinks the size of the Go struct is.

// Initialized in mallocinit because it's defined in go/runtime/mem.go.

#define MaxArena32 (2U<<30)

void
runtime_mallocinit(void)
{
	byte *p, *p1;
	uintptr arena_size, bitmap_size, spans_size, p_size;
	uintptr *pend;
	uintptr end;
	uintptr limit;
	uint64 i;
	bool reserved;

	runtime_sizeof_C_MStats = sizeof(MStats) - (NumSizeClasses - 61) * sizeof(mstats.by_size[0]);

	p = nil;
	p_size = 0;
	arena_size = 0;
	bitmap_size = 0;
	spans_size = 0;
	reserved = false;

	// for 64-bit build
	USED(p);
	USED(p_size);
	USED(arena_size);
	USED(bitmap_size);
	USED(spans_size);

	runtime_InitSizes();

	if(runtime_class_to_size[TinySizeClass] != TinySize)
		runtime_throw("bad TinySizeClass");

	// limit = runtime_memlimit();
	// See https://code.google.com/p/go/issues/detail?id=5049
	// TODO(rsc): Fix after 1.1.
	limit = 0;

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found. The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 128 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 128 GB region takes away 37 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 11 bits
		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer, which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of the conservative garbage collector not collecting memory
		// because some non-pointer block of memory had a bit pattern
		// that matched a memory address.
		//
		// Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails we fall back to the 32-bit memory mechanism.
		arena_size = MaxMem;
		bitmap_size = arena_size / (sizeof(void*)*8/4);
		spans_size = arena_size / PageSize * sizeof(runtime_mheap.spans[0]);
		spans_size = ROUND(spans_size, PageSize);
		for(i = 0; i < HeapBaseOptions; i++) {
			p = HeapBase(i);
			p_size = bitmap_size + spans_size + arena_size + PageSize;
			p = runtime_SysReserve(p, p_size, &reserved);
			if(p != nil)
				break;
		}
	}
	if (p == nil) {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2GB of mappings (256 MB),
		// along with a reservation for another 512 MB of memory.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope it's in the 2GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve 512 MB bitmap, enough
		// for 4GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.
		bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
		arena_size = 512<<20;
		spans_size = MaxArena32 / PageSize * sizeof(runtime_mheap.spans[0]);
		if(limit > 0 && arena_size+bitmap_size+spans_size > limit) {
			bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
			arena_size = bitmap_size * 8;
			spans_size = arena_size / PageSize * sizeof(runtime_mheap.spans[0]);
		}
		spans_size = ROUND(spans_size, PageSize);

		// SysReserve treats the address we ask for, end, as a hint,
		// not as an absolute requirement. If we ask for the end
		// of the data segment but the operating system requires
		// a little more space before we can start allocating, it will
		// give out a slightly higher pointer. Except QEMU, which
		// is buggy, as usual: it won't adjust the pointer upward.
		// So adjust it upward a little bit ourselves: 1/4 MB to get
		// away from the running binary image and then round up
		// to a MB boundary.
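		// For illustration, with hypothetical addresses: if the data
		// segment ends at 0x0804a000, then end + (1<<18) = 0x0808a000,
		// and rounding up to a 1 MB boundary yields p = 0x08100000.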
		end = 0;
		pend = &__go_end;
		if(pend != nil)
			end = *pend;
		p = (byte*)ROUND(end + (1<<18), 1<<20);
		p_size = bitmap_size + spans_size + arena_size + PageSize;
		p = runtime_SysReserve(p, p_size, &reserved);
		if(p == nil)
			runtime_throw("runtime: cannot reserve arena virtual address space");
	}

	// PageSize can be larger than OS definition of page size,
	// so SysReserve can give us a PageSize-unaligned pointer.
	// To overcome this we ask for PageSize more and round up the pointer.
	p1 = (byte*)ROUND((uintptr)p, PageSize);

	runtime_mheap.spans = (MSpan**)p1;
	runtime_mheap.bitmap = p1 + spans_size;
	runtime_mheap.arena_start = p1 + spans_size + bitmap_size;
	runtime_mheap.arena_used = runtime_mheap.arena_start;
	runtime_mheap.arena_end = p + p_size;
	runtime_mheap.arena_reserved = reserved;

	if(((uintptr)runtime_mheap.arena_start & (PageSize-1)) != 0)
		runtime_throw("misrounded allocation in mallocinit");

	// Initialize the rest of the allocator.
	runtime_MHeap_Init(&runtime_mheap);
	runtime_m()->mcache = runtime_allocmcache();

	// See if it works.
	runtime_free(runtime_malloc(TinySize));
}

void*
runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
{
	byte *p, *p_end;
	uintptr p_size;
	bool reserved;

	if(n > (uintptr)(h->arena_end - h->arena_used)) {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		byte *new_end;

		p_size = ROUND(n + PageSize, 256<<20);
		new_end = h->arena_end + p_size;
		if(new_end <= h->arena_start + MaxArena32) {
			// TODO: It would be bad if part of the arena
			// is reserved and part is not.
			p = runtime_SysReserve(h->arena_end, p_size, &reserved);
			if(p == h->arena_end) {
				h->arena_end = new_end;
				h->arena_reserved = reserved;
			}
			else if(p+p_size <= h->arena_start + MaxArena32) {
				// Keep everything page-aligned.
				// Our pages are bigger than hardware pages.
				h->arena_end = p+p_size;
				h->arena_used = p + (-(uintptr)p&(PageSize-1));
				h->arena_reserved = reserved;
			} else {
				uint64 stat;
				stat = 0;
				runtime_SysFree(p, p_size, &stat);
			}
		}
	}
	if(n <= (uintptr)(h->arena_end - h->arena_used)) {
		// Keep taking from our reservation.
		p = h->arena_used;
		runtime_SysMap(p, n, h->arena_reserved, &mstats.heap_sys);
		h->arena_used += n;
		runtime_MHeap_MapBits(h);
		runtime_MHeap_MapSpans(h);

		if(((uintptr)p & (PageSize-1)) != 0)
			runtime_throw("misrounded allocation in MHeap_SysAlloc");
		return p;
	}

	// If using 64-bit, our reservation is all we have.
	if((uintptr)(h->arena_end - h->arena_start) >= MaxArena32)
		return nil;

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
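	// (The extra PageSize requested below leaves room to round the
	// OS-chosen pointer up to a PageSize boundary, further down,
	// without losing space for the n bytes we need.)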
	p_size = ROUND(n, PageSize) + PageSize;
	p = runtime_SysAlloc(p_size, &mstats.heap_sys);
	if(p == nil)
		return nil;

	if(p < h->arena_start || (uintptr)(p+p_size - h->arena_start) >= MaxArena32) {
		runtime_printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
			p, h->arena_start, h->arena_start+MaxArena32);
		runtime_SysFree(p, p_size, &mstats.heap_sys);
		return nil;
	}

	p_end = p + p_size;
	p += -(uintptr)p & (PageSize-1);
	if(p+n > h->arena_used) {
		h->arena_used = p+n;
		if(p_end > h->arena_end)
			h->arena_end = p_end;
		runtime_MHeap_MapBits(h);
		runtime_MHeap_MapSpans(h);
	}

	if(((uintptr)p & (PageSize-1)) != 0)
		runtime_throw("misrounded allocation in MHeap_SysAlloc");
	return p;
}

static struct
{
	Lock;
	byte*	pos;
	byte*	end;
} persistent;

enum
{
	PersistentAllocChunk	= 256<<10,
	PersistentAllocMaxBlock	= 64<<10,	// VM reservation granularity is 64K on windows
};

// Wrapper around SysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
void*
runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat)
{
	byte *p;

	if(align != 0) {
		if(align&(align-1))
			runtime_throw("persistentalloc: align is not a power of 2");
		if(align > PageSize)
			runtime_throw("persistentalloc: align is too large");
	} else
		align = 8;
	if(size >= PersistentAllocMaxBlock)
		return runtime_SysAlloc(size, stat);
	runtime_lock(&persistent);
	persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
	if(persistent.pos + size > persistent.end) {
		persistent.pos = runtime_SysAlloc(PersistentAllocChunk, &mstats.other_sys);
		if(persistent.pos == nil) {
			runtime_unlock(&persistent);
			runtime_throw("runtime: cannot allocate memory");
		}
		persistent.end = persistent.pos + PersistentAllocChunk;
	}
	p = persistent.pos;
	persistent.pos += size;
	runtime_unlock(&persistent);
	if(stat != &mstats.other_sys) {
		// reaccount the allocation against provided stat
		runtime_xadd64(stat, size);
		runtime_xadd64(&mstats.other_sys, -(uint64)size);
	}
	return p;
}

static void
settype(MSpan *s, void *v, uintptr typ)
{
	uintptr size, ofs, j, t;
	uintptr ntypes, nbytes2, nbytes3;
	uintptr *data2;
	byte *data3;

	if(s->sizeclass == 0) {
		s->types.compression = MTypes_Single;
		s->types.data = typ;
		return;
	}
	size = s->elemsize;
	ofs = ((uintptr)v - (s->start<<PageShift)) / size;

	switch(s->types.compression) {
	case MTypes_Empty:
		ntypes = (s->npages << PageShift) / size;
		nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
		data3 = runtime_mallocgc(nbytes3, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
		s->types.compression = MTypes_Bytes;
		s->types.data = (uintptr)data3;
		((uintptr*)data3)[1] = typ;
		data3[8*sizeof(uintptr) + ofs] = 1;
		break;

	case MTypes_Words:
		((uintptr*)s->types.data)[ofs] = typ;
		break;

	case MTypes_Bytes:
		data3 = (byte*)s->types.data;
		for(j=1; j<8; j++) {
			if(((uintptr*)data3)[j] == typ) {
				break;
			}
			if(((uintptr*)data3)[j] == 0) {
				((uintptr*)data3)[j] = typ;
				break;
			}
		}
		if(j < 8) {
			data3[8*sizeof(uintptr) + ofs] = j;
		} else {
			ntypes = (s->npages << PageShift) / size;
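			// All seven usable type slots (indexes 1..7) are taken:
			// promote the span from the byte-indexed MTypes_Bytes
			// table to the one-word-per-object MTypes_Words form.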
			nbytes2 = ntypes * sizeof(uintptr);
			data2 = runtime_mallocgc(nbytes2, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
			s->types.compression = MTypes_Words;
			s->types.data = (uintptr)data2;

			// Move the contents of data3 to data2. Then deallocate data3.
			for(j=0; j<ntypes; j++) {
				t = data3[8*sizeof(uintptr) + j];
				t = ((uintptr*)data3)[t];
				data2[j] = t;
			}
			data2[ofs] = typ;
		}
		break;
	}
}

uintptr
runtime_gettype(void *v)
{
	MSpan *s;
	uintptr t, ofs;
	byte *data;

	s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
	if(s != nil) {
		t = 0;
		switch(s->types.compression) {
		case MTypes_Empty:
			break;
		case MTypes_Single:
			t = s->types.data;
			break;
		case MTypes_Words:
			ofs = (uintptr)v - (s->start<<PageShift);
			t = ((uintptr*)s->types.data)[ofs/s->elemsize];
			break;
		case MTypes_Bytes:
			ofs = (uintptr)v - (s->start<<PageShift);
			data = (byte*)s->types.data;
			t = data[8*sizeof(uintptr) + ofs/s->elemsize];
			t = ((uintptr*)data)[t];
			break;
		default:
			runtime_throw("runtime_gettype: invalid compression kind");
		}
		if(0) {
			runtime_printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
		}
		return t;
	}
	return 0;
}

// Runtime stubs.

void*
runtime_mal(uintptr n)
{
	return runtime_mallocgc(n, 0, 0);
}

func new(typ *Type) (ret *uint8) {
	ret = runtime_mallocgc(typ->__size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&KindNoPointers ? FlagNoScan : 0);
}

static void*
cnew(const Type *typ, intgo n, int32 objtyp)
{
	if((objtyp&(PtrSize-1)) != objtyp)
		runtime_throw("runtime: invalid objtyp");
	if(n < 0 || (typ->__size > 0 && (uintptr)n > (MaxMem/typ->__size)))
		runtime_panicstring("runtime: allocation size out of range");
	return runtime_mallocgc(typ->__size*n, (uintptr)typ | objtyp, typ->kind&KindNoPointers ? FlagNoScan : 0);
}

// same as runtime_new, but callable from C
void*
runtime_cnew(const Type *typ)
{
	return cnew(typ, 1, TypeInfo_SingleObject);
}

void*
runtime_cnewarray(const Type *typ, intgo n)
{
	return cnew(typ, n, TypeInfo_Array);
}

func GC() {
	runtime_gc(2);	// force GC and do eager sweep
}

func SetFinalizer(obj Eface, finalizer Eface) {
	byte *base;
	uintptr size;
	const FuncType *ft;
	const Type *fint;
	const PtrType *ot;

	if(obj.__type_descriptor == nil) {
		runtime_printf("runtime.SetFinalizer: first argument is nil interface\n");
		goto throw;
	}
	if((obj.__type_descriptor->kind&kindMask) != GO_PTR) {
		runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.__type_descriptor->__reflection);
		goto throw;
	}
	ot = (const PtrType*)obj.type;
	// As an implementation detail we do not run finalizers for zero-sized objects,
	// because we use &runtime_zerobase for all such allocations.
	if(ot->__element_type != nil && ot->__element_type->__size == 0)
		return;
	// The following check is required for cases where a user passes a pointer
	// to a composite literal, but the compiler makes it a pointer to a global.
	// For example:
	//	var Foo = &Object{}
	//	func main() {
	//		runtime.SetFinalizer(Foo, nil)
	//	}
	// See issue 7656.
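	// Such a global lies outside [arena_start, arena_used), so the
	// range check below ignores it rather than throwing.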
	if((byte*)obj.__object < runtime_mheap.arena_start || runtime_mheap.arena_used <= (byte*)obj.__object)
		return;
	if(!runtime_mlookup(obj.__object, &base, &size, nil) || obj.__object != base) {
		// As an implementation detail we allow setting finalizers for an inner byte
		// of an object if it could come from the tiny allocator (see mallocgc for details).
		if(ot->__element_type == nil || (ot->__element_type->kind&KindNoPointers) == 0 || ot->__element_type->__size >= TinySize) {
			runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block (%p)\n", obj.__object);
			goto throw;
		}
	}
	if(finalizer.__type_descriptor != nil) {
		runtime_createfing();
		if((finalizer.__type_descriptor->kind&kindMask) != GO_FUNC)
			goto badfunc;
		ft = (const FuncType*)finalizer.__type_descriptor;
		if(ft->__dotdotdot || ft->__in.__count != 1)
			goto badfunc;
		fint = *(Type**)ft->__in.__values;
		if(__go_type_descriptors_equal(fint, obj.__type_descriptor)) {
			// ok - same type
		} else if((fint->kind&kindMask) == GO_PTR && (fint->__uncommon == nil || fint->__uncommon->__name == nil || obj.type->__uncommon == nil || obj.type->__uncommon->__name == nil) && __go_type_descriptors_equal(((const PtrType*)fint)->__element_type, ((const PtrType*)obj.type)->__element_type)) {
			// ok - not same type, but both pointers,
			// one or the other is unnamed, and same element type, so assignable.
		} else if((fint->kind&kindMask) == GO_INTERFACE && ((const InterfaceType*)fint)->__methods.__count == 0) {
			// ok - satisfies empty interface
		} else if((fint->kind&kindMask) == GO_INTERFACE && __go_convert_interface_2(fint, obj.__type_descriptor, 1) != nil) {
			// ok - satisfies non-empty interface
		} else
			goto badfunc;

		ot = (const PtrType*)obj.__type_descriptor;
		if(!runtime_addfinalizer(obj.__object, *(FuncVal**)finalizer.__object, ft, ot)) {
			runtime_printf("runtime.SetFinalizer: finalizer already set\n");
			goto throw;
		}
	} else {
		// NOTE: asking to remove a finalizer when there currently isn't one set is OK.
		runtime_removefinalizer(obj.__object);
	}
	return;

badfunc:
	runtime_printf("runtime.SetFinalizer: cannot pass %S to finalizer %S\n", *obj.__type_descriptor->__reflection, *finalizer.__type_descriptor->__reflection);
throw:
	runtime_throw("runtime.SetFinalizer");
}