1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 // Garbage collector.
6
7 #include <unistd.h>
8
9 #include "runtime.h"
10 #include "arch.h"
11 #include "malloc.h"
12 #include "mgc0.h"
13 #include "race.h"
14 #include "go-type.h"
15
16 // Map gccgo field names to gc field names.
17 // Slice aka __go_open_array.
18 #define array __values
19 #define cap __capacity
20 // Iface aka __go_interface
21 #define tab __methods
22 // Eface aka __go_empty_interface.
23 #define type __type_descriptor
24 // Type aka __go_type_descriptor
25 #define kind __code
26 #define KindPtr GO_PTR
27 #define KindNoPointers GO_NO_POINTERS
28 // PtrType aka __go_ptr_type
29 #define elem __element_type
30
31 #ifdef USING_SPLIT_STACK
32
33 extern void * __splitstack_find (void *, void *, size_t *, void **, void **,
34 void **);
35
36 extern void * __splitstack_find_context (void *context[10], size_t *, void **,
37 void **, void **);
38
39 #endif
40
41 enum {
42 Debug = 0,
43 DebugMark = 0, // run second pass to check mark
44
45 // Four bits per word (see #defines below).
46 wordsPerBitmapWord = sizeof(void*)*8/4,
47 bitShift = sizeof(void*)*8/4,
48
49 handoffThreshold = 4,
50 IntermediateBufferCapacity = 64,
51
52 // Bits in type information
53 PRECISE = 1,
54 LOOP = 2,
55 PC_BITS = PRECISE | LOOP,
56 };
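// For illustration only (worked arithmetic, not an additional definition):
// with 8-byte pointers, sizeof(void*)*8/4 = 16, so each bitmap word
// describes 16 heap words and bitShift is 16; with 4-byte pointers both
// values are 8. The constants above remain the authoritative definitions.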
57
58 // Bits in per-word bitmap.
59 // #defines because enum might not be able to hold the values.
60 //
61 // Each word in the bitmap describes wordsPerBitmapWord words
62 // of heap memory. There are 4 bitmap bits dedicated to each heap word,
63 // so on a 64-bit system there is one bitmap word per 16 heap words.
64 // The bits in the word are packed together by type first, then by
65 // heap location, so each 64-bit bitmap word consists of, from top to bottom,
66 // the 16 bitSpecial bits for the corresponding heap words, then the 16 bitMarked bits,
67 // then the 16 bitNoPointers/bitBlockBoundary bits, then the 16 bitAllocated bits.
68 // This layout makes it easier to iterate over the bits of a given type.
69 //
70 // The bitmap starts at mheap.arena_start and extends *backward* from
71 // there. On a 64-bit system the off'th word in the arena is tracked by
72 // the off/16+1'th word before mheap.arena_start. (On a 32-bit system,
73 // the only difference is that the divisor is 8.)
74 //
75 // To pull out the bits corresponding to a given pointer p, we use:
76 //
77 // off = p - (uintptr*)mheap.arena_start; // word offset
78 // b = (uintptr*)mheap.arena_start - off/wordsPerBitmapWord - 1;
79 // shift = off % wordsPerBitmapWord
80 // bits = *b >> shift;
81 // /* then test bits & bitAllocated, bits & bitMarked, etc. */
82 //
83 #define bitAllocated ((uintptr)1<<(bitShift*0))
84 #define bitNoPointers ((uintptr)1<<(bitShift*1)) /* when bitAllocated is set */
85 #define bitMarked ((uintptr)1<<(bitShift*2)) /* when bitAllocated is set */
86 #define bitSpecial ((uintptr)1<<(bitShift*3)) /* when bitAllocated is set - has finalizer or being profiled */
87 #define bitBlockBoundary ((uintptr)1<<(bitShift*1)) /* when bitAllocated is NOT set */
88
89 #define bitMask (bitBlockBoundary | bitAllocated | bitMarked | bitSpecial)
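// Worked example (illustrative only, following the formulas above): on a
// 64-bit system a pointer whose word offset from arena_start is off = 37
// has its bits in the bitmap word at arena_start - 37/16 - 1, i.e. three
// words before arena_start, at shift = 37%16 = 5, so the relevant bits are
// (*b >> 5) & bitMask.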
90
91 // Holding worldsema grants an M the right to try to stop the world.
92 // The procedure is:
93 //
94 // runtime_semacquire(&runtime_worldsema);
95 // m->gcing = 1;
96 // runtime_stoptheworld();
97 //
98 // ... do stuff ...
99 //
100 // m->gcing = 0;
101 // runtime_semrelease(&runtime_worldsema);
102 // runtime_starttheworld();
103 //
104 uint32 runtime_worldsema = 1;
105
106 static int32 gctrace;
107
108 // The size of Workbuf is N*PageSize.
109 typedef struct Workbuf Workbuf;
110 struct Workbuf
111 {
112 #define SIZE (2*PageSize-sizeof(LFNode)-sizeof(uintptr))
113 LFNode node; // must be first
114 uintptr nobj;
115 Obj obj[SIZE/sizeof(Obj) - 1];
116 uint8 _padding[SIZE%sizeof(Obj) + sizeof(Obj)];
117 #undef SIZE
118 };
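// Note on the sizing above: obj[] and _padding together occupy exactly SIZE
// bytes, so sizeof(Workbuf) should come out to 2*PageSize; scanblock checks
// that sizeof(Workbuf) % PageSize == 0 and throws otherwise.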
119
120 typedef struct Finalizer Finalizer;
121 struct Finalizer
122 {
123 void (*fn)(void*);
124 void *arg;
125 const struct __go_func_type *ft;
126 };
127
128 typedef struct FinBlock FinBlock;
129 struct FinBlock
130 {
131 FinBlock *alllink;
132 FinBlock *next;
133 int32 cnt;
134 int32 cap;
135 Finalizer fin[1];
136 };
137
138 static G *fing;
139 static FinBlock *finq; // list of finalizers that are to be executed
140 static FinBlock *finc; // cache of free blocks
141 static FinBlock *allfin; // list of all blocks
142 static Lock finlock;
143 static int32 fingwait;
144
145 static void runfinq(void*);
146 static Workbuf* getempty(Workbuf*);
147 static Workbuf* getfull(Workbuf*);
148 static void putempty(Workbuf*);
149 static Workbuf* handoff(Workbuf*);
150
151 static struct {
152 uint64 full; // lock-free list of full blocks
153 uint64 empty; // lock-free list of empty blocks
154 byte pad0[CacheLineSize]; // prevents false-sharing between full/empty and nproc/nwait
155 uint32 nproc;
156 volatile uint32 nwait;
157 volatile uint32 ndone;
158 volatile uint32 debugmarkdone;
159 Note alldone;
160 ParFor *markfor;
161 ParFor *sweepfor;
162
163 Lock;
164 byte *chunk;
165 uintptr nchunk;
166
167 Obj *roots;
168 uint32 nroot;
169 uint32 rootcap;
170 } work;
171
172 enum {
173 // TODO(atom): to be expanded in a next CL
174 GC_DEFAULT_PTR = GC_NUM_INSTR,
175 };
176
177 // PtrTarget and BitTarget are structures used by intermediate buffers.
178 // The intermediate buffers hold GC data before it
179 // is moved/flushed to the work buffer (Workbuf).
180 // The size of an intermediate buffer is very small,
181 // such as 32 or 64 elements.
182 typedef struct PtrTarget PtrTarget;
183 struct PtrTarget
184 {
185 void *p;
186 uintptr ti;
187 };
188
189 typedef struct BitTarget BitTarget;
190 struct BitTarget
191 {
192 void *p;
193 uintptr ti;
194 uintptr *bitp, shift;
195 };
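// A sketch of how the intermediate buffers are used (see scanblock and
// flushptrbuf below): scanblock appends candidate pointers with
//	*ptrbufpos++ = (PtrTarget){obj, objti};
//	if(ptrbufpos == ptrbuf_end)
//		flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj, bitbuf);
// and flushptrbuf converts the surviving entries into BitTargets, marks them
// while holding the file-static lock, and enqueues them as Objs in the work
// buffer.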
196
197 typedef struct BufferList BufferList;
198 struct BufferList
199 {
200 PtrTarget ptrtarget[IntermediateBufferCapacity];
201 BitTarget bittarget[IntermediateBufferCapacity];
202 BufferList *next;
203 };
204 static BufferList *bufferList;
205
206 static Lock lock;
207 static Type *itabtype;
208
209 static void enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj);
210
211 // flushptrbuf moves data from the PtrTarget buffer to the work buffer.
212 // The PtrTarget buffer contains blocks irrespective of whether the blocks have been marked or scanned,
213 // while the work buffer contains blocks which have been marked
214 // and are prepared to be scanned by the garbage collector.
215 //
216 // _wp, _wbuf, _nobj are input/output parameters; they specify the work buffer.
217 // bitbuf holds temporary data generated by this function.
218 //
219 // A simplified drawing explaining how the todo-list moves from one structure to another:
220 //
221 // scanblock
222 // (find pointers)
223 // Obj ------> PtrTarget (pointer targets)
224 // ↑ |
225 // | | flushptrbuf (1st part,
226 // | | find block start)
227 // | ↓
228 // `--------- BitTarget (pointer targets and the corresponding locations in bitmap)
229 // flushptrbuf
230 // (2nd part, mark and enqueue)
231 static void
232 flushptrbuf(PtrTarget *ptrbuf, PtrTarget **ptrbufpos, Obj **_wp, Workbuf **_wbuf, uintptr *_nobj, BitTarget *bitbuf)
233 {
234 byte *p, *arena_start, *obj;
235 uintptr size, *bitp, bits, shift, j, x, xbits, off, nobj, ti, n;
236 MSpan *s;
237 PageID k;
238 Obj *wp;
239 Workbuf *wbuf;
240 PtrTarget *ptrbuf_end;
241 BitTarget *bitbufpos, *bt;
242
243 arena_start = runtime_mheap.arena_start;
244
245 wp = *_wp;
246 wbuf = *_wbuf;
247 nobj = *_nobj;
248
249 ptrbuf_end = *ptrbufpos;
250 n = ptrbuf_end - ptrbuf;
251 *ptrbufpos = ptrbuf;
252
253 // If buffer is nearly full, get a new one.
254 if(wbuf == nil || nobj+n >= nelem(wbuf->obj)) {
255 if(wbuf != nil)
256 wbuf->nobj = nobj;
257 wbuf = getempty(wbuf);
258 wp = wbuf->obj;
259 nobj = 0;
260
261 if(n >= nelem(wbuf->obj))
262 runtime_throw("ptrbuf has to be smaller than WorkBuf");
263 }
264
265 // TODO(atom): This block is a branch of an if-then-else statement.
266 // The single-threaded branch may be added in a next CL.
267 {
268 // Multi-threaded version.
269
270 bitbufpos = bitbuf;
271
272 while(ptrbuf < ptrbuf_end) {
273 obj = ptrbuf->p;
274 ti = ptrbuf->ti;
275 ptrbuf++;
276
277 // obj belongs to interval [mheap.arena_start, mheap.arena_used).
278 if(Debug > 1) {
279 if(obj < runtime_mheap.arena_start || obj >= runtime_mheap.arena_used)
280 runtime_throw("object is outside of mheap");
281 }
282
283 // obj may be a pointer to a live object.
284 // Try to find the beginning of the object.
285
286 // Round down to word boundary.
287 if(((uintptr)obj & ((uintptr)PtrSize-1)) != 0) {
288 obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));
289 ti = 0;
290 }
291
292 // Find bits for this word.
293 off = (uintptr*)obj - (uintptr*)arena_start;
294 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
295 shift = off % wordsPerBitmapWord;
296 xbits = *bitp;
297 bits = xbits >> shift;
298
299 // Pointing at the beginning of a block?
300 if((bits & (bitAllocated|bitBlockBoundary)) != 0)
301 goto found;
302
303 ti = 0;
304
305 // Pointing just past the beginning?
306 // Scan backward a little to find a block boundary.
307 for(j=shift; j-->0; ) {
308 if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) {
309 obj = (byte*)obj - (shift-j)*PtrSize;
310 shift = j;
311 bits = xbits>>shift;
312 goto found;
313 }
314 }
315
316 // Otherwise consult span table to find beginning.
317 // (Manually inlined copy of MHeap_LookupMaybe.)
318 k = (uintptr)obj>>PageShift;
319 x = k;
320 if(sizeof(void*) == 8)
321 x -= (uintptr)arena_start>>PageShift;
322 s = runtime_mheap.map[x];
323 if(s == nil || k < s->start || k - s->start >= s->npages || s->state != MSpanInUse)
324 continue;
325 p = (byte*)((uintptr)s->start<<PageShift);
326 if(s->sizeclass == 0) {
327 obj = p;
328 } else {
329 if((byte*)obj >= (byte*)s->limit)
330 continue;
331 size = s->elemsize;
332 int32 i = ((byte*)obj - p)/size;
333 obj = p+i*size;
334 }
335
336 // Now that we know the object header, reload bits.
337 off = (uintptr*)obj - (uintptr*)arena_start;
338 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
339 shift = off % wordsPerBitmapWord;
340 xbits = *bitp;
341 bits = xbits >> shift;
342
343 found:
344 // Now we have bits, bitp, and shift correct for
345 // obj pointing at the base of the object.
346 // Only care about allocated and not marked.
347 if((bits & (bitAllocated|bitMarked)) != bitAllocated)
348 continue;
349
350 *bitbufpos++ = (BitTarget){obj, ti, bitp, shift};
351 }
352
353 runtime_lock(&lock);
354 for(bt=bitbuf; bt<bitbufpos; bt++){
355 xbits = *bt->bitp;
356 bits = xbits >> bt->shift;
357 if((bits & bitMarked) != 0)
358 continue;
359
360 // Mark the block
361 *bt->bitp = xbits | (bitMarked << bt->shift);
362
363 // If object has no pointers, don't need to scan further.
364 if((bits & bitNoPointers) != 0)
365 continue;
366
367 obj = bt->p;
368
369 // Ask span about size class.
370 // (Manually inlined copy of MHeap_Lookup.)
371 x = (uintptr)obj >> PageShift;
372 if(sizeof(void*) == 8)
373 x -= (uintptr)arena_start>>PageShift;
374 s = runtime_mheap.map[x];
375
376 PREFETCH(obj);
377
378 *wp = (Obj){obj, s->elemsize, bt->ti};
379 wp++;
380 nobj++;
381 }
382 runtime_unlock(&lock);
383
384 // If another proc wants a pointer, give it some.
385 if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
386 wbuf->nobj = nobj;
387 wbuf = handoff(wbuf);
388 nobj = wbuf->nobj;
389 wp = wbuf->obj + nobj;
390 }
391 }
392
393 *_wp = wp;
394 *_wbuf = wbuf;
395 *_nobj = nobj;
396 }
397
398 // Program that scans the whole block and treats every block element as a potential pointer
399 static uintptr defaultProg[2] = {PtrSize, GC_DEFAULT_PTR};
400
401 // Local variables of a program fragment or loop
402 typedef struct Frame Frame;
403 struct Frame {
404 uintptr count, elemsize, b;
405 uintptr *loop_or_ret;
406 };
407
408 // scanblock scans a block of n bytes starting at pointer b for references
409 // to other objects, scanning any it finds recursively until there are no
410 // unscanned objects left. Instead of using an explicit recursion, it keeps
411 // a work list in the Workbuf* structures and loops in the main function
412 // body. Keeping an explicit work list is easier on the stack allocator and
413 // more efficient.
414 //
415 // wbuf: current work buffer
416 // wp: storage for next queued pointer (write pointer)
417 // nobj: number of queued objects
418 static void
419 scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
420 {
421 byte *b, *arena_start, *arena_used;
422 uintptr n, i, end_b, elemsize, ti, objti, count /* , type */;
423 uintptr *pc, precise_type, nominal_size;
424 void *obj;
425 const Type *t;
426 Slice *sliceptr;
427 Frame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4];
428 BufferList *scanbuffers;
429 PtrTarget *ptrbuf, *ptrbuf_end, *ptrbufpos;
430 BitTarget *bitbuf;
431 Eface *eface;
432 Iface *iface;
433
434 if(sizeof(Workbuf) % PageSize != 0)
435 runtime_throw("scanblock: size of Workbuf is suboptimal");
436
437 // Memory arena parameters.
438 arena_start = runtime_mheap.arena_start;
439 arena_used = runtime_mheap.arena_used;
440
441 stack_ptr = stack+nelem(stack)-1;
442
443 precise_type = false;
444 nominal_size = 0;
445
446 // Allocate ptrbuf, bitbuf
447 {
448 runtime_lock(&lock);
449
450 if(bufferList == nil) {
451 bufferList = runtime_SysAlloc(sizeof(*bufferList));
452 bufferList->next = nil;
453 }
454 scanbuffers = bufferList;
455 bufferList = bufferList->next;
456
457 ptrbuf = &scanbuffers->ptrtarget[0];
458 ptrbuf_end = &scanbuffers->ptrtarget[0] + nelem(scanbuffers->ptrtarget);
459 bitbuf = &scanbuffers->bittarget[0];
460
461 runtime_unlock(&lock);
462 }
463
464 ptrbufpos = ptrbuf;
465
466 goto next_block;
467
468 for(;;) {
469 // Each iteration scans the block b of length n, queueing pointers in
470 // the work buffer.
471 if(Debug > 1) {
472 runtime_printf("scanblock %p %D\n", b, (int64)n);
473 }
474
475 if(ti != 0 && 0) {
476 pc = (uintptr*)(ti & ~(uintptr)PC_BITS);
477 precise_type = (ti & PRECISE);
478 stack_top.elemsize = pc[0];
479 if(!precise_type)
480 nominal_size = pc[0];
481 if(ti & LOOP) {
482 stack_top.count = 0; // 0 means an infinite number of iterations
483 stack_top.loop_or_ret = pc+1;
484 } else {
485 stack_top.count = 1;
486 }
487 } else if(UseSpanType && 0) {
488 #if 0
489 type = runtime_gettype(b);
490 if(type != 0) {
491 t = (Type*)(type & ~(uintptr)(PtrSize-1));
492 switch(type & (PtrSize-1)) {
493 case TypeInfo_SingleObject:
494 pc = (uintptr*)t->gc;
495 precise_type = true; // type information about 'b' is precise
496 stack_top.count = 1;
497 stack_top.elemsize = pc[0];
498 break;
499 case TypeInfo_Array:
500 pc = (uintptr*)t->gc;
501 if(pc[0] == 0)
502 goto next_block;
503 precise_type = true; // type information about 'b' is precise
504 stack_top.count = 0; // 0 means an infinite number of iterations
505 stack_top.elemsize = pc[0];
506 stack_top.loop_or_ret = pc+1;
507 break;
508 case TypeInfo_Map:
509 // TODO(atom): to be expanded in a next CL
510 pc = defaultProg;
511 break;
512 default:
513 runtime_throw("scanblock: invalid type");
514 return;
515 }
516 } else {
517 pc = defaultProg;
518 }
519 #endif
520 } else {
521 pc = defaultProg;
522 }
523
524 pc++;
525 stack_top.b = (uintptr)b;
526
527 end_b = (uintptr)b + n - PtrSize;
528
529 for(;;) {
530 obj = nil;
531 objti = 0;
532 switch(pc[0]) {
533 case GC_PTR:
534 obj = *(void**)(stack_top.b + pc[1]);
535 objti = pc[2];
536 pc += 3;
537 break;
538
539 case GC_SLICE:
540 sliceptr = (Slice*)(stack_top.b + pc[1]);
541 if(sliceptr->cap != 0) {
542 obj = sliceptr->array;
543 objti = pc[2] | PRECISE | LOOP;
544 }
545 pc += 3;
546 break;
547
548 case GC_APTR:
549 obj = *(void**)(stack_top.b + pc[1]);
550 pc += 2;
551 break;
552
553 case GC_STRING:
554 obj = *(void**)(stack_top.b + pc[1]);
555 pc += 2;
556 break;
557
558 case GC_EFACE:
559 eface = (Eface*)(stack_top.b + pc[1]);
560 pc += 2;
561 if(eface->type != nil && ((byte*)eface->__object >= arena_start && (byte*)eface->__object < arena_used)) {
562 t = eface->type;
563 if(t->__size <= sizeof(void*)) {
564 if((t->kind & KindNoPointers))
565 break;
566
567 obj = eface->__object;
568 if((t->kind & ~KindNoPointers) == KindPtr)
569 // objti = (uintptr)((PtrType*)t)->elem->gc;
570 objti = 0;
571 } else {
572 obj = eface->__object;
573 // objti = (uintptr)t->gc;
574 objti = 0;
575 }
576 }
577 break;
578
579 case GC_IFACE:
580 iface = (Iface*)(stack_top.b + pc[1]);
581 pc += 2;
582 if(iface->tab == nil)
583 break;
584
585 // iface->tab
586 if((byte*)iface->tab >= arena_start && (byte*)iface->tab < arena_used) {
587 // *ptrbufpos++ = (struct PtrTarget){iface->tab, (uintptr)itabtype->gc};
588 *ptrbufpos++ = (struct PtrTarget){iface->tab, 0};
589 if(ptrbufpos == ptrbuf_end)
590 flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj, bitbuf);
591 }
592
593 // iface->data
594 if((byte*)iface->__object >= arena_start && (byte*)iface->__object < arena_used) {
595 // t = iface->tab->type;
596 t = nil;
597 if(t->__size <= sizeof(void*)) {
598 if((t->kind & KindNoPointers))
599 break;
600
601 obj = iface->__object;
602 if((t->kind & ~KindNoPointers) == KindPtr)
603 // objti = (uintptr)((const PtrType*)t)->elem->gc;
604 objti = 0;
605 } else {
606 obj = iface->__object;
607 // objti = (uintptr)t->gc;
608 objti = 0;
609 }
610 }
611 break;
612
613 case GC_DEFAULT_PTR:
614 while((i = stack_top.b) <= end_b) {
615 stack_top.b += PtrSize;
616 obj = *(byte**)i;
617 if((byte*)obj >= arena_start && (byte*)obj < arena_used) {
618 *ptrbufpos++ = (struct PtrTarget){obj, 0};
619 if(ptrbufpos == ptrbuf_end)
620 flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj, bitbuf);
621 }
622 }
623 goto next_block;
624
625 case GC_END:
626 if(--stack_top.count != 0) {
627 // Next iteration of a loop if possible.
628 elemsize = stack_top.elemsize;
629 stack_top.b += elemsize;
630 if(stack_top.b + elemsize <= end_b+PtrSize) {
631 pc = stack_top.loop_or_ret;
632 continue;
633 }
634 i = stack_top.b;
635 } else {
636 // Stack pop if possible.
637 if(stack_ptr+1 < stack+nelem(stack)) {
638 pc = stack_top.loop_or_ret;
639 stack_top = *(++stack_ptr);
640 continue;
641 }
642 i = (uintptr)b + nominal_size;
643 }
644 if(!precise_type) {
645 // Quickly scan [b+i,b+n) for possible pointers.
646 for(; i<=end_b; i+=PtrSize) {
647 if(*(byte**)i != nil) {
648 // Found a value that may be a pointer.
649 // Do a rescan of the entire block.
650 enqueue((Obj){b, n, 0}, &wbuf, &wp, &nobj);
651 break;
652 }
653 }
654 }
655 goto next_block;
656
657 case GC_ARRAY_START:
658 i = stack_top.b + pc[1];
659 count = pc[2];
660 elemsize = pc[3];
661 pc += 4;
662
663 // Stack push.
664 *stack_ptr-- = stack_top;
665 stack_top = (Frame){count, elemsize, i, pc};
666 continue;
667
668 case GC_ARRAY_NEXT:
669 if(--stack_top.count != 0) {
670 stack_top.b += stack_top.elemsize;
671 pc = stack_top.loop_or_ret;
672 } else {
673 // Stack pop.
674 stack_top = *(++stack_ptr);
675 pc += 1;
676 }
677 continue;
678
679 case GC_CALL:
680 // Stack push.
681 *stack_ptr-- = stack_top;
682 stack_top = (Frame){1, 0, stack_top.b + pc[1], pc+3 /*return address*/};
683 pc = (uintptr*)pc[2]; // target of the CALL instruction
684 continue;
685
686 case GC_MAP_PTR:
687 // TODO(atom): to be expanded in a next CL. Same as GC_APTR for now.
688 obj = *(void**)(stack_top.b + pc[1]);
689 pc += 3;
690 break;
691
692 case GC_REGION:
693 // TODO(atom): to be expanded in a next CL. Same as GC_APTR for now.
694 obj = (void*)(stack_top.b + pc[1]);
695 pc += 4;
696 break;
697
698 default:
699 runtime_throw("scanblock: invalid GC instruction");
700 return;
701 }
702
703 if((byte*)obj >= arena_start && (byte*)obj < arena_used) {
704 *ptrbufpos++ = (PtrTarget){obj, objti};
705 if(ptrbufpos == ptrbuf_end)
706 flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj, bitbuf);
707 }
708 }
709
710 next_block:
711 // Done scanning [b, b+n). Prepare for the next iteration of
712 // the loop by setting b, n, ti to the parameters for the next block.
713
714 if(nobj == 0) {
715 flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj, bitbuf);
716
717 if(nobj == 0) {
718 if(!keepworking) {
719 if(wbuf)
720 putempty(wbuf);
721 goto endscan;
722 }
723 // Emptied our buffer: refill.
724 wbuf = getfull(wbuf);
725 if(wbuf == nil)
726 goto endscan;
727 nobj = wbuf->nobj;
728 wp = wbuf->obj + wbuf->nobj;
729 }
730 }
731
732 // Fetch b from the work buffer.
733 --wp;
734 b = wp->p;
735 n = wp->n;
736 ti = wp->ti;
737 nobj--;
738 }
739
740 endscan:
741 runtime_lock(&lock);
742 scanbuffers->next = bufferList;
743 bufferList = scanbuffers;
744 runtime_unlock(&lock);
745 }
746
747 // debug_scanblock is the debug copy of scanblock.
748 // It is simpler, slower, single-threaded, recursive,
749 // and uses bitSpecial as the mark bit.
750 static void
751 debug_scanblock(byte *b, uintptr n)
752 {
753 byte *obj, *p;
754 void **vp;
755 uintptr size, *bitp, bits, shift, i, xbits, off;
756 MSpan *s;
757
758 if(!DebugMark)
759 runtime_throw("debug_scanblock without DebugMark");
760
761 if((intptr)n < 0) {
762 runtime_printf("debug_scanblock %p %D\n", b, (int64)n);
763 runtime_throw("debug_scanblock");
764 }
765
766 // Align b to a word boundary.
767 off = (uintptr)b & (PtrSize-1);
768 if(off != 0) {
769 b += PtrSize - off;
770 n -= PtrSize - off;
771 }
772
773 vp = (void**)b;
774 n /= PtrSize;
775 for(i=0; i<(uintptr)n; i++) {
776 obj = (byte*)vp[i];
777
778 // Words outside the arena cannot be pointers.
779 if((byte*)obj < runtime_mheap.arena_start || (byte*)obj >= runtime_mheap.arena_used)
780 continue;
781
782 // Round down to word boundary.
783 obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));
784
785 // Consult span table to find beginning.
786 s = runtime_MHeap_LookupMaybe(&runtime_mheap, obj);
787 if(s == nil)
788 continue;
789
790 p = (byte*)((uintptr)s->start<<PageShift);
791 size = s->elemsize;
792 if(s->sizeclass == 0) {
793 obj = p;
794 } else {
795 if((byte*)obj >= (byte*)s->limit)
796 continue;
797 int32 i = ((byte*)obj - p)/size;
798 obj = p+i*size;
799 }
800
801 // Now that we know the object header, reload bits.
802 off = (uintptr*)obj - (uintptr*)runtime_mheap.arena_start;
803 bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
804 shift = off % wordsPerBitmapWord;
805 xbits = *bitp;
806 bits = xbits >> shift;
807
808 // Now we have bits, bitp, and shift correct for
809 // obj pointing at the base of the object.
810 // If not allocated or already marked, done.
811 if((bits & bitAllocated) == 0 || (bits & bitSpecial) != 0) // NOTE: bitSpecial not bitMarked
812 continue;
813 *bitp |= bitSpecial<<shift;
814 if(!(bits & bitMarked))
815 runtime_printf("found unmarked block %p in %p\n", obj, vp+i);
816
817 // If object has no pointers, don't need to scan further.
818 if((bits & bitNoPointers) != 0)
819 continue;
820
821 debug_scanblock(obj, size);
822 }
823 }
824
825 // Append obj to the work buffer.
826 // _wbuf, _wp, _nobj are input/output parameters; they specify the work buffer.
827 static void
828 enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj)
829 {
830 uintptr nobj, off;
831 Obj *wp;
832 Workbuf *wbuf;
833
834 if(Debug > 1)
835 runtime_printf("append obj(%p %D %p)\n", obj.p, (int64)obj.n, obj.ti);
836
837 // Align obj.p to a word boundary.
838 off = (uintptr)obj.p & (PtrSize-1);
839 if(off != 0) {
840 obj.p += PtrSize - off;
841 obj.n -= PtrSize - off;
842 obj.ti = 0;
843 }
844
845 if(obj.p == nil || obj.n == 0)
846 return;
847
848 // Load work buffer state
849 wp = *_wp;
850 wbuf = *_wbuf;
851 nobj = *_nobj;
852
853 // If another proc wants a pointer, give it some.
854 if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
855 wbuf->nobj = nobj;
856 wbuf = handoff(wbuf);
857 nobj = wbuf->nobj;
858 wp = wbuf->obj + nobj;
859 }
860
861 // If buffer is full, get a new one.
862 if(wbuf == nil || nobj >= nelem(wbuf->obj)) {
863 if(wbuf != nil)
864 wbuf->nobj = nobj;
865 wbuf = getempty(wbuf);
866 wp = wbuf->obj;
867 nobj = 0;
868 }
869
870 *wp = obj;
871 wp++;
872 nobj++;
873
874 // Save work buffer state
875 *_wp = wp;
876 *_wbuf = wbuf;
877 *_nobj = nobj;
878 }
879
880 static void
881 markroot(ParFor *desc, uint32 i)
882 {
883 Obj *wp;
884 Workbuf *wbuf;
885 uintptr nobj;
886
887 USED(&desc);
888 wp = nil;
889 wbuf = nil;
890 nobj = 0;
891 enqueue(work.roots[i], &wbuf, &wp, &nobj);
892 scanblock(wbuf, wp, nobj, false);
893 }
894
895 // Get an empty work buffer off the work.empty list,
896 // allocating new buffers as needed.
897 static Workbuf*
898 getempty(Workbuf *b)
899 {
900 if(b != nil)
901 runtime_lfstackpush(&work.full, &b->node);
902 b = (Workbuf*)runtime_lfstackpop(&work.empty);
903 if(b == nil) {
904 // Need to allocate.
905 runtime_lock(&work);
906 if(work.nchunk < sizeof *b) {
907 work.nchunk = 1<<20;
908 work.chunk = runtime_SysAlloc(work.nchunk);
909 }
910 b = (Workbuf*)work.chunk;
911 work.chunk += sizeof *b;
912 work.nchunk -= sizeof *b;
913 runtime_unlock(&work);
914 }
915 b->nobj = 0;
916 return b;
917 }
918
919 static void
920 putempty(Workbuf *b)
921 {
922 runtime_lfstackpush(&work.empty, &b->node);
923 }
924
925 // Get a full work buffer off the work.full list, or return nil.
926 static Workbuf*
927 getfull(Workbuf *b)
928 {
929 M *m;
930 int32 i;
931
932 if(b != nil)
933 runtime_lfstackpush(&work.empty, &b->node);
934 b = (Workbuf*)runtime_lfstackpop(&work.full);
935 if(b != nil || work.nproc == 1)
936 return b;
937
938 m = runtime_m();
939 runtime_xadd(&work.nwait, +1);
940 for(i=0;; i++) {
941 if(work.full != 0) {
942 runtime_xadd(&work.nwait, -1);
943 b = (Workbuf*)runtime_lfstackpop(&work.full);
944 if(b != nil)
945 return b;
946 runtime_xadd(&work.nwait, +1);
947 }
948 if(work.nwait == work.nproc)
949 return nil;
950 if(i < 10) {
951 m->gcstats.nprocyield++;
952 runtime_procyield(20);
953 } else if(i < 20) {
954 m->gcstats.nosyield++;
955 runtime_osyield();
956 } else {
957 m->gcstats.nsleep++;
958 runtime_usleep(100);
959 }
960 }
961 }
962
963 static Workbuf*
964 handoff(Workbuf *b)
965 {
966 M *m;
967 int32 n;
968 Workbuf *b1;
969
970 m = runtime_m();
971
972 // Make new buffer with half of b's pointers.
973 b1 = getempty(nil);
974 n = b->nobj/2;
975 b->nobj -= n;
976 b1->nobj = n;
977 runtime_memmove(b1->obj, b->obj+b->nobj, n*sizeof b1->obj[0]);
978 m->gcstats.nhandoff++;
979 m->gcstats.nhandoffcnt += n;
980
981 // Put b on full list - let first half of b get stolen.
982 runtime_lfstackpush(&work.full, &b->node);
983 return b1;
984 }
985
986 static void
987 addroot(Obj obj)
988 {
989 uint32 cap;
990 Obj *new;
991
992 if(work.nroot >= work.rootcap) {
993 cap = PageSize/sizeof(Obj);
994 if(cap < 2*work.rootcap)
995 cap = 2*work.rootcap;
996 new = (Obj*)runtime_SysAlloc(cap*sizeof(Obj));
997 if(work.roots != nil) {
998 runtime_memmove(new, work.roots, work.rootcap*sizeof(Obj));
999 runtime_SysFree(work.roots, work.rootcap*sizeof(Obj));
1000 }
1001 work.roots = new;
1002 work.rootcap = cap;
1003 }
1004 work.roots[work.nroot] = obj;
1005 work.nroot++;
1006 }
1007
1008 static void
1009 addstackroots(G *gp)
1010 {
1011 #ifdef USING_SPLIT_STACK
1012 M *mp;
1013 void* sp;
1014 size_t spsize;
1015 void* next_segment;
1016 void* next_sp;
1017 void* initial_sp;
1018
1019 if(gp == runtime_g()) {
1020 // Scanning our own stack.
1021 sp = __splitstack_find(nil, nil, &spsize, &next_segment,
1022 &next_sp, &initial_sp);
1023 } else if((mp = gp->m) != nil && mp->helpgc) {
1024 // gchelper's stack is in active use and has no interesting pointers.
1025 return;
1026 } else {
1027 // Scanning another goroutine's stack.
1028 // The goroutine is usually asleep (the world is stopped).
1029
1030 // The exception is that if the goroutine is about to enter or might
1031 // have just exited a system call, it may be executing code such
1032 // as schedlock and may have needed to start a new stack segment.
1033 // Use the stack segment and stack pointer at the time of
1034 // the system call instead, since that won't change underfoot.
1035 if(gp->gcstack != nil) {
1036 sp = gp->gcstack;
1037 spsize = gp->gcstack_size;
1038 next_segment = gp->gcnext_segment;
1039 next_sp = gp->gcnext_sp;
1040 initial_sp = gp->gcinitial_sp;
1041 } else {
1042 sp = __splitstack_find_context(&gp->stack_context[0],
1043 &spsize, &next_segment,
1044 &next_sp, &initial_sp);
1045 }
1046 }
1047 if(sp != nil) {
1048 addroot((Obj){sp, spsize, 0});
1049 while((sp = __splitstack_find(next_segment, next_sp,
1050 &spsize, &next_segment,
1051 &next_sp, &initial_sp)) != nil)
1052 addroot((Obj){sp, spsize, 0});
1053 }
1054 #else
1055 M *mp;
1056 byte* bottom;
1057 byte* top;
1058
1059 if(gp == runtime_g()) {
1060 // Scanning our own stack.
1061 bottom = (byte*)&gp;
1062 } else if((mp = gp->m) != nil && mp->helpgc) {
1063 // gchelper's stack is in active use and has no interesting pointers.
1064 return;
1065 } else {
1066 // Scanning another goroutine's stack.
1067 // The goroutine is usually asleep (the world is stopped).
1068 bottom = (byte*)gp->gcnext_sp;
1069 if(bottom == nil)
1070 return;
1071 }
1072 top = (byte*)gp->gcinitial_sp + gp->gcstack_size;
1073 if(top > bottom)
1074 addroot((Obj){bottom, top - bottom, 0});
1075 else
1076 addroot((Obj){top, bottom - top, 0});
1077 #endif
1078 }
1079
1080 static void
1081 addfinroots(void *v)
1082 {
1083 uintptr size;
1084
1085 size = 0;
1086 if(!runtime_mlookup(v, (byte**)&v, &size, nil) || !runtime_blockspecial(v))
1087 runtime_throw("mark - finalizer inconsistency");
1088
1089 // Do not mark the finalizer block itself; just mark the things it points at.
1090 addroot((Obj){v, size, 0});
1091 }
1092
1093 static struct root_list* roots;
1094
1095 void
1096 __go_register_gc_roots (struct root_list* r)
1097 {
1098 // FIXME: This needs locking if multiple goroutines can call
1099 // dlopen simultaneously.
1100 r->next = roots;
1101 roots = r;
1102 }
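// Usage sketch (shape inferred from addroots below, not a definitive
// contract): a registered root_list carries roots[] entries of {decl, size}
// pairs terminated by a nil decl; callers hand such a list to
// __go_register_gc_roots, e.g. during program or shared-library startup.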
1103
1104 static void
1105 addroots(void)
1106 {
1107 struct root_list *pl;
1108 G *gp;
1109 FinBlock *fb;
1110 MSpan *s, **allspans;
1111 uint32 spanidx;
1112
1113 work.nroot = 0;
1114
1115 // mark data+bss.
1116 for(pl = roots; pl != nil; pl = pl->next) {
1117 struct root* pr = &pl->roots[0];
1118 while(1) {
1119 void *decl = pr->decl;
1120 if(decl == nil)
1121 break;
1122 addroot((Obj){decl, pr->size, 0});
1123 pr++;
1124 }
1125 }
1126
1127 addroot((Obj){(byte*)&runtime_m0, sizeof runtime_m0, 0});
1128 addroot((Obj){(byte*)&runtime_g0, sizeof runtime_g0, 0});
1129 addroot((Obj){(byte*)&runtime_allg, sizeof runtime_allg, 0});
1130 addroot((Obj){(byte*)&runtime_allm, sizeof runtime_allm, 0});
1131 runtime_MProf_Mark(addroot);
1132 runtime_time_scan(addroot);
1133 runtime_trampoline_scan(addroot);
1134
1135 // MSpan.types
1136 allspans = runtime_mheap.allspans;
1137 for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
1138 s = allspans[spanidx];
1139 if(s->state == MSpanInUse) {
1140 switch(s->types.compression) {
1141 case MTypes_Empty:
1142 case MTypes_Single:
1143 break;
1144 case MTypes_Words:
1145 case MTypes_Bytes:
1146 // TODO(atom): consider using defaultProg instead of 0
1147 addroot((Obj){(byte*)&s->types.data, sizeof(void*), 0});
1148 break;
1149 }
1150 }
1151 }
1152
1153 // stacks
1154 for(gp=runtime_allg; gp!=nil; gp=gp->alllink) {
1155 switch(gp->status){
1156 default:
1157 runtime_printf("unexpected G.status %d\n", gp->status);
1158 runtime_throw("mark - bad status");
1159 case Gdead:
1160 break;
1161 case Grunning:
1162 if(gp != runtime_g())
1163 runtime_throw("mark - world not stopped");
1164 addstackroots(gp);
1165 break;
1166 case Grunnable:
1167 case Gsyscall:
1168 case Gwaiting:
1169 addstackroots(gp);
1170 break;
1171 }
1172 }
1173
1174 runtime_walkfintab(addfinroots, addroot);
1175
1176 for(fb=allfin; fb; fb=fb->alllink)
1177 addroot((Obj){(byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]), 0});
1178
1179 addroot((Obj){(byte*)&work, sizeof work, 0});
1180 }
1181
1182 static bool
1183 handlespecial(byte *p, uintptr size)
1184 {
1185 void (*fn)(void*);
1186 const struct __go_func_type *ft;
1187 FinBlock *block;
1188 Finalizer *f;
1189
1190 if(!runtime_getfinalizer(p, true, &fn, &ft)) {
1191 runtime_setblockspecial(p, false);
1192 runtime_MProf_Free(p, size);
1193 return false;
1194 }
1195
1196 runtime_lock(&finlock);
1197 if(finq == nil || finq->cnt == finq->cap) {
1198 if(finc == nil) {
1199 finc = runtime_SysAlloc(PageSize);
1200 finc->cap = (PageSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
1201 finc->alllink = allfin;
1202 allfin = finc;
1203 }
1204 block = finc;
1205 finc = block->next;
1206 block->next = finq;
1207 finq = block;
1208 }
1209 f = &finq->fin[finq->cnt];
1210 finq->cnt++;
1211 f->fn = fn;
1212 f->ft = ft;
1213 f->arg = p;
1214 runtime_unlock(&finlock);
1215 return true;
1216 }
1217
1218 // Sweep frees or collects finalizers for blocks not marked in the mark phase.
1219 // It clears the mark bits in preparation for the next GC round.
1220 static void
1221 sweepspan(ParFor *desc, uint32 idx)
1222 {
1223 M *m;
1224 int32 cl, n, npages;
1225 uintptr size;
1226 byte *p;
1227 MCache *c;
1228 byte *arena_start;
1229 MLink head, *end;
1230 int32 nfree;
1231 byte *type_data;
1232 byte compression;
1233 uintptr type_data_inc;
1234 MSpan *s;
1235
1236 m = runtime_m();
1237
1238 USED(&desc);
1239 s = runtime_mheap.allspans[idx];
1240 if(s->state != MSpanInUse)
1241 return;
1242 arena_start = runtime_mheap.arena_start;
1243 p = (byte*)(s->start << PageShift);
1244 cl = s->sizeclass;
1245 size = s->elemsize;
1246 if(cl == 0) {
1247 n = 1;
1248 } else {
1249 // Chunk full of small blocks.
1250 npages = runtime_class_to_allocnpages[cl];
1251 n = (npages << PageShift) / size;
1252 }
1253 nfree = 0;
1254 end = &head;
1255 c = m->mcache;
1256
1257 type_data = (byte*)s->types.data;
1258 type_data_inc = sizeof(uintptr);
1259 compression = s->types.compression;
1260 switch(compression) {
1261 case MTypes_Bytes:
1262 type_data += 8*sizeof(uintptr);
1263 type_data_inc = 1;
1264 break;
1265 }
1266
1267 // Sweep through n objects of given size starting at p.
1268 // This thread owns the span now, so it can manipulate
1269 // the block bitmap without atomic operations.
1270 for(; n > 0; n--, p += size, type_data+=type_data_inc) {
1271 uintptr off, *bitp, shift, bits;
1272
1273 off = (uintptr*)p - (uintptr*)arena_start;
1274 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
1275 shift = off % wordsPerBitmapWord;
1276 bits = *bitp>>shift;
1277
1278 if((bits & bitAllocated) == 0)
1279 continue;
1280
1281 if((bits & bitMarked) != 0) {
1282 if(DebugMark) {
1283 if(!(bits & bitSpecial))
1284 runtime_printf("found spurious mark on %p\n", p);
1285 *bitp &= ~(bitSpecial<<shift);
1286 }
1287 *bitp &= ~(bitMarked<<shift);
1288 continue;
1289 }
1290
1291 // Special means it has a finalizer or is being profiled.
1292 // In DebugMark mode, the bit has been coopted so
1293 // we have to assume all blocks are special.
1294 if(DebugMark || (bits & bitSpecial) != 0) {
1295 if(handlespecial(p, size))
1296 continue;
1297 }
1298
1299 // Mark freed; restore block boundary bit.
1300 *bitp = (*bitp & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
1301
1302 if(cl == 0) {
1303 // Free large span.
1304 runtime_unmarkspan(p, 1<<PageShift);
1305 *(uintptr*)p = 1; // needs zeroing
1306 runtime_MHeap_Free(&runtime_mheap, s, 1);
1307 c->local_alloc -= size;
1308 c->local_nfree++;
1309 } else {
1310 // Free small object.
1311 switch(compression) {
1312 case MTypes_Words:
1313 *(uintptr*)type_data = 0;
1314 break;
1315 case MTypes_Bytes:
1316 *(byte*)type_data = 0;
1317 break;
1318 }
1319 if(size > sizeof(uintptr))
1320 ((uintptr*)p)[1] = 1; // mark as "needs to be zeroed"
1321
1322 end->next = (MLink*)p;
1323 end = (MLink*)p;
1324 nfree++;
1325 }
1326 }
1327
1328 if(nfree) {
1329 c->local_by_size[cl].nfree += nfree;
1330 c->local_alloc -= size * nfree;
1331 c->local_nfree += nfree;
1332 c->local_cachealloc -= nfree * size;
1333 c->local_objects -= nfree;
1334 runtime_MCentral_FreeSpan(&runtime_mheap.central[cl], s, nfree, head.next, end);
1335 }
1336 }
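// In terms of the bitmap, sweepspan leaves each object as follows: marked
// objects keep bitAllocated and have bitMarked cleared; unmarked but
// allocated objects (garbage) are reset to just bitBlockBoundary; words that
// were never allocated are left untouched.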
1337
1338 static void
1339 dumpspan(uint32 idx)
1340 {
1341 int32 sizeclass, n, npages, i, column;
1342 uintptr size;
1343 byte *p;
1344 byte *arena_start;
1345 MSpan *s;
1346 bool allocated, special;
1347
1348 s = runtime_mheap.allspans[idx];
1349 if(s->state != MSpanInUse)
1350 return;
1351 arena_start = runtime_mheap.arena_start;
1352 p = (byte*)(s->start << PageShift);
1353 sizeclass = s->sizeclass;
1354 size = s->elemsize;
1355 if(sizeclass == 0) {
1356 n = 1;
1357 } else {
1358 npages = runtime_class_to_allocnpages[sizeclass];
1359 n = (npages << PageShift) / size;
1360 }
1361
1362 runtime_printf("%p .. %p:\n", p, p+n*size);
1363 column = 0;
1364 for(; n>0; n--, p+=size) {
1365 uintptr off, *bitp, shift, bits;
1366
1367 off = (uintptr*)p - (uintptr*)arena_start;
1368 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
1369 shift = off % wordsPerBitmapWord;
1370 bits = *bitp>>shift;
1371
1372 allocated = ((bits & bitAllocated) != 0);
1373 special = ((bits & bitSpecial) != 0);
1374
1375 for(i=0; (uint32)i<size; i+=sizeof(void*)) {
1376 if(column == 0) {
1377 runtime_printf("\t");
1378 }
1379 if(i == 0) {
1380 runtime_printf(allocated ? "(" : "[");
1381 runtime_printf(special ? "@" : "");
1382 runtime_printf("%p: ", p+i);
1383 } else {
1384 runtime_printf(" ");
1385 }
1386
1387 runtime_printf("%p", *(void**)(p+i));
1388
1389 if(i+sizeof(void*) >= size) {
1390 runtime_printf(allocated ? ") " : "] ");
1391 }
1392
1393 column++;
1394 if(column == 8) {
1395 runtime_printf("\n");
1396 column = 0;
1397 }
1398 }
1399 }
1400 runtime_printf("\n");
1401 }
1402
1403 // A debugging function to dump the contents of memory
1404 void
1405 runtime_memorydump(void)
1406 {
1407 uint32 spanidx;
1408
1409 for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
1410 dumpspan(spanidx);
1411 }
1412 }
1413
1414 void
1415 runtime_gchelper(void)
1416 {
1417 // run the parallel mark (work.markfor) over the gc roots
1418 runtime_parfordo(work.markfor);
1419
1420 // help other threads scan secondary blocks
1421 scanblock(nil, nil, 0, true);
1422
1423 if(DebugMark) {
1424 // wait while the main thread executes mark(debug_scanblock)
1425 while(runtime_atomicload(&work.debugmarkdone) == 0)
1426 runtime_usleep(10);
1427 }
1428
1429 runtime_parfordo(work.sweepfor);
1430 if(runtime_xadd(&work.ndone, +1) == work.nproc-1)
1431 runtime_notewakeup(&work.alldone);
1432 }
1433
1434 // Initialized from $GOGC. GOGC=off means no gc.
1435 //
1436 // Next gc is after we've allocated an extra amount of
1437 // memory proportional to the amount already in use.
1438 // If gcpercent=100 and we're using 4M, we'll gc again
1439 // when we get to 8M. This keeps the gc cost in linear
1440 // proportion to the allocation cost. Adjusting gcpercent
1441 // just changes the linear constant (and also the amount of
1442 // extra memory used).
1443 static int32 gcpercent = -2;
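// The trigger actually computed at the end of gc() is
//	next_gc = heap_alloc + (heap_alloc - stacks_sys)*gcpercent/100
// so with gcpercent=100 and roughly 4 MB live, the next collection starts
// near 8 MB, matching the example above.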
1444
1445 static void
1446 stealcache(void)
1447 {
1448 M *mp;
1449
1450 for(mp=runtime_allm; mp; mp=mp->alllink)
1451 runtime_MCache_ReleaseAll(mp->mcache);
1452 }
1453
1454 static void
1455 cachestats(GCStats *stats)
1456 {
1457 M *mp;
1458 MCache *c;
1459 uint32 i;
1460 uint64 stacks_inuse;
1461 uint64 *src, *dst;
1462
1463 if(stats)
1464 runtime_memclr((byte*)stats, sizeof(*stats));
1465 stacks_inuse = 0;
1466 for(mp=runtime_allm; mp; mp=mp->alllink) {
1467 c = mp->mcache;
1468 runtime_purgecachedstats(c);
1469 // stacks_inuse += mp->stackinuse*FixedStack;
1470 if(stats) {
1471 src = (uint64*)&mp->gcstats;
1472 dst = (uint64*)stats;
1473 for(i=0; i<sizeof(*stats)/sizeof(uint64); i++)
1474 dst[i] += src[i];
1475 runtime_memclr((byte*)&mp->gcstats, sizeof(mp->gcstats));
1476 }
1477 for(i=0; i<nelem(c->local_by_size); i++) {
1478 mstats.by_size[i].nmalloc += c->local_by_size[i].nmalloc;
1479 c->local_by_size[i].nmalloc = 0;
1480 mstats.by_size[i].nfree += c->local_by_size[i].nfree;
1481 c->local_by_size[i].nfree = 0;
1482 }
1483 }
1484 mstats.stacks_inuse = stacks_inuse;
1485 }
1486
1487 // Structure of arguments passed to function gc().
1488 // This allows the arguments to be passed via reflect_call.
1489 struct gc_args
1490 {
1491 int32 force;
1492 };
1493
1494 static void gc(struct gc_args *args);
1495
1496 void
1497 runtime_gc(int32 force)
1498 {
1499 M *m;
1500 const byte *p;
1501 struct gc_args a, *ap;
1502
1503 // The atomic operations are not atomic if the uint64s
1504 // are not aligned on uint64 boundaries. This has been
1505 // a problem in the past.
1506 if((((uintptr)&work.empty) & 7) != 0)
1507 runtime_throw("runtime: gc work buffer is misaligned");
1508
1509 // Make sure all registers are saved on stack so that
1510 // scanstack sees them.
1511 __builtin_unwind_init();
1512
1513 // The gc is turned off (via enablegc) until
1514 // the bootstrap has completed.
1515 // Also, malloc gets called in the guts
1516 // of a number of libraries that might be
1517 // holding locks. To avoid priority inversion
1518 // problems, don't bother trying to run gc
1519 // while holding a lock. The next mallocgc
1520 // without a lock will do the gc instead.
1521 m = runtime_m();
1522 if(!mstats.enablegc || m->locks > 0 || runtime_panicking)
1523 return;
1524
1525 if(gcpercent == -2) { // first time through
1526 p = runtime_getenv("GOGC");
1527 if(p == nil || p[0] == '\0')
1528 gcpercent = 100;
1529 else if(runtime_strcmp((const char*)p, "off") == 0)
1530 gcpercent = -1;
1531 else
1532 gcpercent = runtime_atoi(p);
1533
1534 p = runtime_getenv("GOGCTRACE");
1535 if(p != nil)
1536 gctrace = runtime_atoi(p);
1537 }
1538 if(gcpercent < 0)
1539 return;
1540
1541 // Run gc on a bigger stack to eliminate
1542 // a potentially large number of calls to runtime_morestack.
1543 // But not when using gccgo.
1544 a.force = force;
1545 ap = &a;
1546 gc(ap);
1547
1548 if(gctrace > 1 && !force) {
1549 a.force = 1;
1550 gc(&a);
1551 }
1552 }
1553
1554 static void
1555 gc(struct gc_args *args)
1556 {
1557 M *m;
1558 int64 t0, t1, t2, t3, t4;
1559 uint64 heap0, heap1, obj0, obj1;
1560 GCStats stats;
1561 M *mp;
1562 uint32 i;
1563 // Eface eface;
1564
1565 runtime_semacquire(&runtime_worldsema);
1566 if(!args->force && mstats.heap_alloc < mstats.next_gc) {
1567 runtime_semrelease(&runtime_worldsema);
1568 return;
1569 }
1570
1571 m = runtime_m();
1572
1573 t0 = runtime_nanotime();
1574
1575 m->gcing = 1;
1576 runtime_stoptheworld();
1577
1578 for(mp=runtime_allm; mp; mp=mp->alllink)
1579 runtime_settype_flush(mp, false);
1580
1581 heap0 = 0;
1582 obj0 = 0;
1583 if(gctrace) {
1584 cachestats(nil);
1585 heap0 = mstats.heap_alloc;
1586 obj0 = mstats.nmalloc - mstats.nfree;
1587 }
1588
1589 m->locks++; // disable gc during mallocs in parforalloc
1590 if(work.markfor == nil)
1591 work.markfor = runtime_parforalloc(MaxGcproc);
1592 if(work.sweepfor == nil)
1593 work.sweepfor = runtime_parforalloc(MaxGcproc);
1594 m->locks--;
1595
1596 if(itabtype == nil) {
1597 // get C pointer to the Go type "itab"
1598 // runtime_gc_itab_ptr(&eface);
1599 // itabtype = ((PtrType*)eface.type)->elem;
1600 }
1601
1602 work.nwait = 0;
1603 work.ndone = 0;
1604 work.debugmarkdone = 0;
1605 work.nproc = runtime_gcprocs();
1606 addroots();
1607 runtime_parforsetup(work.markfor, work.nproc, work.nroot, nil, false, markroot);
1608 runtime_parforsetup(work.sweepfor, work.nproc, runtime_mheap.nspan, nil, true, sweepspan);
1609 if(work.nproc > 1) {
1610 runtime_noteclear(&work.alldone);
1611 runtime_helpgc(work.nproc);
1612 }
1613
1614 t1 = runtime_nanotime();
1615
1616 runtime_parfordo(work.markfor);
1617 scanblock(nil, nil, 0, true);
1618
1619 if(DebugMark) {
1620 for(i=0; i<work.nroot; i++)
1621 debug_scanblock(work.roots[i].p, work.roots[i].n);
1622 runtime_atomicstore(&work.debugmarkdone, 1);
1623 }
1624 t2 = runtime_nanotime();
1625
1626 runtime_parfordo(work.sweepfor);
1627 t3 = runtime_nanotime();
1628
1629 stealcache();
1630 cachestats(&stats);
1631
1632 if(work.nproc > 1)
1633 runtime_notesleep(&work.alldone);
1634
1635 stats.nprocyield += work.sweepfor->nprocyield;
1636 stats.nosyield += work.sweepfor->nosyield;
1637 stats.nsleep += work.sweepfor->nsleep;
1638
1639 mstats.next_gc = mstats.heap_alloc+(mstats.heap_alloc-runtime_stacks_sys)*gcpercent/100;
1640 m->gcing = 0;
1641
1642 if(finq != nil) {
1643 m->locks++; // disable gc during the mallocs in newproc
1644 // kick off or wake up goroutine to run queued finalizers
1645 if(fing == nil)
1646 fing = __go_go(runfinq, nil);
1647 else if(fingwait) {
1648 fingwait = 0;
1649 runtime_ready(fing);
1650 }
1651 m->locks--;
1652 }
1653
1654 heap1 = mstats.heap_alloc;
1655 obj1 = mstats.nmalloc - mstats.nfree;
1656
1657 t4 = runtime_nanotime();
1658 mstats.last_gc = t4;
1659 mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0;
1660 mstats.pause_total_ns += t4 - t0;
1661 mstats.numgc++;
1662 if(mstats.debuggc)
1663 runtime_printf("pause %D\n", t4-t0);
1664
1665 if(gctrace) {
1666 runtime_printf("gc%d(%d): %D+%D+%D ms, %D -> %D MB %D -> %D (%D-%D) objects,"
1667 " %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
1668 mstats.numgc, work.nproc, (t2-t1)/1000000, (t3-t2)/1000000, (t1-t0+t4-t3)/1000000,
1669 heap0>>20, heap1>>20, obj0, obj1,
1670 mstats.nmalloc, mstats.nfree,
1671 stats.nhandoff, stats.nhandoffcnt,
1672 work.sweepfor->nsteal, work.sweepfor->nstealcnt,
1673 stats.nprocyield, stats.nosyield, stats.nsleep);
1674 }
1675
1676 runtime_MProf_GC();
1677 runtime_semrelease(&runtime_worldsema);
1678 runtime_starttheworld();
1679
1680 // give the queued finalizers, if any, a chance to run
1681 if(finq != nil)
1682 runtime_gosched();
1683 }
1684
1685 void runtime_ReadMemStats(MStats *)
1686 __asm__ (GOSYM_PREFIX "runtime.ReadMemStats");
1687
1688 void
1689 runtime_ReadMemStats(MStats *stats)
1690 {
1691 M *m;
1692
1693 // Have to acquire worldsema to stop the world,
1694 // because stoptheworld can only be used by
1695 // one goroutine at a time, and there might be
1696 // a pending garbage collection already calling it.
1697 runtime_semacquire(&runtime_worldsema);
1698 m = runtime_m();
1699 m->gcing = 1;
1700 runtime_stoptheworld();
1701 cachestats(nil);
1702 *stats = mstats;
1703 m->gcing = 0;
1704 runtime_semrelease(&runtime_worldsema);
1705 runtime_starttheworld();
1706 }
1707
1708 static void
1709 runfinq(void* dummy __attribute__ ((unused)))
1710 {
1711 Finalizer *f;
1712 FinBlock *fb, *next;
1713 uint32 i;
1714
1715 for(;;) {
1716 // There's no need for a lock in this section
1717 // because it only conflicts with the garbage
1718 // collector, and the garbage collector only
1719 // runs when everyone else is stopped, and
1720 // runfinq only stops at the gosched() or
1721 // during the calls in the for loop.
1722 fb = finq;
1723 finq = nil;
1724 if(fb == nil) {
1725 fingwait = 1;
1726 runtime_park(nil, nil, "finalizer wait");
1727 continue;
1728 }
1729 if(raceenabled)
1730 runtime_racefingo();
1731 for(; fb; fb=next) {
1732 next = fb->next;
1733 for(i=0; i<(uint32)fb->cnt; i++) {
1734 void *params[1];
1735
1736 f = &fb->fin[i];
1737 params[0] = &f->arg;
1738 reflect_call(f->ft, (void*)f->fn, 0, 0, params, nil);
1739 f->fn = nil;
1740 f->arg = nil;
1741 }
1742 fb->cnt = 0;
1743 fb->next = finc;
1744 finc = fb;
1745 }
1746 runtime_gc(1); // trigger another gc to clean up the finalized objects, if possible
1747 }
1748 }
1749
1750 // mark the block at v of size n as allocated.
1751 // If noptr is true, mark it as having no pointers.
1752 void
1753 runtime_markallocated(void *v, uintptr n, bool noptr)
1754 {
1755 uintptr *b, obits, bits, off, shift;
1756
1757 if(0)
1758 runtime_printf("markallocated %p+%p\n", v, n);
1759
1760 if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
1761 runtime_throw("markallocated: bad pointer");
1762
1763 off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset
1764 b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
1765 shift = off % wordsPerBitmapWord;
1766
1767 for(;;) {
1768 obits = *b;
1769 bits = (obits & ~(bitMask<<shift)) | (bitAllocated<<shift);
1770 if(noptr)
1771 bits |= bitNoPointers<<shift;
1772 if(runtime_singleproc) {
1773 *b = bits;
1774 break;
1775 } else {
1776 // more than one goroutine is potentially running: use atomic op
1777 if(runtime_casp((void**)b, (void*)obits, (void*)bits))
1778 break;
1779 }
1780 }
1781 }
1782
1783 // mark the block at v of size n as freed.
1784 void
1785 runtime_markfreed(void *v, uintptr n)
1786 {
1787 uintptr *b, obits, bits, off, shift;
1788
1789 if(0)
1790 runtime_printf("markallocated %p+%p\n", v, n);
1791
1792 if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
1793 runtime_throw("markallocated: bad pointer");
1794
1795 off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset
1796 b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
1797 shift = off % wordsPerBitmapWord;
1798
1799 for(;;) {
1800 obits = *b;
1801 bits = (obits & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
1802 if(runtime_singleproc) {
1803 *b = bits;
1804 break;
1805 } else {
1806 // more than one goroutine is potentially running: use atomic op
1807 if(runtime_casp((void**)b, (void*)obits, (void*)bits))
1808 break;
1809 }
1810 }
1811 }
1812
1813 // check that the block at v of size n is marked freed.
1814 void
1815 runtime_checkfreed(void *v, uintptr n)
1816 {
1817 uintptr *b, bits, off, shift;
1818
1819 if(!runtime_checking)
1820 return;
1821
1822 if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
1823 return; // not allocated, so okay
1824
1825 off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset
1826 b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
1827 shift = off % wordsPerBitmapWord;
1828
1829 bits = *b>>shift;
1830 if((bits & bitAllocated) != 0) {
1831 runtime_printf("checkfreed %p+%p: off=%p have=%p\n",
1832 v, n, off, bits & bitMask);
1833 runtime_throw("checkfreed: not freed");
1834 }
1835 }
1836
1837 // mark the span of memory at v as having n blocks of the given size.
1838 // if leftover is true, there is left over space at the end of the span.
1839 void
1840 runtime_markspan(void *v, uintptr size, uintptr n, bool leftover)
1841 {
1842 uintptr *b, off, shift;
1843 byte *p;
1844
1845 if((byte*)v+size*n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
1846 runtime_throw("markspan: bad pointer");
1847
1848 p = v;
1849 if(leftover) // mark a boundary just past end of last block too
1850 n++;
1851 for(; n-- > 0; p += size) {
1852 // Okay to use non-atomic ops here, because we control
1853 // the entire span, and each bitmap word has bits for only
1854 // one span, so no other goroutines are changing these
1855 // bitmap words.
1856 off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start; // word offset
1857 b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
1858 shift = off % wordsPerBitmapWord;
1859 *b = (*b & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
1860 }
1861 }
1862
1863 // unmark the span of memory at v of length n bytes.
1864 void
1865 runtime_unmarkspan(void *v, uintptr n)
1866 {
1867 uintptr *p, *b, off;
1868
1869 if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
1870 runtime_throw("markspan: bad pointer");
1871
1872 p = v;
1873 off = p - (uintptr*)runtime_mheap.arena_start; // word offset
1874 if(off % wordsPerBitmapWord != 0)
1875 runtime_throw("markspan: unaligned pointer");
1876 b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
1877 n /= PtrSize;
1878 if(n%wordsPerBitmapWord != 0)
1879 runtime_throw("unmarkspan: unaligned length");
1880 // Okay to use non-atomic ops here, because we control
1881 // the entire span, and each bitmap word has bits for only
1882 // one span, so no other goroutines are changing these
1883 // bitmap words.
1884 n /= wordsPerBitmapWord;
1885 while(n-- > 0)
1886 *b-- = 0;
1887 }
1888
1889 bool
1890 runtime_blockspecial(void *v)
1891 {
1892 uintptr *b, off, shift;
1893
1894 if(DebugMark)
1895 return true;
1896
1897 off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;
1898 b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
1899 shift = off % wordsPerBitmapWord;
1900
1901 return (*b & (bitSpecial<<shift)) != 0;
1902 }
1903
1904 void
1905 runtime_setblockspecial(void *v, bool s)
1906 {
1907 uintptr *b, off, shift, bits, obits;
1908
1909 if(DebugMark)
1910 return;
1911
1912 off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;
1913 b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
1914 shift = off % wordsPerBitmapWord;
1915
1916 for(;;) {
1917 obits = *b;
1918 if(s)
1919 bits = obits | (bitSpecial<<shift);
1920 else
1921 bits = obits & ~(bitSpecial<<shift);
1922 if(runtime_singleproc) {
1923 *b = bits;
1924 break;
1925 } else {
1926 // more than one goroutine is potentially running: use atomic op
1927 if(runtime_casp((void**)b, (void*)obits, (void*)bits))
1928 break;
1929 }
1930 }
1931 }
1932
1933 void
1934 runtime_MHeap_MapBits(MHeap *h)
1935 {
1936 size_t page_size;
1937
1938 // Caller has added extra mappings to the arena.
1939 // Add extra mappings of bitmap words as needed.
1940 // We allocate extra bitmap pieces in chunks of bitmapChunk.
1941 enum {
1942 bitmapChunk = 8192
1943 };
1944 uintptr n;
1945
1946 n = (h->arena_used - h->arena_start) / wordsPerBitmapWord;
1947 n = (n+bitmapChunk-1) & ~(bitmapChunk-1);
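// For example, with bitmapChunk = 8192 a requirement of n = 10000 bitmap
// bytes rounds up to 16384 here, before the page-size rounding below
// (illustrative arithmetic only).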
1948 if(h->bitmap_mapped >= n)
1949 return;
1950
1951 page_size = getpagesize();
1952 n = (n+page_size-1) & ~(page_size-1);
1953
1954 runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped);
1955 h->bitmap_mapped = n;
1956 }
1957