// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Implementation of runtime/debug.WriteHeapDump.  Writes all
// objects in the heap plus additional info (roots, threads,
// finalizers, etc.) to a file.

// The format of the dumped file is described at
// http://code.google.com/p/go-wiki/wiki/heapdump13

#include "runtime.h"
#include "arch.h"
#include "malloc.h"
#include "mgc0.h"
#include "go-type.h"
#include "go-defer.h"
#include "go-panic.h"

#define hash __hash
#define KindNoPointers GO_NO_POINTERS
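// (The gccgo headers spell these __hash and GO_NO_POINTERS; the defines
// above let the code below keep the names used by the gc runtime.)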

enum {
	FieldKindEol = 0,
	FieldKindPtr = 1,
	FieldKindString = 2,
	FieldKindSlice = 3,
	FieldKindIface = 4,
	FieldKindEface = 5,

	TagEOF = 0,
	TagObject = 1,
	TagOtherRoot = 2,
	TagType = 3,
	TagGoRoutine = 4,
	TagStackFrame = 5,
	TagParams = 6,
	TagFinalizer = 7,
	TagItab = 8,
	TagOSThread = 9,
	TagMemStats = 10,
	TagQueuedFinalizer = 11,
	TagData = 12,
	TagBss = 13,
	TagDefer = 14,
	TagPanic = 15,
	TagMemProf = 16,
	TagAllocSample = 17,

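	// Pseudo type-info kind: the object has no type descriptor and must be
	// scanned conservatively (see dumpobjs).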
	TypeInfo_Conservative = 127,
};

// static uintptr* playgcprog(uintptr offset, uintptr *prog, void (*callback)(void*,uintptr,uintptr), void *arg);
// static void dumpfields(uintptr *prog);
static void dumpefacetypes(void *obj, uintptr size, const Type *type, uintptr kind);

// fd to write the dump to.
static uintptr dumpfd;

// buffer of pending write data
enum {
	BufSize = 4096,
};
static byte buf[BufSize];
static uintptr nbuf;

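// hwrite buffers writes to dumpfd: data accumulates in buf and is flushed
// when it would overflow; writes of at least BufSize bytes bypass the buffer.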
static void
hwrite(const byte *data, uintptr len)
{
	if(len + nbuf <= BufSize) {
		runtime_memmove(buf + nbuf, data, len);
		nbuf += len;
		return;
	}
	runtime_write(dumpfd, buf, nbuf);
	if(len >= BufSize) {
		runtime_write(dumpfd, data, len);
		nbuf = 0;
	} else {
		runtime_memmove(buf, data, len);
		nbuf = len;
	}
}

static void
flush(void)
{
	runtime_write(dumpfd, buf, nbuf);
	nbuf = 0;
}

// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once.  That's ok.
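// (The dump reader identifies a type record by the type's address, so a
// duplicate record is redundant but harmless.)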
enum {
	TypeCacheBuckets = 256, // must be a power of 2
	TypeCacheAssoc = 4,
};
typedef struct TypeCacheBucket TypeCacheBucket;
struct TypeCacheBucket {
	const Type *t[TypeCacheAssoc];
};
static TypeCacheBucket typecache[TypeCacheBuckets];

// dump a uint64 in a varint format parseable by encoding/binary
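// (7 bits per byte, least significant group first, high bit set on all but
// the final byte; e.g. 300 is written as 0xac 0x02)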
static void
dumpint(uint64 v)
{
	byte buf[10];
	int32 n;
	n = 0;
	while(v >= 0x80) {
		buf[n++] = v | 0x80;
		v >>= 7;
	}
	buf[n++] = v;
	hwrite(buf, n);
}

static void
dumpbool(bool b)
{
	dumpint(b ? 1 : 0);
}

// dump varint uint64 length followed by memory contents
static void
dumpmemrange(const byte *data, uintptr len)
{
	dumpint(len);
	hwrite(data, len);
}

static void
dumpstr(String s)
{
	dumpmemrange(s.str, s.len);
}

static void
dumpcstr(const int8 *c)
{
	dumpmemrange((const byte*)c, runtime_findnull((const byte*)c));
}

// dump information for a type
static void
dumptype(const Type *t)
{
	TypeCacheBucket *b;
	int32 i, j;

	if(t == nil) {
		return;
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	b = &typecache[t->hash & (TypeCacheBuckets-1)];
	if(t == b->t[0]) return;
	for(i = 1; i < TypeCacheAssoc; i++) {
		if(t == b->t[i]) {
			// Move-to-front
			for(j = i; j > 0; j--) {
				b->t[j] = b->t[j-1];
			}
			b->t[0] = t;
			return;
		}
	}
	// Might not have been dumped yet.  Dump it and
	// remember we did so.
	for(j = TypeCacheAssoc-1; j > 0; j--) {
		b->t[j] = b->t[j-1];
	}
	b->t[0] = t;

	// dump the type
	dumpint(TagType);
	dumpint((uintptr)t);
	dumpint(t->__size);
	if(t->__uncommon == nil || t->__uncommon->__pkg_path == nil || t->__uncommon->__name == nil) {
		dumpstr(*t->__reflection);
	} else {
		dumpint(t->__uncommon->__pkg_path->len + 1 + t->__uncommon->__name->len);
		hwrite(t->__uncommon->__pkg_path->str, t->__uncommon->__pkg_path->len);
		hwrite((const byte*)".", 1);
		hwrite(t->__uncommon->__name->str, t->__uncommon->__name->len);
	}
	dumpbool(t->__size > PtrSize || (t->__code & KindNoPointers) == 0);
	// dumpfields((uintptr*)t->gc + 1);
}

// returns true if object is scannable
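// (i.e. its scan bit is set in the heap bitmap, which is stored in the
// words immediately below arena_start)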
static bool
scannable(byte *obj)
{
	uintptr *b, off, shift;

	off = (uintptr*)obj - (uintptr*)runtime_mheap.arena_start;  // word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;
	return ((*b >> shift) & bitScan) != 0;
}

// dump an object
static void
dumpobj(byte *obj, uintptr size, const Type *type, uintptr kind)
{
	if(type != nil) {
		dumptype(type);
		dumpefacetypes(obj, size, type, kind);
	}

	dumpint(TagObject);
	dumpint((uintptr)obj);
	dumpint((uintptr)type);
	dumpint(kind);
	dumpmemrange(obj, size);
}

static void
dumpotherroot(const char *description, byte *to)
{
	dumpint(TagOtherRoot);
	dumpcstr((const int8 *)description);
	dumpint((uintptr)to);
}

static void
dumpfinalizer(byte *obj, FuncVal *fn, const FuncType* ft, const PtrType *ot)
{
	dumpint(TagFinalizer);
	dumpint((uintptr)obj);
	dumpint((uintptr)fn);
	dumpint((uintptr)fn->fn);
	dumpint((uintptr)ft);
	dumpint((uintptr)ot);
}

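// ChildInfo is used only by the stack-frame dumping code, which is currently
// commented out (see dumpgoroutine).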
typedef struct ChildInfo ChildInfo;
struct ChildInfo {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	uintptr argoff;     // where the arguments start in the frame
	uintptr arglen;     // size of args region
	BitVector args;     // if args.n >= 0, pointer map of args region

	byte *sp;           // callee sp
	uintptr depth;      // depth in call stack (0 == most recent)
};

static void
dumpgoroutine(G *gp)
{
	// ChildInfo child;
	Defer *d;
	Panic *p;

	dumpint(TagGoRoutine);
	dumpint((uintptr)gp);
	dumpint((uintptr)0);
	dumpint(gp->goid);
	dumpint(gp->gopc);
	dumpint(gp->status);
	dumpbool(gp->issystem);
	dumpbool(gp->isbackground);
	dumpint(gp->waitsince);
	dumpcstr((const int8 *)gp->waitreason);
	dumpint((uintptr)0);
	dumpint((uintptr)gp->m);
	dumpint((uintptr)gp->defer);
	dumpint((uintptr)gp->panic);

	// dump stack
	// child.args.n = -1;
	// child.arglen = 0;
	// child.sp = nil;
	// child.depth = 0;
	// if(!ScanStackByFrames)
	// 	runtime_throw("need frame info to dump stacks");
	// runtime_gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, &child, false);

	// dump defer & panic records
	for(d = gp->defer; d != nil; d = d->__next) {
		dumpint(TagDefer);
		dumpint((uintptr)d);
		dumpint((uintptr)gp);
		dumpint((uintptr)d->__arg);
		dumpint((uintptr)d->__frame);
		dumpint((uintptr)d->__pfn);
		dumpint((uintptr)0);
		dumpint((uintptr)d->__next);
	}
	for(p = gp->panic; p != nil; p = p->__next) {
		dumpint(TagPanic);
		dumpint((uintptr)p);
		dumpint((uintptr)gp);
		dumpint((uintptr)p->__arg.__type_descriptor);
		dumpint((uintptr)p->__arg.__object);
		dumpint((uintptr)0);
		dumpint((uintptr)p->__next);
	}
}

static void
dumpgs(void)
{
	G *gp;
	uint32 i;

	// goroutines & stacks
	for(i = 0; i < runtime_allglen; i++) {
		gp = runtime_allg[i];
		switch(gp->status){
		default:
			runtime_printf("unexpected G.status %d\n", gp->status);
			runtime_throw("mark - bad status");
		case Gdead:
			break;
		case Grunnable:
		case Gsyscall:
		case Gwaiting:
			dumpgoroutine(gp);
			break;
		}
	}
}

static void
finq_callback(FuncVal *fn, void *obj, const FuncType *ft, const PtrType *ot)
{
	dumpint(TagQueuedFinalizer);
	dumpint((uintptr)obj);
	dumpint((uintptr)fn);
	dumpint((uintptr)fn->fn);
	dumpint((uintptr)ft);
	dumpint((uintptr)ot);
}

static void
dumproots(void)
{
	MSpan *s, **allspans;
	uint32 spanidx;
	Special *sp;
	SpecialFinalizer *spf;
	byte *p;

	// data segment
	// dumpint(TagData);
	// dumpint((uintptr)data);
	// dumpmemrange(data, edata - data);
	// dumpfields((uintptr*)gcdata + 1);

	// bss segment
	// dumpint(TagBss);
	// dumpint((uintptr)bss);
	// dumpmemrange(bss, ebss - bss);
	// dumpfields((uintptr*)gcbss + 1);

	// MSpan.types
	allspans = runtime_mheap.allspans;
	for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
		s = allspans[spanidx];
		if(s->state == MSpanInUse) {
			// The garbage collector ignores type pointers stored in MSpan.types:
			//  - Compiler-generated types are stored outside of heap.
			//  - The reflect package has runtime-generated types cached in its data structures.
			//    The garbage collector relies on finding the references via that cache.
			switch(s->types.compression) {
			case MTypes_Empty:
			case MTypes_Single:
				break;
			case MTypes_Words:
			case MTypes_Bytes:
				dumpotherroot("runtime type info", (byte*)s->types.data);
				break;
			}

			// Finalizers
			for(sp = s->specials; sp != nil; sp = sp->next) {
				if(sp->kind != KindSpecialFinalizer)
					continue;
				spf = (SpecialFinalizer*)sp;
				p = (byte*)((s->start << PageShift) + spf->offset);
				dumpfinalizer(p, spf->fn, spf->ft, spf->ot);
			}
		}
	}

	// Finalizer queue
	runtime_iterate_finq(finq_callback);
}

// Free-object marks, one byte per object.
// Needs to be as big as the largest number of objects per span.
static byte hfree[PageSize/8];

static void
dumpobjs(void)
{
	uintptr i, j, size, n, off, shift, *bitp, bits, ti, kind;
	MSpan *s;
	MLink *l;
	byte *p;
	const Type *t;

	for(i = 0; i < runtime_mheap.nspan; i++) {
		s = runtime_mheap.allspans[i];
		if(s->state != MSpanInUse)
			continue;
		p = (byte*)(s->start << PageShift);
		size = s->elemsize;
		n = (s->npages << PageShift) / size;
		if(n > PageSize/8)
			runtime_throw("free array doesn't have enough entries");
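		// Mark this span's free objects from its freelist, then dump every
		// remaining (allocated) object, clearing the marks as we go.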
		for(l = s->freelist; l != nil; l = l->next) {
			hfree[((byte*)l - p) / size] = true;
		}
		for(j = 0; j < n; j++, p += size) {
			if(hfree[j]) {
				hfree[j] = false;
				continue;
			}
			off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start;
			bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
			shift = off % wordsPerBitmapWord;
			bits = *bitp >> shift;

			// Skip FlagNoGC allocations (stacks)
			if((bits & bitAllocated) == 0)
				continue;

			// extract type and kind
			ti = runtime_gettype(p);
			t = (Type*)(ti & ~(uintptr)(PtrSize-1));
			kind = ti & (PtrSize-1);

			// dump it
			if(kind == TypeInfo_Chan)
				t = ((const ChanType*)t)->__element_type; // use element type for chan encoding
			if(t == nil && scannable(p))
				kind = TypeInfo_Conservative; // special kind for conservatively scanned objects
			dumpobj(p, size, t, kind);
		}
	}
}

static void
dumpparams(void)
{
	byte *x;

	dumpint(TagParams);
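	// Detect the host byte order by storing a small value in a pointer-sized
	// variable and inspecting its lowest-addressed byte.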
	x = (byte*)1;
	if(*(byte*)&x == 1)
		dumpbool(false); // little-endian ptrs
	else
		dumpbool(true); // big-endian ptrs
	dumpint(PtrSize);
	dumpint(runtime_Hchansize);
	dumpint((uintptr)runtime_mheap.arena_start);
	dumpint((uintptr)runtime_mheap.arena_used);
	dumpint(0);
	dumpcstr((const int8 *)"");
	dumpint(runtime_ncpu);
}

static void
dumpms(void)
{
	M *mp;

	for(mp = runtime_allm; mp != nil; mp = mp->alllink) {
		dumpint(TagOSThread);
		dumpint((uintptr)mp);
		dumpint(mp->id);
		dumpint(0);
	}
}

static void
dumpmemstats(void)
{
	int32 i;

	dumpint(TagMemStats);
	dumpint(mstats.alloc);
	dumpint(mstats.total_alloc);
	dumpint(mstats.sys);
	dumpint(mstats.nlookup);
	dumpint(mstats.nmalloc);
	dumpint(mstats.nfree);
	dumpint(mstats.heap_alloc);
	dumpint(mstats.heap_sys);
	dumpint(mstats.heap_idle);
	dumpint(mstats.heap_inuse);
	dumpint(mstats.heap_released);
	dumpint(mstats.heap_objects);
	dumpint(mstats.stacks_inuse);
	dumpint(mstats.stacks_sys);
	dumpint(mstats.mspan_inuse);
	dumpint(mstats.mspan_sys);
	dumpint(mstats.mcache_inuse);
	dumpint(mstats.mcache_sys);
	dumpint(mstats.buckhash_sys);
	dumpint(mstats.gc_sys);
	dumpint(mstats.other_sys);
	dumpint(mstats.next_gc);
	dumpint(mstats.last_gc);
	dumpint(mstats.pause_total_ns);
	for(i = 0; i < 256; i++)
		dumpint(mstats.pause_ns[i]);
	dumpint(mstats.numgc);
}

static void
dumpmemprof_callback(Bucket *b, uintptr nstk, Location *stk, uintptr size, uintptr allocs, uintptr frees)
{
	uintptr i, pc;
	byte buf[20];

	dumpint(TagMemProf);
	dumpint((uintptr)b);
	dumpint(size);
	dumpint(nstk);
	for(i = 0; i < nstk; i++) {
		pc = stk[i].pc;
		if(stk[i].function.len == 0) {
			runtime_snprintf(buf, sizeof(buf), "%X", (uint64)pc);
			dumpcstr((int8*)buf);
			dumpcstr((const int8*)"?");
			dumpint(0);
		} else {
			dumpstr(stk[i].function);
			dumpstr(stk[i].filename);
			dumpint(stk[i].lineno);
		}
	}
	dumpint(allocs);
	dumpint(frees);
}

static void
dumpmemprof(void)
{
	MSpan *s, **allspans;
	uint32 spanidx;
	Special *sp;
	SpecialProfile *spp;
	byte *p;

	runtime_iterate_memprof(dumpmemprof_callback);

	allspans = runtime_mheap.allspans;
	for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
		s = allspans[spanidx];
		if(s->state != MSpanInUse)
			continue;
		for(sp = s->specials; sp != nil; sp = sp->next) {
			if(sp->kind != KindSpecialProfile)
				continue;
			spp = (SpecialProfile*)sp;
			p = (byte*)((s->start << PageShift) + spp->offset);
			dumpint(TagAllocSample);
			dumpint((uintptr)p);
			dumpint((uintptr)spp->b);
		}
	}
}

static void
mdump(G *gp)
{
	const byte *hdr;
	uintptr i;
	MSpan *s;

	// make sure we're done sweeping
	for(i = 0; i < runtime_mheap.nspan; i++) {
		s = runtime_mheap.allspans[i];
		if(s->state == MSpanInUse)
			runtime_MSpan_EnsureSwept(s);
	}

	runtime_memclr((byte*)&typecache[0], sizeof(typecache));
	hdr = (const byte*)"go1.3 heap dump\n";
	hwrite(hdr, runtime_findnull(hdr));
	dumpparams();
	dumpobjs();
	dumpgs();
	dumpms();
	dumproots();
	dumpmemstats();
	dumpmemprof();
	dumpint(TagEOF);
	flush();

	gp->param = nil;
	gp->status = Grunning;
	runtime_gogo(gp);
}

void runtime_debug_WriteHeapDump(uintptr)
  __asm__(GOSYM_PREFIX "runtime_debug.WriteHeapDump");
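
// Typical call from Go code (illustrative):
//
//	f, _ := os.Create("heapdump")
//	debug.WriteHeapDump(f.Fd())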

void
runtime_debug_WriteHeapDump(uintptr fd)
{
	M *m;
	G *g;

	// Stop the world.
	runtime_semacquire(&runtime_worldsema, false);
	m = runtime_m();
	m->gcing = 1;
	m->locks++;
	runtime_stoptheworld();

	// Update stats so we can dump them.
	// As a side effect, flushes all the MCaches so the MSpan.freelist
	// lists contain all the free objects.
	runtime_updatememstats(nil);

	// Set dump file.
	dumpfd = fd;

	// Call dump routine on M stack.
	g = runtime_g();
	g->status = Gwaiting;
	g->waitreason = "dumping heap";
	runtime_mcall(mdump);

	// Reset dump file.
	dumpfd = 0;

	// Start up the world again.
	m->gcing = 0;
	runtime_semrelease(&runtime_worldsema);
	runtime_starttheworld();
	m->locks--;
}

// Runs the specified gc program.  Calls the callback for every
// pointer-like field specified by the program and passes to the
// callback the kind and offset of that field within the object.
// offset is the offset in the object of the start of the program.
// Returns a pointer to the opcode that ended the gc program (either
// GC_END or GC_ARRAY_NEXT).
/*
static uintptr*
playgcprog(uintptr offset, uintptr *prog, void (*callback)(void*,uintptr,uintptr), void *arg)
{
	uintptr len, elemsize, i, *end;

	for(;;) {
		switch(prog[0]) {
		case GC_END:
			return prog;
		case GC_PTR:
			callback(arg, FieldKindPtr, offset + prog[1]);
			prog += 3;
			break;
		case GC_APTR:
			callback(arg, FieldKindPtr, offset + prog[1]);
			prog += 2;
			break;
		case GC_ARRAY_START:
			len = prog[2];
			elemsize = prog[3];
			end = nil;
			for(i = 0; i < len; i++) {
				end = playgcprog(offset + prog[1] + i * elemsize, prog + 4, callback, arg);
				if(end[0] != GC_ARRAY_NEXT)
					runtime_throw("GC_ARRAY_START did not have matching GC_ARRAY_NEXT");
			}
			prog = end + 1;
			break;
		case GC_ARRAY_NEXT:
			return prog;
		case GC_CALL:
			playgcprog(offset + prog[1], (uintptr*)((byte*)prog + *(int32*)&prog[2]), callback, arg);
			prog += 3;
			break;
		case GC_CHAN_PTR:
			callback(arg, FieldKindPtr, offset + prog[1]);
			prog += 3;
			break;
		case GC_STRING:
			callback(arg, FieldKindString, offset + prog[1]);
			prog += 2;
			break;
		case GC_EFACE:
			callback(arg, FieldKindEface, offset + prog[1]);
			prog += 2;
			break;
		case GC_IFACE:
			callback(arg, FieldKindIface, offset + prog[1]);
			prog += 2;
			break;
		case GC_SLICE:
			callback(arg, FieldKindSlice, offset + prog[1]);
			prog += 3;
			break;
		case GC_REGION:
			playgcprog(offset + prog[1], (uintptr*)prog[3] + 1, callback, arg);
			prog += 4;
			break;
		default:
			runtime_printf("%D\n", (uint64)prog[0]);
			runtime_throw("bad gc op");
		}
	}
}

static void
dump_callback(void *p, uintptr kind, uintptr offset)
{
	USED(&p);
	dumpint(kind);
	dumpint(offset);
}

// dumpint() the kind & offset of each field in an object.
static void
dumpfields(uintptr *prog)
{
	playgcprog(0, prog, dump_callback, nil);
	dumpint(FieldKindEol);
}

static void
dumpeface_callback(void *p, uintptr kind, uintptr offset)
{
	Eface *e;

	if(kind != FieldKindEface)
		return;
	e = (Eface*)((byte*)p + offset);
	dumptype(e->__type_descriptor);
}
*/

// The heap dump reader needs to be able to disambiguate
// Eface entries.  So it needs to know every type that might
// appear in such an entry.  dumpeface_callback (above, currently
// commented out) and dumpefacetypes (below) accomplish that.

// Dump all the types that appear in the type field of
// any Eface contained in obj.
static void
dumpefacetypes(void *obj __attribute__ ((unused)), uintptr size, const Type *type, uintptr kind)
{
	uintptr i;

	switch(kind) {
	case TypeInfo_SingleObject:
		// playgcprog(0, (uintptr*)type->gc + 1, dumpeface_callback, obj);
		break;
	case TypeInfo_Array:
		for(i = 0; i <= size - type->__size; i += type->__size) {
			// playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
		}
		break;
	case TypeInfo_Chan:
		if(type->__size == 0) // channels may have zero-sized objects in them
			break;
		for(i = runtime_Hchansize; i <= size - type->__size; i += type->__size) {
			// playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
		}
		break;
	}
}