// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Implementation of runtime/debug.WriteHeapDump. Writes all
// objects in the heap plus additional info (roots, threads,
// finalizers, etc.) to a file.

// The format of the dumped file is described at
// https://golang.org/s/go15heapdump.
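//
// A typical caller opens a file and hands its descriptor to
// runtime/debug.WriteHeapDump (a usage sketch from outside the
// runtime; the file name is illustrative):
//
//	f, _ := os.Create("heap.dump")
//	debug.WriteHeapDump(f.Fd())
//	f.Close()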

package runtime

import (
	"internal/goarch"
	"unsafe"
)

//go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
func runtime_debug_WriteHeapDump(fd uintptr) {
	stopTheWorld("write heap dump")

	// Keep m on this G's stack instead of the system stack.
	// Both readmemstats_m and writeheapdump_m have pretty large
	// peak stack depths and we risk blowing the system stack.
	// This is safe because the world is stopped, so we don't
	// need to worry about anyone shrinking and therefore moving
	// our stack.
	var m MemStats
	systemstack(func() {
		// Call readmemstats_m here instead of deeper in
		// writeheapdump_m because we might blow the system stack
		// otherwise.
		readmemstats_m(&m)
		writeheapdump_m(fd, &m)
	})

	startTheWorld()
}

const (
	fieldKindEol       = 0
	fieldKindPtr       = 1
	fieldKindIface     = 2
	fieldKindEface     = 3
	tagEOF             = 0
	tagObject          = 1
	tagOtherRoot       = 2
	tagType            = 3
	tagGoroutine       = 4
	tagStackFrame      = 5
	tagParams          = 6
	tagFinalizer       = 7
	tagItab            = 8
	tagOSThread        = 9
	tagMemStats        = 10
	tagQueuedFinalizer = 11
	tagData            = 12
	tagBSS             = 13
	tagDefer           = 14
	tagPanic           = 15
	tagMemProf         = 16
	tagAllocSample     = 17
)
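
// A dump is a stream of records, each beginning with one of the tags
// above encoded as a uvarint; tagEOF terminates the stream (see mdump
// below). A reading loop, sketched with encoding/binary (r is any
// io.ByteReader over the dump):
//
//	for {
//		tag, err := binary.ReadUvarint(r)
//		if err != nil || tag == tagEOF {
//			break
//		}
//		// ... decode this record's payload based on tag ...
//	}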

var dumpfd uintptr // fd to write the dump to.
var tmpbuf []byte

// buffer of pending write data
const (
	bufSize = 4096
)

var buf [bufSize]byte
var nbuf uintptr

func dwrite(data unsafe.Pointer, len uintptr) {
	if len == 0 {
		return
	}
	// Fast path: the data fits in the remaining buffer space.
	if nbuf+len <= bufSize {
		copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
		nbuf += len
		return
	}

	// Flush the buffer, then either write large data directly
	// or start refilling the buffer with this data.
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	if len >= bufSize {
		write(dumpfd, data, int32(len))
		nbuf = 0
	} else {
		copy(buf[:], (*[bufSize]byte)(data)[:len])
		nbuf = len
	}
}

func dwritebyte(b byte) {
	dwrite(unsafe.Pointer(&b), 1)
}

func flush() {
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	nbuf = 0
}

// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once. That's ok.
const (
	typeCacheBuckets = 256
	typeCacheAssoc   = 4
)

type typeCacheBucket struct {
	t [typeCacheAssoc]*_type
}

var typecache [typeCacheBuckets]typeCacheBucket
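
// For example, a type with hash h lands in bucket
// typecache[h&(typeCacheBuckets-1)], which equals h%typeCacheBuckets
// because typeCacheBuckets is a power of two.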

// dump a uint64 in a varint format parseable by encoding/binary
func dumpint(v uint64) {
	var buf [10]byte
	var n int
	for v >= 0x80 {
		buf[n] = byte(v | 0x80)
		n++
		v >>= 7
	}
	buf[n] = byte(v)
	n++
	dwrite(unsafe.Pointer(&buf), uintptr(n))
}
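
// A reader can decode each such value with encoding/binary's uvarint
// decoder, e.g. (a sketch; r is any io.ByteReader, such as a
// *bufio.Reader over the dump file):
//
//	v, err := binary.ReadUvarint(r)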

func dumpbool(b bool) {
	if b {
		dumpint(1)
	} else {
		dumpint(0)
	}
}

// dump varint uint64 length followed by memory contents
func dumpmemrange(data unsafe.Pointer, len uintptr) {
	dumpint(uint64(len))
	dwrite(data, len)
}

func dumpslice(b []byte) {
	dumpint(uint64(len(b)))
	if len(b) > 0 {
		dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
	}
}

func dumpstr(s string) {
	sp := stringStructOf(&s)
	dumpmemrange(sp.str, uintptr(sp.len))
}
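
// A reader recovers such a length-prefixed field by reading the
// uvarint length and then that many raw bytes (a sketch; r is a
// *bufio.Reader over the dump):
//
//	n, _ := binary.ReadUvarint(r)
//	b := make([]byte, n)
//	io.ReadFull(r, b)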

// dump information for a type
func dumptype(t *_type) {
	if t == nil {
		return
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	b := &typecache[t.hash&(typeCacheBuckets-1)]
	if t == b.t[0] {
		return
	}
	for i := 1; i < typeCacheAssoc; i++ {
		if t == b.t[i] {
			// Move-to-front
			for j := i; j > 0; j-- {
				b.t[j] = b.t[j-1]
			}
			b.t[0] = t
			return
		}
	}

	// Might not have been dumped yet. Dump it and
	// remember we did so.
	for j := typeCacheAssoc - 1; j > 0; j-- {
		b.t[j] = b.t[j-1]
	}
	b.t[0] = t

	// dump the type
	dumpint(tagType)
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
	dumpint(uint64(t.size))
	if x := t.uncommon(); x == nil || t.nameOff(x.pkgpath).name() == "" {
		dumpstr(t.string())
	} else {
		pkgpathstr := t.nameOff(x.pkgpath).name()
		pkgpath := stringStructOf(&pkgpathstr)
		namestr := t.name()
		name := stringStructOf(&namestr)
		dumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))
		dwrite(pkgpath.str, uintptr(pkgpath.len))
		dwritebyte('.')
		dwrite(name.str, uintptr(name.len))
	}
	dumpbool(t.kind&kindDirectIface == 0 || t.ptrdata != 0)
}

// dump an object
func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
	dumpint(tagObject)
	dumpint(uint64(uintptr(obj)))
	dumpmemrange(obj, size)
	dumpfields(bv)
}

func dumpotherroot(description string, to unsafe.Pointer) {
	dumpint(tagOtherRoot)
	dumpstr(description)
	dumpint(uint64(uintptr(to)))
}

func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
	dumpint(tagFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}

type childInfo struct {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	argoff uintptr   // where the arguments start in the frame
	arglen uintptr   // size of args region
	args   bitvector // if args.n >= 0, pointer map of args region
	sp     *uint8    // callee sp
	depth  uintptr   // depth in call stack (0 == most recent)
}

// dump kinds & offsets of interesting fields in bv
func dumpbv(cbv *bitvector, offset uintptr) {
	for i := uintptr(0); i < uintptr(cbv.n); i++ {
		if cbv.ptrbit(i) == 1 {
			dumpint(fieldKindPtr)
			dumpint(uint64(offset + i*goarch.PtrSize))
		}
	}
}
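
// For example, a bitvector with bits 0 and 2 set and offset 16 emits
// the pairs (fieldKindPtr, 16) and (fieldKindPtr, 16+2*goarch.PtrSize);
// callers terminate the field list separately with fieldKindEol.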

func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
	child := (*childInfo)(arg)
	f := s.fn

	// Figure out what we can about our stack map
	pc := s.pc
	pcdata := int32(-1) // Use the entry map at function entry
	if pc != f.entry() {
		pc--
		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, pc, nil)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}
	stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))

	var bv bitvector
	if stkmap != nil && stkmap.n > 0 {
		bv = stackmapdata(stkmap, pcdata)
	} else {
		bv.n = -1
	}

	// Dump main body of stack frame.
	dumpint(tagStackFrame)
	dumpint(uint64(s.sp))                              // lowest address in frame
	dumpint(uint64(child.depth))                       // # of frames deep on the stack
	dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
	dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp)      // frame contents
	dumpint(uint64(f.entry()))
	dumpint(uint64(s.pc))
	dumpint(uint64(s.continpc))
	name := funcname(f)
	if name == "" {
		name = "unknown function"
	}
	dumpstr(name)

	// Dump fields in the outargs section
	if child.args.n >= 0 {
		dumpbv(&child.args, child.argoff)
	} else {
		// conservative - everything might be a pointer
		for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	}

	// Dump fields in the local vars section
	if stkmap == nil {
		// No locals information, dump everything.
		for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n < 0 {
		// Locals size information, dump just the locals.
		size := uintptr(-stkmap.n)
		for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n > 0 {
		// Locals bitmap information, scan just the pointers in
		// locals.
		dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
	}
	dumpint(fieldKindEol)

	// Record arg info for parent.
	child.argoff = s.argp - s.fp
	child.arglen = s.arglen
	child.sp = (*uint8)(unsafe.Pointer(s.sp))
	child.depth++
	stkmap = (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
	if stkmap != nil {
		child.args = stackmapdata(stkmap, pcdata)
	} else {
		child.args.n = -1
	}
	return true
}

func dumpgoroutine(gp *g) {
	var sp, pc, lr uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp
		pc = gp.syscallpc
		lr = 0
	} else {
		sp = gp.sched.sp
		pc = gp.sched.pc
		lr = gp.sched.lr
	}

	dumpint(tagGoroutine)
	dumpint(uint64(uintptr(unsafe.Pointer(gp))))
	dumpint(uint64(sp))
	dumpint(uint64(gp.goid))
	dumpint(uint64(gp.gopc))
	dumpint(uint64(readgstatus(gp)))
	dumpbool(isSystemGoroutine(gp, false))
	dumpbool(false) // isbackground
	dumpint(uint64(gp.waitsince))
	dumpstr(gp.waitreason.String())
	dumpint(uint64(uintptr(gp.sched.ctxt)))
	dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))

	// dump stack
	var child childInfo
	child.args.n = -1
	child.arglen = 0
	child.sp = nil
	child.depth = 0
	gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, noescape(unsafe.Pointer(&child)), 0)

	// dump defer & panic records
	for d := gp._defer; d != nil; d = d.link {
		dumpint(tagDefer)
		dumpint(uint64(uintptr(unsafe.Pointer(d))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		dumpint(uint64(d.sp))
		dumpint(uint64(d.pc))
		fn := *(**funcval)(unsafe.Pointer(&d.fn))
		dumpint(uint64(uintptr(unsafe.Pointer(fn))))
		if d.fn == nil {
			// d.fn can be nil for open-coded defers
			dumpint(uint64(0))
		} else {
			dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
		}
		dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
	}
	for p := gp._panic; p != nil; p = p.link {
		dumpint(tagPanic)
		dumpint(uint64(uintptr(unsafe.Pointer(p))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		eface := efaceOf(&p.arg)
		dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
		dumpint(uint64(uintptr(unsafe.Pointer(eface.data))))
		dumpint(0) // was p->defer, no longer recorded
		dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
	}
}

func dumpgs() {
	assertWorldStopped()

	// goroutines & stacks
	forEachG(func(gp *g) {
		status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
		switch status {
		default:
			print("runtime: unexpected G.status ", hex(status), "\n")
			throw("dumpgs in STW - bad status")
		case _Gdead:
			// ok
		case _Grunnable,
			_Gsyscall,
			_Gwaiting:
			dumpgoroutine(gp)
		}
	})
}

func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
	dumpint(tagQueuedFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}

func dumproots() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	// TODO(mwhudson): dump datamask etc from all objects
	// data segment
	dumpint(tagData)
	dumpint(uint64(firstmoduledata.data))
	dumpmemrange(unsafe.Pointer(firstmoduledata.data), firstmoduledata.edata-firstmoduledata.data)
	dumpfields(firstmoduledata.gcdatamask)

	// bss segment
	dumpint(tagBSS)
	dumpint(uint64(firstmoduledata.bss))
	dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss)
	dumpfields(firstmoduledata.gcbssmask)

	// finalizer specials attached to in-use spans
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			// Finalizers
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
				dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
			}
		}
	}

	// Finalizer queue
	iterate_finq(finq_callback)
}

// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span:
// the smallest size class is 8 bytes, so one page of _PageSize
// bytes holds at most _PageSize/8 objects.
var freemark [_PageSize / 8]bool

func dumpobjs() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		p := s.base()
		size := s.elemsize
		n := (s.npages << _PageShift) / size
		if n > uintptr(len(freemark)) {
			throw("freemark array doesn't have enough entries")
		}

		for freeIndex := uintptr(0); freeIndex < s.nelems; freeIndex++ {
			if s.isFree(freeIndex) {
				freemark[freeIndex] = true
			}
		}

		for j := uintptr(0); j < n; j, p = j+1, p+size {
			if freemark[j] {
				freemark[j] = false
				continue
			}
			dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
		}
	}
}

func dumpparams() {
	dumpint(tagParams)
	// Dump a big-endian flag, detected by inspecting the first
	// byte of a one-word value: on a little-endian machine it is 1.
	x := uintptr(1)
	if *(*byte)(unsafe.Pointer(&x)) == 1 {
		dumpbool(false) // little-endian ptrs
	} else {
		dumpbool(true) // big-endian ptrs
	}
	dumpint(goarch.PtrSize)
	var arenaStart, arenaEnd uintptr
	for i1 := range mheap_.arenas {
		if mheap_.arenas[i1] == nil {
			continue
		}
		for i, ha := range mheap_.arenas[i1] {
			if ha == nil {
				continue
			}
			base := arenaBase(arenaIdx(i1)<<arenaL1Shift | arenaIdx(i))
			if arenaStart == 0 || base < arenaStart {
				arenaStart = base
			}
			if base+heapArenaBytes > arenaEnd {
				arenaEnd = base + heapArenaBytes
			}
		}
	}
	dumpint(uint64(arenaStart))
	dumpint(uint64(arenaEnd))
	dumpstr(goarch.GOARCH)
	dumpstr(buildVersion)
	dumpint(uint64(ncpu))
}

func itab_callback(tab *itab) {
	t := tab._type
	dumptype(t)
	dumpint(tagItab)
	dumpint(uint64(uintptr(unsafe.Pointer(tab))))
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
}

func dumpitabs() {
	iterate_itabs(itab_callback)
}

func dumpms() {
	for mp := allm; mp != nil; mp = mp.alllink {
		dumpint(tagOSThread)
		dumpint(uint64(uintptr(unsafe.Pointer(mp))))
		dumpint(uint64(mp.id))
		dumpint(mp.procid)
	}
}

//go:systemstack
func dumpmemstats(m *MemStats) {
	assertWorldStopped()

	// These ints should be identical to the exported
	// MemStats structure and should be ordered the same
	// way too.
	dumpint(tagMemStats)
	dumpint(m.Alloc)
	dumpint(m.TotalAlloc)
	dumpint(m.Sys)
	dumpint(m.Lookups)
	dumpint(m.Mallocs)
	dumpint(m.Frees)
	dumpint(m.HeapAlloc)
	dumpint(m.HeapSys)
	dumpint(m.HeapIdle)
	dumpint(m.HeapInuse)
	dumpint(m.HeapReleased)
	dumpint(m.HeapObjects)
	dumpint(m.StackInuse)
	dumpint(m.StackSys)
	dumpint(m.MSpanInuse)
	dumpint(m.MSpanSys)
	dumpint(m.MCacheInuse)
	dumpint(m.MCacheSys)
	dumpint(m.BuckHashSys)
	dumpint(m.GCSys)
	dumpint(m.OtherSys)
	dumpint(m.NextGC)
	dumpint(m.LastGC)
	dumpint(m.PauseTotalNs)
	for i := 0; i < 256; i++ {
		dumpint(m.PauseNs[i])
	}
	dumpint(uint64(m.NumGC))
}

func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
	stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
	dumpint(tagMemProf)
	dumpint(uint64(uintptr(unsafe.Pointer(b))))
	dumpint(uint64(size))
	dumpint(uint64(nstk))
	for i := uintptr(0); i < nstk; i++ {
		pc := stk[i]
		f := findfunc(pc)
		if !f.valid() {
			// No symbol information; format the pc as "(0x1234)"
			// by writing the text backwards into buf.
			var buf [64]byte
			n := len(buf)
			n--
			buf[n] = ')'
			if pc == 0 {
				n--
				buf[n] = '0'
			} else {
				for pc > 0 {
					n--
					buf[n] = "0123456789abcdef"[pc&15]
					pc >>= 4
				}
			}
			n--
			buf[n] = 'x'
			n--
			buf[n] = '0'
			n--
			buf[n] = '('
			dumpslice(buf[n:])
			dumpstr("?")
			dumpint(0)
		} else {
			dumpstr(funcname(f))
			if i > 0 && pc > f.entry() {
				pc--
			}
			file, line := funcline(f, pc)
			dumpstr(file)
			dumpint(uint64(line))
		}
	}
	dumpint(uint64(allocs))
	dumpint(uint64(frees))
}

func dumpmemprof() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	iterate_memprof(dumpmemprof_callback)
	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialProfile {
				continue
			}
			spp := (*specialprofile)(unsafe.Pointer(sp))
			p := s.base() + uintptr(spp.special.offset)
			dumpint(tagAllocSample)
			dumpint(uint64(p))
			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
		}
	}
}

var dumphdr = []byte("go1.7 heap dump\n")

func mdump(m *MemStats) {
	assertWorldStopped()

	// make sure we're done sweeping
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			s.ensureSwept()
		}
	}
	memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
	dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
	dumpparams()
	dumpitabs()
	dumpobjs()
	dumpgs()
	dumpms()
	dumproots()
	dumpmemstats(m)
	dumpmemprof()
	dumpint(tagEOF)
	flush()
}

func writeheapdump_m(fd uintptr, m *MemStats) {
	assertWorldStopped()

	_g_ := getg()
	casgstatus(_g_.m.curg, _Grunning, _Gwaiting)
	_g_.waitreason = waitReasonDumpingHeap

	// Update stats so we can dump them.
	// As a side effect, flushes all the mcaches so the mspan.freelist
	// lists contain all the free objects.
	updatememstats()

	// Set dump file.
	dumpfd = fd

	// Call dump routine.
	mdump(m)

	// Reset dump file.
	dumpfd = 0
	if tmpbuf != nil {
		sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		tmpbuf = nil
	}

	casgstatus(_g_.m.curg, _Gwaiting, _Grunning)
}

// dumpint() the kind & offset of each field in an object.
func dumpfields(bv bitvector) {
	dumpbv(&bv, 0)
	dumpint(fieldKindEol)
}

// makeheapobjbv builds, in tmpbuf, a pointer bitmap for the object of
// the given size at p: bit i of the result is set when the word at
// p+i*goarch.PtrSize holds a pointer.
func makeheapobjbv(p uintptr, size uintptr) bitvector {
	// Extend the temp buffer if necessary.
	nptr := size / goarch.PtrSize
	if uintptr(len(tmpbuf)) < nptr/8+1 {
		if tmpbuf != nil {
			sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		}
		n := nptr/8 + 1
		p := sysAlloc(n, &memstats.other_sys)
		if p == nil {
			throw("heapdump: out of memory")
		}
		tmpbuf = (*[1 << 30]byte)(p)[:n]
	}
	// Convert heap bitmap to pointer bitmap.
	for i := uintptr(0); i < nptr/8+1; i++ {
		tmpbuf[i] = 0
	}
	i := uintptr(0)
	hbits := heapBitsForAddr(p)
	for ; i < nptr; i++ {
		if !hbits.morePointers() {
			break // end of object
		}
		if hbits.isPointer() {
			tmpbuf[i/8] |= 1 << (i % 8)
		}
		hbits = hbits.next()
	}
	return bitvector{int32(i), &tmpbuf[0]}
}