// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Malloc profiling.
// Patterned after tcmalloc's algorithms; shorter code.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// NOTE(rsc): Everything here could use cas if contention became an issue.
var proflock mutex

// All memory allocations are local and do not escape outside of the profiler.
// The profiler is forbidden from referring to garbage-collected memory.

const (
	// profile types
	memProfile bucketType = 1 + iota
	blockProfile
	mutexProfile

	// a profile bucket from one of the categories above whose stack
	// trace has been fixed up / pruned.
	prunedProfile

	// size of bucket hash table
	buckHashSize = 179999

	// max depth of stack to record in bucket
	maxStack = 32
)

type bucketType int

// A bucket holds per-call-stack profiling information.
// The representation is a bit sleazy, inherited from C.
// This struct defines the bucket header. It is followed in
// memory by the stack words and then the actual record
// data, either a memRecord or a blockRecord.
//
// Per-call-stack profiling information.
// Lookup by hashing call stack into a linked-list hash table.
//
// No heap pointers.
//
//go:notinheap
type bucket struct {
	next    *bucket
	allnext *bucket
	typ     bucketType // memBucket or blockBucket (includes mutexProfile)
	hash    uintptr
	size    uintptr
	nstk    uintptr
	skip    int
}

// A memRecord is the bucket data for a bucket of type memProfile,
// part of the memory profile.
type memRecord struct {
	// The following complex 3-stage scheme of stats accumulation
	// is required to obtain a consistent picture of mallocs and frees
	// for some point in time.
	// The problem is that mallocs come in real time, while frees
	// come only after a GC during concurrent sweeping. So if we would
	// naively count them, we would get a skew toward mallocs.
	//
	// Hence, we delay information to get consistent snapshots as
	// of mark termination. Allocations count toward the next mark
	// termination's snapshot, while sweep frees count toward the
	// previous mark termination's snapshot:
	//
	//              MT          MT          MT          MT
	//             .·|         .·|         .·|         .·|
	//          .·˙  |      .·˙  |      .·˙  |      .·˙  |
	//       .·˙     |   .·˙     |   .·˙     |   .·˙     |
	//    .·˙        |.·˙        |.·˙        |.·˙        |
	//
	//       alloc → ▲ ← free
	//               ┠┅┅┅┅┅┅┅┅┅┅┅P
	//       C+2     →    C+1    →  C
	//
	//                   alloc → ▲ ← free
	//                           ┠┅┅┅┅┅┅┅┅┅┅┅P
	//                   C+2     →    C+1    →  C
	//
	// Since we can't publish a consistent snapshot until all of
	// the sweep frees are accounted for, we wait until the next
	// mark termination ("MT" above) to publish the previous mark
	// termination's snapshot ("P" above). To do this, allocation
	// and free events are accounted to *future* heap profile
	// cycles ("C+n" above) and we only publish a cycle once all
	// of the events from that cycle must be done. Specifically:
	//
	// Mallocs are accounted to cycle C+2.
	// Explicit frees are accounted to cycle C+2.
	// GC frees (done during sweeping) are accounted to cycle C+1.
	//
	// After mark termination, we increment the global heap
	// profile cycle counter and accumulate the stats from cycle C
	// into the active profile.

	// active is the currently published profile. A profiling
	// cycle can be accumulated into active once it's complete.
	active memRecordCycle

	// future records the profile events we're counting for cycles
	// that have not yet been published. This is a ring buffer
	// indexed by the global heap profile cycle C and stores
	// cycles C, C+1, and C+2. Unlike active, these counts are
	// only for a single cycle; they are not cumulative across
	// cycles.
	//
	// We store cycle C here because there's a window between when
	// C becomes the active cycle and when we've flushed it to
	// active.
	future [3]memRecordCycle
}
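
// As a concrete illustration of the ring indexing used below (a worked
// example, not upstream documentation): if mProf.cycle is currently 5,
// then mProf_Malloc records into future[(5+2)%3] == future[1], sweep
// frees recorded by mProf_Free go into future[(5+1)%3] == future[0],
// and mProf_FlushLocked drains future[5%3] == future[2] into active.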

// A memRecordCycle holds the allocation and free counts (and byte
// totals) accumulated for a single heap profile cycle.
type memRecordCycle struct {
	allocs, frees           uintptr
	alloc_bytes, free_bytes uintptr
}

// add accumulates b into a. It does not zero b.
func (a *memRecordCycle) add(b *memRecordCycle) {
	a.allocs += b.allocs
	a.frees += b.frees
	a.alloc_bytes += b.alloc_bytes
	a.free_bytes += b.free_bytes
}

// A blockRecord is the bucket data for a bucket of type blockProfile,
// which is used in blocking and mutex profiles.
type blockRecord struct {
	count  int64
	cycles int64
}

var (
	mbuckets    *bucket // memory profile buckets
	bbuckets    *bucket // blocking profile buckets
	xbuckets    *bucket // mutex profile buckets
	sbuckets    *bucket // pre-symbolization profile buckets (stacks fixed up)
	freebuckets *bucket // freelist of unused fixed up profile buckets
	buckhash    *[buckHashSize]*bucket
	bucketmem   uintptr

	mProf struct {
		// All fields in mProf are protected by proflock.

		// cycle is the global heap profile cycle. This wraps
		// at mProfCycleWrap.
		cycle uint32
		// flushed indicates that future[cycle] in all buckets
		// has been flushed to the active profile.
		flushed bool
	}
)

const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)

// payloadOffset returns the byte offset of the profile payload within
// a bucket (it skips past the bucket struct itself and then the stack
// trace).
func payloadOffset(typ bucketType, nstk uintptr) uintptr {
	if typ == prunedProfile {
		// To allow reuse of prunedProfile buckets between different
		// collections, allocate them with the max stack size (the portion
		// of the stack used will vary from trace to trace).
		nstk = maxStack
	}
	return unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
}
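
// For orientation, the in-memory layout of a bucket allocation is
// (a sketch, not a declaration):
//
//	+---------------+------------------+---------------------------+
//	| bucket header | nstk stack words | memRecord or blockRecord  |
//	+---------------+------------------+---------------------------+
//
// stk() below locates the stack words immediately after the header,
// while mp() and bp() use payloadOffset to reach the record portion.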

func max(x, y uintptr) uintptr {
	if x > y {
		return x
	}
	return y
}

// newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int, skipCount int) *bucket {
	size := payloadOffset(typ, uintptr(nstk))
	switch typ {
	default:
		throw("invalid profile bucket type")
	case prunedProfile:
		// stack-fixed buckets are large enough to accommodate any payload.
		size += max(unsafe.Sizeof(memRecord{}), unsafe.Sizeof(blockRecord{}))
	case memProfile:
		size += unsafe.Sizeof(memRecord{})
	case blockProfile, mutexProfile:
		size += unsafe.Sizeof(blockRecord{})
	}

	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
	bucketmem += size
	b.typ = typ
	b.nstk = uintptr(nstk)
	b.skip = skipCount
	return b
}

// stk returns the slice in b holding the stack.
func (b *bucket) stk() []uintptr {
	stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
	return stk[:b.nstk:b.nstk]
}

// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
	if b.typ != memProfile && b.typ != prunedProfile {
		throw("bad use of bucket.mp")
	}
	return (*memRecord)(add(unsafe.Pointer(b), payloadOffset(b.typ, b.nstk)))
}

// bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord {
	if b.typ != blockProfile && b.typ != mutexProfile && b.typ != prunedProfile {
		throw("bad use of bucket.bp")
	}
	return (*blockRecord)(add(unsafe.Pointer(b), payloadOffset(b.typ, b.nstk)))
}

// Return the bucket for stk[0:nstk], allocating new bucket if needed.
func stkbucket(typ bucketType, size uintptr, skip int, stk []uintptr, alloc bool) *bucket {
	if buckhash == nil {
		buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
		if buckhash == nil {
			throw("runtime: cannot allocate memory")
		}
	}

	// Hash stack.
	var h uintptr
	for _, pc := range stk {
		h += pc
		h += h << 10
		h ^= h >> 6
	}
	// hash in size
	h += size
	h += h << 10
	h ^= h >> 6
	// finalize
	h += h << 3
	h ^= h >> 11
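	// The mixing above is a variant of the Jenkins "one-at-a-time"
	// integer hash (with the final left-shift round omitted).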

	i := int(h % buckHashSize)
	for b := buckhash[i]; b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			return b
		}
	}

	if !alloc {
		return nil
	}

	// Create new bucket.
	b := newBucket(typ, len(stk), skip)
	copy(b.stk(), stk)
	b.hash = h
	b.size = size
	b.next = buckhash[i]
	buckhash[i] = b
	if typ == memProfile {
		b.allnext = mbuckets
		mbuckets = b
	} else if typ == mutexProfile {
		b.allnext = xbuckets
		xbuckets = b
	} else if typ == prunedProfile {
		b.allnext = sbuckets
		sbuckets = b
	} else {
		b.allnext = bbuckets
		bbuckets = b
	}
	return b
}

func eqslice(x, y []uintptr) bool {
	if len(x) != len(y) {
		return false
	}
	for i, xi := range x {
		if xi != y[i] {
			return false
		}
	}
	return true
}

// mProf_NextCycle publishes the next heap profile cycle and creates a
// fresh heap profile cycle. This operation is fast and can be done
// during STW. The caller must call mProf_Flush before calling
// mProf_NextCycle again.
//
// This is called by mark termination during STW so allocations and
// frees after the world is started again count towards a new heap
// profiling cycle.
func mProf_NextCycle() {
	lock(&proflock)
	// We explicitly wrap mProf.cycle rather than depending on
	// uint wraparound because the memRecord.future ring does not
	// itself wrap at a power of two.
	mProf.cycle = (mProf.cycle + 1) % mProfCycleWrap
	mProf.flushed = false
	unlock(&proflock)
}

// mProf_Flush flushes the events from the current heap profiling
// cycle into the active profile. After this it is safe to start a new
// heap profiling cycle with mProf_NextCycle.
//
// This is called by GC after mark termination starts the world. In
// contrast with mProf_NextCycle, this is somewhat expensive, but safe
// to do concurrently.
func mProf_Flush() {
	lock(&proflock)
	if !mProf.flushed {
		mProf_FlushLocked()
		mProf.flushed = true
	}
	unlock(&proflock)
}

func mProf_FlushLocked() {
	c := mProf.cycle
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()

		// Flush cycle C into the published profile and clear
		// it for reuse.
		mpc := &mp.future[c%uint32(len(mp.future))]
		mp.active.add(mpc)
		*mpc = memRecordCycle{}
	}
}

// mProf_PostSweep records that all sweep frees for this GC cycle have
// completed. This has the effect of publishing the heap profile
// snapshot as of the last mark termination without advancing the heap
// profile cycle.
func mProf_PostSweep() {
	lock(&proflock)
	// Flush cycle C+1 to the active profile so everything as of
	// the last mark termination becomes visible. *Don't* advance
	// the cycle, since we're still accumulating allocs in cycle
	// C+2, which have to become C+1 in the next mark termination
	// and so on.
	c := mProf.cycle
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		mpc := &mp.future[(c+1)%uint32(len(mp.future))]
		mp.active.add(mpc)
		*mpc = memRecordCycle{}
	}
	unlock(&proflock)
}
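
// Summarizing the comments above, a single GC cycle drives the heap
// profile through these hooks: mProf_NextCycle runs at mark termination
// while the world is stopped, mProf_Flush runs once the world has been
// restarted, and mProf_PostSweep runs after sweeping completes to
// publish the snapshot as of the last mark termination.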

// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
	var stk [maxStack]uintptr
	nstk := callersRaw(stk[:])
	lock(&proflock)
	skip := 1
	b := stkbucket(memProfile, size, skip, stk[:nstk], true)
	c := mProf.cycle
	mp := b.mp()
	mpc := &mp.future[(c+2)%uint32(len(mp.future))]
	mpc.allocs++
	mpc.alloc_bytes += size
	unlock(&proflock)

	// Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
	// This reduces potential contention and chances of deadlocks.
	// Since the object must be alive during call to mProf_Malloc,
	// it's fine to do this non-atomically.
	systemstack(func() {
		setprofilebucket(p, b)
	})
}

// Called when freeing a profiled block.
func mProf_Free(b *bucket, size uintptr) {
	lock(&proflock)
	c := mProf.cycle
	mp := b.mp()
	mpc := &mp.future[(c+1)%uint32(len(mp.future))]
	mpc.frees++
	mpc.free_bytes += size
	unlock(&proflock)
}

var blockprofilerate uint64 // in CPU ticks

// SetBlockProfileRate controls the fraction of goroutine blocking events
// that are reported in the blocking profile. The profiler aims to sample
// an average of one blocking event per rate nanoseconds spent blocked.
//
// To include every blocking event in the profile, pass rate = 1.
// To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int) {
	var r int64
	if rate <= 0 {
		r = 0 // disable profiling
	} else if rate == 1 {
		r = 1 // profile everything
	} else {
		// convert ns to cycles, use float64 to prevent overflow during multiplication
		r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
		if r == 0 {
			r = 1
		}
	}

	atomic.Store64(&blockprofilerate, uint64(r))
}
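
// A typical caller enables block profiling before the workload of
// interest and then collects the data through runtime/pprof, e.g.
// (hypothetical user code, not part of this package):
//
//	runtime.SetBlockProfileRate(1) // record every blocking event
//	...
//	pprof.Lookup("block").WriteTo(w, 0)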

func blockevent(cycles int64, skip int) {
	if cycles <= 0 {
		cycles = 1
	}
	if blocksampled(cycles) {
		saveblockevent(cycles, skip+1, blockProfile)
	}
}

func blocksampled(cycles int64) bool {
	rate := int64(atomic.Load64(&blockprofilerate))
	if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
		return false
	}
	return true
}
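
// In other words (a reading of the condition above, not an upstream
// comment): with a positive rate, an event that blocked for at least
// rate ticks is always recorded, while a shorter event is recorded with
// probability roughly cycles/rate, so long waits are never missed and
// short waits are sampled in proportion to their duration.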

func saveblockevent(cycles int64, skip int, which bucketType) {
	gp := getg()
	var nstk int
	var stk [maxStack]uintptr
	if gp.m.curg == nil || gp.m.curg == gp {
		nstk = callersRaw(stk[:])
	} else {
		// FIXME: This should get a traceback of gp.m.curg.
		// nstk = gcallers(gp.m.curg, skip, stk[:])
		nstk = callersRaw(stk[:])
	}
	lock(&proflock)
	b := stkbucket(which, 0, skip, stk[:nstk], true)
	b.bp().count++
	b.bp().cycles += cycles
	unlock(&proflock)
}

var mutexprofilerate uint64 // fraction sampled

// SetMutexProfileFraction controls the fraction of mutex contention events
// that are reported in the mutex profile. On average 1/rate events are
// reported. The previous rate is returned.
//
// To turn off profiling entirely, pass rate 0.
// To just read the current rate, pass rate < 0.
// (For n>1 the details of sampling may change.)
func SetMutexProfileFraction(rate int) int {
	if rate < 0 {
		return int(mutexprofilerate)
	}
	old := mutexprofilerate
	atomic.Store64(&mutexprofilerate, uint64(rate))
	return int(old)
}
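
// For example (hypothetical user code, not part of this package), a
// program that wants roughly one in five contention events recorded in
// the "mutex" profile would call:
//
//	prev := runtime.SetMutexProfileFraction(5)
//	defer runtime.SetMutexProfileFraction(prev)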

//go:linkname mutexevent sync.event
func mutexevent(cycles int64, skip int) {
	if cycles < 0 {
		cycles = 0
	}
	rate := int64(atomic.Load64(&mutexprofilerate))
	// TODO(pjw): measure impact of always calling fastrand vs using something
	// like malloc.go:nextSample()
	if rate > 0 && int64(fastrand())%rate == 0 {
		saveblockevent(cycles, skip+1, mutexProfile)
	}
}

// Go interface to profile data.

// A StackRecord describes a single execution stack.
type StackRecord struct {
	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfileRate controls the fraction of memory allocations
// that are recorded and reported in the memory profile.
// The profiler aims to sample an average of
// one allocation per MemProfileRate bytes allocated.
//
// To include every allocated block in the profile, set MemProfileRate to 1.
// To turn off profiling entirely, set MemProfileRate to 0.
//
// The tools that process the memory profiles assume that the
// profile rate is constant across the lifetime of the program
// and equal to the current value. Programs that change the
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
var MemProfileRate int = 512 * 1024

// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
	AllocObjects, FreeObjects int64       // number of objects allocated, freed
	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
}

// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }

// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 {
	return r.AllocObjects - r.FreeObjects
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// reusebucket tries to pick a prunedProfile bucket off
// the freebuckets list, returning it if one is available or nil
// if the free list is empty.
func reusebucket(nstk int) *bucket {
	var b *bucket
	if freebuckets != nil {
		b = freebuckets
		freebuckets = freebuckets.allnext
		b.typ = prunedProfile
		b.nstk = uintptr(nstk)
		mp := b.mp()
		// Hack: rely on the fact that memprofile records are
		// larger than blockprofile records when clearing.
		*mp = memRecord{}
	}
	return b
}

// freebucket appends the specified prunedProfile bucket
// onto the free list, and removes references to it from the hash.
func freebucket(tofree *bucket) *bucket {
	// Thread this bucket into the free list.
	ret := tofree.allnext
	tofree.allnext = freebuckets
	freebuckets = tofree

	// Clean up the hash. The hash may point directly to this bucket...
	i := int(tofree.hash % buckHashSize)
	if buckhash[i] == tofree {
		buckhash[i] = tofree.next
	} else {
		// ... or when this bucket was inserted by stkbucket, it may have been
		// chained off some other unrelated bucket.
		for b := buckhash[i]; b != nil; b = b.next {
			if b.next == tofree {
				b.next = tofree.next
				break
			}
		}
	}
	return ret
}

// fixupStack takes a 'raw' stack trace (stack of PCs generated by
// callersRaw) and performs pre-symbolization fixup on it, returning
// the results in 'canonStack'. For each frame we look at the
// file/func/line information, then use that info to decide whether to
// include the frame in the final symbolized stack (removing frames
// corresponding to 'morestack' routines, for example). We also expand
// frames if the PC values to which they refer correspond to inlined
// functions to allow for expanded symbolic info to be filled in
// later. Note: there is code in go-callers.c's backtrace_full callback()
// function that performs very similar fixups; these two code paths
// should be kept in sync.
func fixupStack(stk []uintptr, skip int, canonStack *[maxStack]uintptr, size uintptr) int {
	var cidx int
	var termTrace bool
	// Increase the skip count to take into account the frames corresponding
	// to runtime.callersRaw and to the C routine that it invokes.
	skip += 2
	sawSigtramp := false
	for _, pc := range stk {
		// Subtract 1 from PC to undo the 1 we added in callback in
		// go-callers.c.
		function, file, _, frames := funcfileline(pc-1, -1, false)

		// Skip an unnamed function above sigtramp, as it is
		// likely the signal handler.
		if sawSigtramp {
			sawSigtramp = false
			if function == "" {
				continue
			}
		}

		// Skip split-stack functions (match by function name)
		skipFrame := false
		if hasPrefix(function, "_____morestack_") || hasPrefix(function, "__morestack_") {
			skipFrame = true
		}

		// Skip split-stack functions (match by file)
		if hasSuffix(file, "/morestack.S") {
			skipFrame = true
		}

		// Skip thunks and recover functions and other functions
		// specific to gccgo, that do not appear in the gc toolchain.
		fcn := function
		if hasSuffix(fcn, "..r") {
			skipFrame = true
		} else if function == "runtime.deferreturn" || function == "runtime.sighandler" {
			skipFrame = true
		} else if function == "runtime.sigtramp" || function == "runtime.sigtrampgo" {
			skipFrame = true
			// Also skip subsequent unnamed functions,
			// which will be the signal handler itself.
			sawSigtramp = true
		} else {
			for fcn != "" && (fcn[len(fcn)-1] >= '0' && fcn[len(fcn)-1] <= '9') {
				fcn = fcn[:len(fcn)-1]
			}
			if hasSuffix(fcn, "..stub") || hasSuffix(fcn, "..thunk") {
				skipFrame = true
			}
		}
		if skipFrame {
			continue
		}

		// Terminate the trace if we encounter a frame corresponding to
		// runtime.main, runtime.kickoff, makecontext, etc. See the
		// corresponding code in go-callers.c, callback function used
		// with backtrace_full.
		if function == "makecontext" {
			termTrace = true
		}
		if hasSuffix(file, "/proc.c") && function == "runtime_mstart" {
			termTrace = true
		}
		if hasSuffix(file, "/proc.go") &&
			(function == "runtime.main" || function == "runtime.kickoff") {
			termTrace = true
		}

		// Expand inline frames.
		for i := 0; i < frames; i++ {
			(*canonStack)[cidx] = pc
			cidx++
			if cidx >= maxStack {
				termTrace = true
				break
			}
		}
		if termTrace {
			break
		}
	}

	// Apply skip count. Needs to be done after expanding inline frames.
	if skip != 0 {
		if skip >= cidx {
			return 0
		}
		copy(canonStack[:cidx-skip], canonStack[skip:])
		return cidx - skip
	}

	return cidx
}

// fixupBucket takes a raw memprofile bucket and creates a new bucket
// in which the stack trace has been fixed up (inline frames expanded,
// unwanted frames stripped out). Original bucket is left unmodified;
// a new prunedProfile bucket may be generated as a side effect.
// Payload information from the original bucket is incorporated into
// the new bucket.
func fixupBucket(b *bucket) {
	var canonStack [maxStack]uintptr
	frames := fixupStack(b.stk(), b.skip, &canonStack, b.size)
	cb := stkbucket(prunedProfile, b.size, 0, canonStack[:frames], true)
	switch b.typ {
	default:
		throw("invalid profile bucket type")
	case memProfile:
		rawrecord := b.mp()
		cb.mp().active.add(&rawrecord.active)
	case blockProfile, mutexProfile:
		bpcount := b.bp().count
		cb.bp().count += bpcount
		cb.bp().cycles += b.bp().cycles
	}
}

// MemProfile returns a profile of memory allocated and freed per allocation
// site.
//
// MemProfile returns n, the number of records in the current memory profile.
// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
// If len(p) < n, MemProfile does not change p and returns n, false.
//
// If inuseZero is true, the profile includes allocation records
// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
// These are sites where memory was allocated, but it has all
// been released back to the runtime.
//
// The returned profile may be up to two garbage collection cycles old.
// This is to avoid skewing the profile toward allocations; because
// allocations happen in real time but frees are delayed until the garbage
// collector performs sweeping, the profile only accounts for allocations
// that have had a chance to be freed by the garbage collector.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
	lock(&proflock)
	// If we're between mProf_NextCycle and mProf_Flush, take care
	// of flushing to the active profile so we only have to look
	// at the active profile below.
	mProf_FlushLocked()
	clear := true
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
			n++
		}
		if mp.active.allocs != 0 || mp.active.frees != 0 {
			clear = false
		}
	}
	if clear {
		// Absolutely no data, suggesting that a garbage collection
		// has not yet happened. In order to allow profiling when
		// garbage collection is disabled from the beginning of execution,
		// accumulate all of the cycles, and recount buckets.
		n = 0
		for b := mbuckets; b != nil; b = b.allnext {
			mp := b.mp()
			for c := range mp.future {
				mp.active.add(&mp.future[c])
				mp.future[c] = memRecordCycle{}
			}
			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
				n++
			}
		}
	}
	if n <= len(p) {
		var bnext *bucket

		// Post-process raw buckets to fix up their stack traces
		for b := mbuckets; b != nil; b = bnext {
			bnext = b.allnext
			mp := b.mp()
			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
				fixupBucket(b)
			}
		}

		// Record pruned/fixed-up buckets
		ok = true
		idx := 0
		for b := sbuckets; b != nil; b = b.allnext {
			record(&p[idx], b)
			idx++
		}
		n = idx

		// Free up pruned buckets for use in next round
		for b := sbuckets; b != nil; b = bnext {
			bnext = freebucket(b)
		}
		sbuckets = nil
	}
	unlock(&proflock)
	return
}
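
// Callers typically size p with the usual two-pass pattern, since the
// number of records can grow between calls. For example (hypothetical
// user code, not part of this package):
//
//	var p []runtime.MemProfileRecord
//	n, ok := runtime.MemProfile(nil, true)
//	for !ok {
//		p = make([]runtime.MemProfileRecord, n+50)
//		n, ok = runtime.MemProfile(p, true)
//	}
//	p = p[:n]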

// Write b's data to r.
func record(r *MemProfileRecord, b *bucket) {
	mp := b.mp()
	r.AllocBytes = int64(mp.active.alloc_bytes)
	r.FreeBytes = int64(mp.active.free_bytes)
	r.AllocObjects = int64(mp.active.allocs)
	r.FreeObjects = int64(mp.active.frees)
	for i, pc := range b.stk() {
		if i >= len(r.Stack0) {
			break
		}
		r.Stack0[i] = pc
	}
	for i := int(b.nstk); i < len(r.Stack0); i++ {
		r.Stack0[i] = 0
	}
}

func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
	lock(&proflock)
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
	}
	unlock(&proflock)
}

// BlockProfileRecord describes blocking events originated
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
	Count  int64
	Cycles int64
	StackRecord
}

func harvestBlockMutexProfile(buckets *bucket, p []BlockProfileRecord) (n int, ok bool) {
	for b := buckets; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		var bnext *bucket

		// Post-process raw buckets to create pruned/fixed-up buckets
		for b := buckets; b != nil; b = bnext {
			bnext = b.allnext
			fixupBucket(b)
		}

		// Record
		ok = true
		for b := sbuckets; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = bp.count
			r.Cycles = bp.cycles
			// Copy the stack, advancing i past the last copied
			// entry so the zeroing loop does not clobber it.
			i := 0
			for _, pc := range b.stk() {
				if i >= len(r.Stack0) {
					break
				}
				r.Stack0[i] = pc
				i++
			}
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}

		// Free up pruned buckets for use in next round.
		for b := sbuckets; b != nil; b = bnext {
			bnext = freebucket(b)
		}
		sbuckets = nil
	}
	return
}

// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&proflock)
	n, ok = harvestBlockMutexProfile(bbuckets, p)
	unlock(&proflock)
	return
}

// MutexProfile returns n, the number of records in the current mutex profile.
// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
// Otherwise, MutexProfile does not change p, and returns n, false.
//
// Most clients should use the runtime/pprof package
// instead of calling MutexProfile directly.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&proflock)
	n, ok = harvestBlockMutexProfile(xbuckets, p)
	unlock(&proflock)
	return
}
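
// BlockProfile and MutexProfile share MemProfile's sizing contract:
// call once to learn n, then call again with a slice of at least that
// length. The sampled data normally reaches users through the
// runtime/pprof "block" and "mutex" profiles rather than through these
// functions directly.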

// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
	for mp := first; mp != nil; mp = mp.alllink {
		n++
	}
	if n <= len(p) {
		ok = true
		i := 0
		for mp := first; mp != nil; mp = mp.alllink {
			for j := range mp.createstack {
				p[i].Stack0[j] = mp.createstack[j].pc
			}
			i++
		}
	}
	return
}

// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {
	gp := getg()

	isOK := func(gp1 *g) bool {
		// Checking isSystemGoroutine here makes GoroutineProfile
		// consistent with both NumGoroutine and Stack.
		return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
	}

	stopTheWorld("profile")

	n = 1
	for _, gp1 := range allgs {
		if isOK(gp1) {
			n++
		}
	}

	if n <= len(p) {
		ok = true
		r := p

		// Save current goroutine.
		saveg(gp, &r[0])
		r = r[1:]

		// Save other goroutines.
		for _, gp1 := range allgs {
			if isOK(gp1) {
				if len(r) == 0 {
					// Should be impossible, but better to return a
					// truncated profile than to crash the entire process.
					break
				}
				saveg(gp1, &r[0])
				r = r[1:]
			}
		}
	}

	startTheWorld()

	return n, ok
}

func saveg(gp *g, r *StackRecord) {
	if gp == getg() {
		var locbuf [32]location
		n := callers(1, locbuf[:])
		for i := 0; i < n; i++ {
			r.Stack0[i] = locbuf[i].pc
		}
		if n < len(r.Stack0) {
			r.Stack0[n] = 0
		}
	} else {
		// FIXME: Not implemented.
		r.Stack0[0] = 0
	}
}

// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int {
	if all {
		stopTheWorld("stack trace")
	}

	n := 0
	if len(buf) > 0 {
		gp := getg()
		// Force traceback=1 to override GOTRACEBACK setting,
		// so that Stack's results are consistent.
		// GOTRACEBACK is only about crash dumps.
		gp.m.traceback = 1
		gp.writebuf = buf[0:0:len(buf)]
		goroutineheader(gp)
		traceback(1)
		if all {
			tracebackothers(gp)
		}
		gp.m.traceback = 0
		n = len(gp.writebuf)
		gp.writebuf = nil
	}

	if all {
		startTheWorld()
	}
	return n
}
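
// Because Stack reports how many bytes it wrote but cannot grow buf,
// callers that want a complete dump usually retry with a larger buffer,
// e.g. (hypothetical user code, not part of this package):
//
//	buf := make([]byte, 1<<16)
//	for {
//		n := runtime.Stack(buf, true)
//		if n < len(buf) {
//			buf = buf[:n]
//			break
//		}
//		buf = make([]byte, 2*len(buf))
//	}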

// Tracing of alloc/free/gc.

var tracelock mutex

func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	if typ == nil {
		print("tracealloc(", p, ", ", hex(size), ")\n")
	} else {
		print("tracealloc(", p, ", ", hex(size), ", ", typ.string(), ")\n")
	}
	if gp.m.curg == nil || gp == gp.m.curg {
		goroutineheader(gp)
		traceback(1)
	} else {
		goroutineheader(gp.m.curg)
		// FIXME: Can't do traceback of other g.
	}
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracefree(p unsafe.Pointer, size uintptr) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracefree(", p, ", ", hex(size), ")\n")
	goroutineheader(gp)
	traceback(1)
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracegc() {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracegc()\n")
	// running on m->g0 stack; show all non-g0 goroutines
	tracebackothers(gp)
	print("end tracegc\n")
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}