// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc., and writes them to a buffer
// in a compact form. A precise nanosecond-precision timestamp and a stack
// trace are captured for most events.
// See https://golang.org/s/go15trace for more info.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // memstats.heap_live change [timestamp, heap_alloc]
	traceEvNextGC            = 34 // memstats.next_gc change [timestamp, next_gc]
	traceEvTimerGoroutine    = 35 // denotes timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvUserTaskCreate    = 45 // trace.NewContext [timestamp, internal task id, internal parent task id, stack, name string]
	traceEvUserTaskEnd       = 46 // end of a task [timestamp, internal task id, stack]
	traceEvUserRegion        = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), stack, name string]
	traceEvUserLog           = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
	traceEvCount             = 49
	// Byte is used but only 6 bits are available for event type.
	// The remaining 2 bits are used to specify the number of arguments.
	// That means the max event type value is 63.
)
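
// For illustration only (not part of the original file): the first byte of
// every event packs the event type into the low 6 bits and the inline
// argument count into the high 2 bits (see traceArgCountShift below). A
// sketch of the packing and unpacking:
//
//	b := traceEvGoStart | 2<<traceArgCountShift // event type 14, 2 args
//	ev := b &^ (3 << traceArgCountShift)        // recovers type 14
//	narg := b >> traceArgCountShift             // recovers 2
//
// Since only 6 bits remain for the type, traceEvCount must stay below 64.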

const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	// Tracing won't work reliably for architectures where cputicks is emulated
	// by nanotime, so the value doesn't matter for those architectures.
	traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for the end user.
	traceFutileWakeup byte = 128
)
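
// A worked note (illustration only, not part of the original file): the
// base-128 varint encoding below stores 7 payload bits per byte, so a uint64
// needs at most ceil(64/7) = 10 bytes, which is where traceBytesPerNumber
// comes from. The worst case, 1<<63, encodes as nine 0x80 continuation bytes
// followed by 0x01.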

// trace is global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock acquisition
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// Dictionary for traceEvString.
	//
	// TODO: central lock to access the map is not ideal.
	//   option: pre-assign ids to all user annotation region names and tags
	//   option: per-P cache
	//   option: sync.Map like data structure
	stringsLock mutex
	strings     map[string]uint64
	stringSeq   uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}

// traceBufHeader is per-P tracing buffer.
//go:notinheap
type traceBufHeader struct {
	link      traceBufPtr              // in trace.empty/full
	lastTicks uint64                   // when we wrote the last event
	pos       int                      // next write offset in arr
	stk       [traceStackSize]location // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
//
//go:notinheap
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
//
// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}
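
// For illustration only (not part of the original file): traceBufPtr
// round-trips through set/ptr without generating write barriers, e.g.
//
//	var tp traceBufPtr
//	tp.set(b)         // b is a *traceBuf obtained from sysAlloc
//	_ = tp.ptr() == b // true; tp is just the address stored as a uintptr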

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world, so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	stopTheWorld("start tracing")

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such a syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		startTheWorld()
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	mp := acquirem()
	stkBuf := make([]location, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)

	for _, gp := range allgs {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]location{location{pc: gp.startpc + sys.PCQuantum}})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	}
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
	// It will lead to a false conclusion that cputicks is broken.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// string to id mapping
	//  0 : reserved for an empty string
	//  remaining: other strings registered by traceString
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)

	startTheWorld()
	return nil
}

// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorld("stop tracing")

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		startTheWorld()
		return
	}

	traceGoSched()

	// Loop over all allocated Ps because dead Ps may still have
	// trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms, wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	startTheWorld()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range allp[:cap(allp)] {
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.strings = nil
	trace.shutdown = false
	unlock(&trace.lock)
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad,
		// but we would rather not crash the program because of tracing,
		// since tracing can be enabled at runtime on production servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.11 trace\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		for i := range timers {
			tb := &timers[i]
			if tb.gp != nil {
				data = append(data, traceEvTimerGoroutine|0<<traceArgCountShift)
				data = traceAppend(data, uint64(tb.gp.goid))
			}
		}
		// This will emit a bunch of full buffers, we will pick them up
		// on the next iteration.
		trace.stackTab.dump()
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which race
			// detector does not see. This is required to avoid false
			// race reports on writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
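
// A minimal consumer sketch (illustration only, not part of the original
// file), assuming a hypothetical io.Writer w; this mirrors what
// runtime/trace.Start and Stop arrange on the caller's behalf:
//
//	if err := runtime.StartTrace(); err != nil {
//		return err
//	}
//	go func() {
//		for {
//			data := runtime.ReadTrace()
//			if data == nil {
//				break // tracing stopped and all buffers drained
//			}
//			w.Write(data) // consume before the next ReadTrace call
//		}
//	}()
//	// ... run the workload to be traced ...
//	runtime.StopTrace() // returns once the reader has drained everything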

// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader.ptr()
	trace.reader.set(nil)
	unlock(&trace.lock)
	return gp
}

// traceProcFree frees trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceFullQueue queues buf into queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}
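
// For illustration only (not part of the original file): the full list is a
// FIFO queue, so with trace.lock held,
//
//	traceFullQueue(a)
//	traceFullQueue(b)
//	_ = traceFullDequeue() // returns a
//	_ = traceFullDequeue() // returns b; trace.fullHead is now 0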

// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
// ev is event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.

	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceAcquireBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceAcquireBuffer.
	//
	// Note trace_userTaskCreate runs the same check.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	if skip > 0 {
		if getg() == mp.curg {
			skip++ // +1 because stack is captured in traceEventLocked.
		}
	}
	traceEventLocked(0, mp, pid, bufp, ev, skip, args...)
	traceReleaseBuffer(pid)
}

func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64) {
	buf := bufp.ptr()
	// TODO: test on non-zero extraBytes param.
	maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two additional params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}

	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for number of arguments.
	// If number is >= 3, then the event type is followed by event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
}
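
// A worked encoding example (illustration only, not part of the original
// file): traceEvProcStop carries no explicit args and no stack (skip = -1),
// so with a tick delta of 100 the whole event is two bytes:
//
//	0x06 // traceEvProcStop | 0<<traceArgCountShift
//	0x64 // varint(100), cputicks delta from the previous event in the batch
//
// An event with narg >= 3 instead stores narg = 3 in the first byte and
// follows it with the one-byte length reserved via buf.varint(0) above.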

func traceStackID(mp *m, buf []location, skip int) uint64 {
	_g_ := getg()
	gp := mp.curg
	var nstk int
	if gp == _g_ {
		nstk = callers(skip+1, buf)
	} else if gp != nil {
		// FIXME: get stack trace of different goroutine.
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}

// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}

// traceFlush puts buf onto stack of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// initialize the buffer for a new batch
	ticks := uint64(cputicks()) / traceTickDiv
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}
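
// For illustration only (not part of the original file): a fresh buffer
// returned by traceFlush for pid 1 at tick count 200 begins with the batch
// header
//
//	0x41       // traceEvBatch | 1<<traceArgCountShift
//	0x01       // varint(1), the P id
//	0xC8, 0x01 // varint(200), the batch's base timestamp
//
// and every later event in the batch encodes its timestamp as a delta from
// lastTicks.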

// traceString adds a string to the trace.strings and returns the id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}

	lock(&trace.stringsLock)
	if raceenabled {
		// raceacquire is necessary because the map access
		// below is race annotated.
		raceacquire(unsafe.Pointer(&trace.stringsLock))
	}

	if id, ok := trace.strings[s]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&trace.stringsLock))
		}
		unlock(&trace.stringsLock)

		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	if raceenabled {
		racerelease(unsafe.Pointer(&trace.stringsLock))
	}
	unlock(&trace.stringsLock)

	// The memory allocation above may trigger tracing and
	// cause *bufp to change. The following code works with *bufp,
	// so there must be no memory allocation or any other activity
	// that can cause tracing after this point.

	buf := bufp.ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}
	buf.byte(traceEvString)
	buf.varint(id)

	// double-check the string and the length can fit.
	// Otherwise, truncate the string.
	slen := len(s)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}

	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], s[:slen])

	bufp.set(buf)
	return id, bufp
}

// traceAppend appends v to buf in little-endian-base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}
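
// For example (illustration only, not part of the original file):
//
//	traceAppend(nil, 3)   // []byte{0x03}
//	traceAppend(nil, 300) // []byte{0xAC, 0x02}: 300 == 0x2C | 0x02<<7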

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}

// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]location // real type [n]location
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns slice of PCs.
func (ts *traceStack) stack() []location {
	return (*[traceStackSize]location)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []location) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	var hash uintptr
	for _, loc := range pcs {
		hash += loc.pc
		hash += hash << 10
		hash ^= hash >> 6
	}
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		// Use memmove to avoid write barrier.
		memmove(unsafe.Pointer(&stkpc[i]), unsafe.Pointer(&pc), unsafe.Sizeof(pc))
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []location, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*unsafe.Sizeof(location{})))
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	bufp := traceFlush(0, 0)
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := stk.stack()
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				frame, bufp = traceFrameForPC(bufp, 0, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.pc))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
				bufp = traceFlush(bufp, 0)
			}
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(bufp)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
}

type traceFrame struct {
	funcID uint64
	fileID uint64
	line   uint64
}

// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f location) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame

	fn := f.function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.lineno)
	file := f.filename
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
//
//go:notinheap
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - sys.PtrSize]byte
}

// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = round(n, sys.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		// This is only safe because the strings returned by callers
		// are stored in a location that is not in the Go heap.
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}
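
// A usage sketch (illustration only, not part of the original file): the
// stack table above bump-allocates records out of 64KB blocks, e.g.
//
//	var a traceAlloc
//	p := a.alloc(24) // size rounded up to sys.PtrSize; allocates first block
//	q := a.alloc(8)  // carved from the same block, no new sysAlloc
//	a.drop()         // frees all blocks at once; p and q become invalid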

// The following functions write specific events to trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls,
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}

// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}

// traceGCSweepSpan traces the sweep of a single page.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}

func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}
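
// For illustration only (not part of the original file), the sweep events
// pair up as follows:
//
//	traceGCSweepStart()    // arms _p_.traceSweep; emits nothing yet
//	traceGCSweepSpan(4096) // first span swept: emits traceEvGCSweepStart
//	traceGCSweepSpan(8192) // later spans only accumulate traceSwept
//	traceGCSweepDone()     // emits traceEvGCSweepDone [swept, reclaimed]
//
// If no span is swept between Start and Done, no events are emitted at all.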

func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}

func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]location{location{pc: pc + sys.PCQuantum}})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}

func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _g_ == _p_.ptr().gcBgMarkWorker.ptr() {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}
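
// For illustration only (not part of the original file): callers can OR
// traceFutileWakeup into the event type, so a park after a lost race for a
// mutex might look like
//
//	traceGoPark(traceEvGoBlockSync|traceFutileWakeup, 2)
//
// which emits traceEvFutileWakeup first and then the masked-off
// traceEvGoBlockSync (the skip value here is hypothetical).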

func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysexitticks must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater accuracy. But if they're not valid for this
		// trace, assume that the trace was started after the actual syscall
		// exit (but before we actually managed to start the goroutine,
		// aka right now), and assign a fresh time stamp to keep the log consistent.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked,
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
}

func traceNextGC() {
	if memstats.next_gc == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvNextGC, -1, 0)
	} else {
		traceEvent(traceEvNextGC, -1, memstats.next_gc)
	}
}

// To access runtime functions from runtime/trace.
// See runtime/trace/annotation.go

//go:linkname trace_userTaskCreate runtime..z2ftrace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	if !trace.enabled {
		return
	}

	// Same as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	typeStringID, bufp := traceString(bufp, pid, taskType)
	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 3, id, parentID, typeStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userTaskEnd runtime..z2ftrace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	traceEvent(traceEvUserTaskEnd, 2, id)
}

//go:linkname trace_userRegion runtime..z2ftrace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	nameStringID, bufp := traceString(bufp, pid, name)
	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 3, id, mode, nameStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userLog runtime..z2ftrace.userLog
func trace_userLog(id uint64, category, message string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	categoryID, bufp := traceString(bufp, pid, category)

	extraSpace := traceBytesPerNumber + len(message) // extraSpace for the value string
	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 3, id, categoryID)
	// traceEventLocked reserved extra space for val and len(val)
	// in buf, so buf now has room for the following.
	buf := bufp.ptr()

	// double-check the message and its length can fit.
	// Otherwise, truncate the message.
	slen := len(message)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}
	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], message[:slen])

	traceReleaseBuffer(pid)
}
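
// A user-side sketch (illustration only, not part of the original file) of
// the runtime/trace annotation API that funnels into the functions above;
// steamMilk is a hypothetical user-defined func():
//
//	ctx, task := trace.NewTask(ctx, "makeCappuccino") // trace_userTaskCreate
//	trace.Log(ctx, "orderID", "42")                   // trace_userLog
//	trace.WithRegion(ctx, "steamMilk", steamMilk)     // trace_userRegion (start and end)
//	task.End()                                        // trace_userTaskEnd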