// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc., and writes them to a buffer
// in a compact form. A precise nanosecond-precision timestamp and a stack
// trace are captured for most events.
// See https://golang.org/s/go15trace for more info.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // memstats.heap_live change [timestamp, heap_alloc]
	traceEvNextGC            = 34 // memstats.next_gc change [timestamp, next_gc]
	traceEvTimerGoroutine    = 35 // not currently used; previously denoted timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvUserTaskCreate    = 45 // trace.NewTask [timestamp, internal task id, internal parent task id, stack, name string]
	traceEvUserTaskEnd       = 46 // end of a task [timestamp, internal task id, stack]
	traceEvUserRegion        = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), stack, name string]
	traceEvUserLog           = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
	traceEvCount             = 49
	// A byte is used for the event type, but only 6 bits of it are available;
	// the remaining 2 bits are used to specify the number of arguments.
	// That means the max event type value is 63.
)

const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	// Tracing won't work reliably for architectures where cputicks is emulated
	// by nanotime, so the value doesn't matter for those architectures.
	traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than the whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for the end user.
	traceFutileWakeup byte = 128
)
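
// An illustrative packing of the first event byte (see traceEventLocked
// below for the authoritative encoding): the low 6 bits hold the event type
// and the top 2 bits hold the argument count, saturated at 3. A GoSched
// event carrying only its stack id argument would therefore start with
//
//	hdr := byte(traceEvGoSched) | 1<<traceArgCountShift // 17 | 1<<6 = 0x51
//
// Events with 3 or more arguments encode narg == 3 and follow the header
// byte with an explicit length byte.
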
// trace is the global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock acquisition
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// Dictionary for traceEvString.
	//
	// TODO: central lock to access the map is not ideal.
	//   option: pre-assign ids to all user annotation region names and tags
	//   option: per-P cache
	//   option: sync.Map like data structure
	stringsLock mutex
	strings     map[string]uint64
	stringSeq   uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}

// traceBufHeader is the header of a per-P tracing buffer.
//
//go:notinheap
type traceBufHeader struct {
	link      traceBufPtr              // in trace.empty/full
	lastTicks uint64                   // when we wrote the last event
	pos       int                      // next write offset in arr
	stk       [traceStackSize]location // scratch buffer for traceback
}

// traceBuf is a per-P tracing buffer.
//
//go:notinheap
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
//
// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	stopTheWorld("start tracing")

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such a syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		startTheWorld()
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	mp := acquirem()
	stkBuf := make([]location, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)

	for _, gp := range allgs {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]location{{pc: gp.startpc + sys.PCQuantum}})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	}
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
	// It will lead to a false conclusion that cputicks is broken.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// string to id mapping
	//  0 : reserved for an empty string
	//  remaining: other strings registered by traceString
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)

	startTheWorld()
	return nil
}
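
// For reference, a minimal sketch of how most clients are expected to drive
// tracing (through the public runtime/trace package, which wraps the
// StartTrace/ReadTrace/StopTrace triple defined in this file):
//
//	f, err := os.Create("trace.out")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//	if err := trace.Start(f); err != nil { // StartTrace + a ReadTrace drain goroutine
//		log.Fatal(err)
//	}
//	defer trace.Stop() // StopTrace; returns once all data is flushed
//	// ... code to be traced ...
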
// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorld("stop tracing")

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		startTheWorld()
		return
	}

	traceGoSched()

	// Loop over all allocated Ps because dead Ps may still have
	// trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms; wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	startTheWorld()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range allp[:cap(allp)] {
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.strings = nil
	trace.shutdown = false
	unlock(&trace.lock)
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call into the heap allocator, which will try to emit a
	// trace event while holding the heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad.
		// But we would rather not crash the program because of tracing,
		// since tracing can be enabled at runtime on prod servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.11 trace\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		// This will emit a bunch of full buffers, and we will pick them up
		// on the next iteration.
		trace.stackTab.dump()
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which the race
			// detector does not see. This is required to avoid false
			// race reports on the writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so we can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
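
// The sequence of chunks ReadTrace hands out is: the header
// ("go 1.11 trace\x00\x00\x00"), full event batches as they accumulate, a
// footer carrying the timer frequency, the dumped stack table, and finally
// nil. A sketch of the drain loop a reader would run (this mirrors what
// runtime/trace.Start sets up; w stands for any io.Writer):
//
//	for {
//		data := runtime.ReadTrace()
//		if data == nil {
//			break // tracing stopped and all buffered data consumed
//		}
//		w.Write(data) // data must be consumed/copied before the next call
//	}
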
// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader.ptr()
	trace.reader.set(nil)
	unlock(&trace.lock)
	return gp
}

// traceProcFree frees the trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceFullQueue queues buf into the queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from the queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}

// traceEvent writes a single event to the trace buffer, flushing the buffer if necessary.
// ev is the event type.
// If skip > 0, write the current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.
	//
	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceLockBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceLockBuffer.
	//
	// Note that trace_userTaskCreate runs the same check.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	if skip > 0 {
		if getg() == mp.curg {
			skip++ // +1 because stack is captured in traceEventLocked.
		}
	}
	traceEventLocked(0, mp, pid, bufp, ev, skip, args...)
	traceReleaseBuffer(pid)
}

func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64) {
	buf := bufp.ptr()
	// TODO: test on non-zero extraBytes param.
	maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two additional params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}

	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for the number of arguments.
	// If the number is >= 3, then the event type is followed by the event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
}

func traceStackID(mp *m, buf []location, skip int) uint64 {
	_g_ := getg()
	gp := mp.curg
	var nstk int
	if gp == _g_ {
		nstk = callers(skip+1, buf)
	} else if gp != nil {
		// FIXME: get stack trace of different goroutine.
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}

// traceAcquireBuffer returns the trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}

// traceFlush puts buf onto the stack of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// initialize the buffer for a new batch
	ticks := uint64(cputicks()) / traceTickDiv
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}

// traceString adds a string to trace.strings and returns its id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}

	lock(&trace.stringsLock)
	if raceenabled {
		// raceacquire is necessary because the map access
		// below is race annotated.
		raceacquire(unsafe.Pointer(&trace.stringsLock))
	}

	if id, ok := trace.strings[s]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&trace.stringsLock))
		}
		unlock(&trace.stringsLock)

		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	if raceenabled {
		racerelease(unsafe.Pointer(&trace.stringsLock))
	}
	unlock(&trace.stringsLock)

	// Memory allocation above may trigger tracing and
	// cause *bufp to change. The following code works with *bufp,
	// so there must be no memory allocation or any other activity
	// that causes tracing after this point.

	buf := bufp.ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}
	buf.byte(traceEvString)
	buf.varint(id)

	// Double-check that the string and its length can fit.
	// Otherwise, truncate the string.
	slen := len(s)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}

	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], s[:slen])

	bufp.set(buf)
	return id, bufp
}

// traceAppend appends v to buf in little-endian-base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}
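
// A worked example of the little-endian base-128 encoding implemented by
// traceAppend and varint above (the same wire format as encoding/binary's
// unsigned varints): 300 is 0b1_0010_1100, which splits into the 7-bit
// groups 0101100 and 0000010; groups are emitted low-first, with the 0x80
// continuation bit set on every byte except the last:
//
//	traceAppend(nil, 127) // -> []byte{0x7f}
//	traceAppend(nil, 128) // -> []byte{0x80, 0x01}
//	traceAppend(nil, 300) // -> []byte{0xac, 0x02}
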
// traceStackTable maps stack traces (arrays of PCs) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]location // real type [n]location
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns the slice of PCs.
func (ts *traceStack) stack() []location {
	return (*[traceStackSize]location)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []location) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	var hash uintptr
	for _, loc := range pcs {
		hash += loc.pc
		hash += hash << 10
		hash ^= hash >> 6
	}
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		// Use memmove to avoid write barrier.
		memmove(unsafe.Pointer(&stkpc[i]), unsafe.Pointer(&pc), unsafe.Sizeof(pc))
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []location, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*unsafe.Sizeof(location{})))
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	bufp := traceFlush(0, 0)
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := stk.stack()
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				frame, bufp = traceFrameForPC(bufp, 0, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.pc))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
				bufp = traceFlush(bufp, 0)
			}
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(bufp)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
}

type traceFrame struct {
	funcID uint64
	fileID uint64
	line   uint64
}

// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f location) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame

	fn := f.function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.lineno)
	file := f.filename
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, *bufp
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
//
//go:notinheap
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - sys.PtrSize]byte
}

// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

// alloc allocates an n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = alignUp(n, sys.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		// This is only safe because the strings returned by callers
		// are stored in a location that is not in the Go heap.
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}

// The following functions write specific events to the trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}

// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}

// traceGCSweepSpan traces the sweep of a single page.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}

func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}
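
// Illustrative pairing of the three sweep hooks above (a sketch, not a real
// runtime call site): a sweeper brackets its non-preemptible work as
//
//	traceGCSweepStart()
//	for ... { // each span examined
//		traceGCSweepSpan(bytesSwept)
//	}
//	traceGCSweepDone()
//
// so a sweep pass that touches no spans emits no events at all.
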
func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}

func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]location{{pc: pc + sys.PCQuantum}})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}

func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _g_ == _p_.ptr().gcBgMarkWorker.ptr() {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}

func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysexitticks must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater accuracy. But if they're not valid for this
		// trace, assume that the trace was started after the actual syscall
		// exit (but before we actually managed to start the goroutine,
		// aka right now), and assign a fresh timestamp to keep the log consistent.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
}

func traceNextGC() {
	if memstats.next_gc == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvNextGC, -1, 0)
	} else {
		traceEvent(traceEvNextGC, -1, memstats.next_gc)
	}
}

// The following functions are called from the runtime/trace package via
// go:linkname. See runtime/trace/annotation.go.

//go:linkname trace_userTaskCreate runtime..z2ftrace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	if !trace.enabled {
		return
	}

	// Same as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	typeStringID, bufp := traceString(bufp, pid, taskType)
	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 3, id, parentID, typeStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userTaskEnd runtime..z2ftrace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	traceEvent(traceEvUserTaskEnd, 2, id)
}

//go:linkname trace_userRegion runtime..z2ftrace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	nameStringID, bufp := traceString(bufp, pid, name)
	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 3, id, mode, nameStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userLog runtime..z2ftrace.userLog
func trace_userLog(id uint64, category, message string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	categoryID, bufp := traceString(bufp, pid, category)

	extraSpace := traceBytesPerNumber + len(message) // extraSpace for the value string
	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 3, id, categoryID)
	// traceEventLocked reserved extra space for val and len(val)
	// in buf, so buf now has room for the following.
	buf := bufp.ptr()

	// Double-check that the message and its length can fit.
	// Otherwise, truncate the message.
	slen := len(message)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}
	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], message[:slen])

	traceReleaseBuffer(pid)
}
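
// For reference, the user-facing annotation API that funnels into the
// linknamed hooks above (a sketch using the public runtime/trace package):
//
//	ctx, task := trace.NewTask(ctx, "makeCake") // -> trace_userTaskCreate
//	defer task.End()                            // -> trace_userTaskEnd
//	trace.WithRegion(ctx, "mixing", mix)        // -> trace_userRegion (start and end)
//	trace.Log(ctx, "oven", "preheated")         // -> trace_userLog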