// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file provides an internal debug logging facility. The debug
// log is a lightweight, in-memory, per-M ring buffer. By default, the
// runtime prints the debug log on panic.
//
// To print something to the debug log, call dlog to obtain a dlogger
// and use the methods on that to add values. The values will be
// space-separated in the output (much like println).
//
// This facility can be enabled by passing -tags debuglog when
// building. Without this tag, dlog calls compile to nothing.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// debugLogBytes is the size of each per-M ring buffer. This is
// allocated off-heap to avoid blowing up the M and hence the GC'd
// heap size.
const debugLogBytes = 16 << 10

// debugLogStringLimit is the maximum number of bytes in a string.
// Above this, the string will be truncated with "..(n more bytes).."
const debugLogStringLimit = debugLogBytes / 8

// dlog returns a debug logger. The caller can use methods on the
// returned logger to add values, which will be space-separated in the
// final output, much like println. The caller must call end() to
// finish the message.
//
// dlog can be used from highly-constrained corners of the runtime: it
// is safe to use in the signal handler, from within the write
// barrier, from within the stack implementation, and in places that
// must be recursively nosplit.
//
// This will be compiled away if built without the debuglog build tag.
// However, argument construction may not be. If any of the arguments
// are not literals or trivial expressions, consider protecting the
// call with "if dlogEnabled".
//
//go:nosplit
//go:nowritebarrierrec
func dlog() *dlogger {
	if !dlogEnabled {
		return nil
	}

	// Get the time.
	tick, nano := uint64(cputicks()), uint64(nanotime())

	// Try to get a cached logger.
	l := getCachedDlogger()

	// If we couldn't get a cached logger, try to get one from the
	// global pool. Ownership is claimed with a CAS on the owned
	// flag, so this is safe against concurrent claimants.
	if l == nil {
		allp := (*uintptr)(unsafe.Pointer(&allDloggers))
		all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp)))
		for l1 := all; l1 != nil; l1 = l1.allLink {
			// The atomic.Load is a cheap pre-check; only the Cas
			// actually transfers ownership.
			if atomic.Load(&l1.owned) == 0 && atomic.Cas(&l1.owned, 0, 1) {
				l = l1
				break
			}
		}
	}

	// If that failed, allocate a new logger. The allocation is
	// off-heap (sysAlloc) because dlogger is notinheap and this
	// must work even when the heap is in a bad state.
	if l == nil {
		l = (*dlogger)(sysAlloc(unsafe.Sizeof(dlogger{}), nil))
		if l == nil {
			throw("failed to allocate debug log")
		}
		// Point the writer's internal reader at this logger's
		// own ring buffer.
		l.w.r.data = &l.w.data
		// Mark as owned before publishing so no one else
		// claims it; plain store is fine pre-publication.
		l.owned = 1

		// Prepend to allDloggers list with a CAS loop. The
		// list is prepend-only, so no ABA hazard.
		headp := (*uintptr)(unsafe.Pointer(&allDloggers))
		for {
			head := atomic.Loaduintptr(headp)
			l.allLink = (*dlogger)(unsafe.Pointer(head))
			if atomic.Casuintptr(headp, head, uintptr(unsafe.Pointer(l))) {
				break
			}
		}
	}

	// If the time delta is getting too high, write a new sync
	// packet. We set the limit so we don't write more than 6
	// bytes of delta in the record header.
	const deltaLimit = 1<<(3*7) - 1 // ~2ms between sync packets
	if tick-l.w.tick > deltaLimit || nano-l.w.nano > deltaLimit {
		l.w.writeSync(tick, nano)
	}

	// Reserve space for framing header. The size is filled in
	// later by end() via writeFrameAt.
	l.w.ensure(debugLogHeaderSize)
	l.w.write += debugLogHeaderSize

	// Write record header: tick delta, nano delta, and the
	// current P's id (or -1 if there is no P).
	l.w.uvarint(tick - l.w.tick)
	l.w.uvarint(nano - l.w.nano)
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.p != 0 {
		l.w.varint(int64(gp.m.p.ptr().id))
	} else {
		l.w.varint(-1)
	}

	return l
}

// A dlogger writes to the debug log.
//
// To obtain a dlogger, call dlog(). When done with the dlogger, call
// end().
122// 123//go:notinheap 124type dlogger struct { 125 w debugLogWriter 126 127 // allLink is the next dlogger in the allDloggers list. 128 allLink *dlogger 129 130 // owned indicates that this dlogger is owned by an M. This is 131 // accessed atomically. 132 owned uint32 133} 134 135// allDloggers is a list of all dloggers, linked through 136// dlogger.allLink. This is accessed atomically. This is prepend only, 137// so it doesn't need to protect against ABA races. 138var allDloggers *dlogger 139 140//go:nosplit 141func (l *dlogger) end() { 142 if !dlogEnabled { 143 return 144 } 145 146 // Fill in framing header. 147 size := l.w.write - l.w.r.end 148 if !l.w.writeFrameAt(l.w.r.end, size) { 149 throw("record too large") 150 } 151 152 // Commit the record. 153 l.w.r.end = l.w.write 154 155 // Attempt to return this logger to the cache. 156 if putCachedDlogger(l) { 157 return 158 } 159 160 // Return the logger to the global pool. 161 atomic.Store(&l.owned, 0) 162} 163 164const ( 165 debugLogUnknown = 1 + iota 166 debugLogBoolTrue 167 debugLogBoolFalse 168 debugLogInt 169 debugLogUint 170 debugLogHex 171 debugLogPtr 172 debugLogString 173 debugLogConstString 174 debugLogStringOverflow 175 176 debugLogPC 177 debugLogTraceback 178) 179 180//go:nosplit 181func (l *dlogger) b(x bool) *dlogger { 182 if !dlogEnabled { 183 return l 184 } 185 if x { 186 l.w.byte(debugLogBoolTrue) 187 } else { 188 l.w.byte(debugLogBoolFalse) 189 } 190 return l 191} 192 193//go:nosplit 194func (l *dlogger) i(x int) *dlogger { 195 return l.i64(int64(x)) 196} 197 198//go:nosplit 199func (l *dlogger) i8(x int8) *dlogger { 200 return l.i64(int64(x)) 201} 202 203//go:nosplit 204func (l *dlogger) i16(x int16) *dlogger { 205 return l.i64(int64(x)) 206} 207 208//go:nosplit 209func (l *dlogger) i32(x int32) *dlogger { 210 return l.i64(int64(x)) 211} 212 213//go:nosplit 214func (l *dlogger) i64(x int64) *dlogger { 215 if !dlogEnabled { 216 return l 217 } 218 l.w.byte(debugLogInt) 219 l.w.varint(x) 220 
return l 221} 222 223//go:nosplit 224func (l *dlogger) u(x uint) *dlogger { 225 return l.u64(uint64(x)) 226} 227 228//go:nosplit 229func (l *dlogger) uptr(x uintptr) *dlogger { 230 return l.u64(uint64(x)) 231} 232 233//go:nosplit 234func (l *dlogger) u8(x uint8) *dlogger { 235 return l.u64(uint64(x)) 236} 237 238//go:nosplit 239func (l *dlogger) u16(x uint16) *dlogger { 240 return l.u64(uint64(x)) 241} 242 243//go:nosplit 244func (l *dlogger) u32(x uint32) *dlogger { 245 return l.u64(uint64(x)) 246} 247 248//go:nosplit 249func (l *dlogger) u64(x uint64) *dlogger { 250 if !dlogEnabled { 251 return l 252 } 253 l.w.byte(debugLogUint) 254 l.w.uvarint(x) 255 return l 256} 257 258//go:nosplit 259func (l *dlogger) hex(x uint64) *dlogger { 260 if !dlogEnabled { 261 return l 262 } 263 l.w.byte(debugLogHex) 264 l.w.uvarint(x) 265 return l 266} 267 268//go:nosplit 269func (l *dlogger) p(x interface{}) *dlogger { 270 if !dlogEnabled { 271 return l 272 } 273 l.w.byte(debugLogPtr) 274 if x == nil { 275 l.w.uvarint(0) 276 } else { 277 v := efaceOf(&x) 278 switch v._type.kind & kindMask { 279 case kindChan, kindFunc, kindMap, kindPtr, kindUnsafePointer: 280 l.w.uvarint(uint64(uintptr(v.data))) 281 default: 282 throw("not a pointer type") 283 } 284 } 285 return l 286} 287 288// C function to get an address before the read-only data segment. 289func getText() uintptr 290 291// C function to get an address after the read-only data segment. 292func getEtext() uintptr 293 294//go:nosplit 295func (l *dlogger) s(x string) *dlogger { 296 if !dlogEnabled { 297 return l 298 } 299 str := stringStructOf(&x) 300 text := getText() 301 etext := getEtext() 302 if len(x) > 4 && text != 0 && etext != 0 && text <= uintptr(str.str) && uintptr(str.str) < etext { 303 // String constants are in the rodata section, which 304 // isn't recorded in moduledata. But it has to be 305 // somewhere between etext and end. 
306 l.w.byte(debugLogConstString) 307 l.w.uvarint(uint64(str.len)) 308 l.w.uvarint(uint64(uintptr(str.str) - text)) 309 } else { 310 l.w.byte(debugLogString) 311 var b []byte 312 bb := (*slice)(unsafe.Pointer(&b)) 313 bb.array = str.str 314 bb.len, bb.cap = str.len, str.len 315 if len(b) > debugLogStringLimit { 316 b = b[:debugLogStringLimit] 317 } 318 l.w.uvarint(uint64(len(b))) 319 l.w.bytes(b) 320 if len(b) != len(x) { 321 l.w.byte(debugLogStringOverflow) 322 l.w.uvarint(uint64(len(x) - len(b))) 323 } 324 } 325 return l 326} 327 328//go:nosplit 329func (l *dlogger) pc(x uintptr) *dlogger { 330 if !dlogEnabled { 331 return l 332 } 333 l.w.byte(debugLogPC) 334 l.w.uvarint(uint64(x)) 335 return l 336} 337 338//go:nosplit 339func (l *dlogger) traceback(x []uintptr) *dlogger { 340 if !dlogEnabled { 341 return l 342 } 343 l.w.byte(debugLogTraceback) 344 l.w.uvarint(uint64(len(x))) 345 for _, pc := range x { 346 l.w.uvarint(uint64(pc)) 347 } 348 return l 349} 350 351// A debugLogWriter is a ring buffer of binary debug log records. 352// 353// A log record consists of a 2-byte framing header and a sequence of 354// fields. The framing header gives the size of the record as a little 355// endian 16-bit value. Each field starts with a byte indicating its 356// type, followed by type-specific data. If the size in the framing 357// header is 0, it's a sync record consisting of two little endian 358// 64-bit values giving a new time base. 359// 360// Because this is a ring buffer, new records will eventually 361// overwrite old records. Hence, it maintains a reader that consumes 362// the log as it gets overwritten. That reader state is where an 363// actual log reader would start. 364// 365//go:notinheap 366type debugLogWriter struct { 367 write uint64 368 data debugLogBuf 369 370 // tick and nano are the time bases from the most recently 371 // written sync record. 
372 tick, nano uint64 373 374 // r is a reader that consumes records as they get overwritten 375 // by the writer. It also acts as the initial reader state 376 // when printing the log. 377 r debugLogReader 378 379 // buf is a scratch buffer for encoding. This is here to 380 // reduce stack usage. 381 buf [10]byte 382} 383 384//go:notinheap 385type debugLogBuf [debugLogBytes]byte 386 387const ( 388 // debugLogHeaderSize is the number of bytes in the framing 389 // header of every dlog record. 390 debugLogHeaderSize = 2 391 392 // debugLogSyncSize is the number of bytes in a sync record. 393 debugLogSyncSize = debugLogHeaderSize + 2*8 394) 395 396//go:nosplit 397func (l *debugLogWriter) ensure(n uint64) { 398 for l.write+n >= l.r.begin+uint64(len(l.data)) { 399 // Consume record at begin. 400 if l.r.skip() == ^uint64(0) { 401 // Wrapped around within a record. 402 // 403 // TODO(austin): It would be better to just 404 // eat the whole buffer at this point, but we 405 // have to communicate that to the reader 406 // somehow. 
407 throw("record wrapped around") 408 } 409 } 410} 411 412//go:nosplit 413func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool { 414 l.data[pos%uint64(len(l.data))] = uint8(size) 415 l.data[(pos+1)%uint64(len(l.data))] = uint8(size >> 8) 416 return size <= 0xFFFF 417} 418 419//go:nosplit 420func (l *debugLogWriter) writeSync(tick, nano uint64) { 421 l.tick, l.nano = tick, nano 422 l.ensure(debugLogHeaderSize) 423 l.writeFrameAt(l.write, 0) 424 l.write += debugLogHeaderSize 425 l.writeUint64LE(tick) 426 l.writeUint64LE(nano) 427 l.r.end = l.write 428} 429 430//go:nosplit 431func (l *debugLogWriter) writeUint64LE(x uint64) { 432 var b [8]byte 433 b[0] = byte(x) 434 b[1] = byte(x >> 8) 435 b[2] = byte(x >> 16) 436 b[3] = byte(x >> 24) 437 b[4] = byte(x >> 32) 438 b[5] = byte(x >> 40) 439 b[6] = byte(x >> 48) 440 b[7] = byte(x >> 56) 441 l.bytes(b[:]) 442} 443 444//go:nosplit 445func (l *debugLogWriter) byte(x byte) { 446 l.ensure(1) 447 pos := l.write 448 l.write++ 449 l.data[pos%uint64(len(l.data))] = x 450} 451 452//go:nosplit 453func (l *debugLogWriter) bytes(x []byte) { 454 l.ensure(uint64(len(x))) 455 pos := l.write 456 l.write += uint64(len(x)) 457 for len(x) > 0 { 458 n := copy(l.data[pos%uint64(len(l.data)):], x) 459 pos += uint64(n) 460 x = x[n:] 461 } 462} 463 464//go:nosplit 465func (l *debugLogWriter) varint(x int64) { 466 var u uint64 467 if x < 0 { 468 u = (^uint64(x) << 1) | 1 // complement i, bit 0 is 1 469 } else { 470 u = (uint64(x) << 1) // do not complement i, bit 0 is 0 471 } 472 l.uvarint(u) 473} 474 475//go:nosplit 476func (l *debugLogWriter) uvarint(u uint64) { 477 i := 0 478 for u >= 0x80 { 479 l.buf[i] = byte(u) | 0x80 480 u >>= 7 481 i++ 482 } 483 l.buf[i] = byte(u) 484 i++ 485 l.bytes(l.buf[:i]) 486} 487 488type debugLogReader struct { 489 data *debugLogBuf 490 491 // begin and end are the positions in the log of the beginning 492 // and end of the log data, modulo len(data). 
493 begin, end uint64 494 495 // tick and nano are the current time base at begin. 496 tick, nano uint64 497} 498 499//go:nosplit 500func (r *debugLogReader) skip() uint64 { 501 // Read size at pos. 502 if r.begin+debugLogHeaderSize > r.end { 503 return ^uint64(0) 504 } 505 size := uint64(r.readUint16LEAt(r.begin)) 506 if size == 0 { 507 // Sync packet. 508 r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize) 509 r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8) 510 size = debugLogSyncSize 511 } 512 if r.begin+size > r.end { 513 return ^uint64(0) 514 } 515 r.begin += size 516 return size 517} 518 519//go:nosplit 520func (r *debugLogReader) readUint16LEAt(pos uint64) uint16 { 521 return uint16(r.data[pos%uint64(len(r.data))]) | 522 uint16(r.data[(pos+1)%uint64(len(r.data))])<<8 523} 524 525//go:nosplit 526func (r *debugLogReader) readUint64LEAt(pos uint64) uint64 { 527 var b [8]byte 528 for i := range b { 529 b[i] = r.data[pos%uint64(len(r.data))] 530 pos++ 531 } 532 return uint64(b[0]) | uint64(b[1])<<8 | 533 uint64(b[2])<<16 | uint64(b[3])<<24 | 534 uint64(b[4])<<32 | uint64(b[5])<<40 | 535 uint64(b[6])<<48 | uint64(b[7])<<56 536} 537 538func (r *debugLogReader) peek() (tick uint64) { 539 // Consume any sync records. 540 size := uint64(0) 541 for size == 0 { 542 if r.begin+debugLogHeaderSize > r.end { 543 return ^uint64(0) 544 } 545 size = uint64(r.readUint16LEAt(r.begin)) 546 if size != 0 { 547 break 548 } 549 if r.begin+debugLogSyncSize > r.end { 550 return ^uint64(0) 551 } 552 // Sync packet. 553 r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize) 554 r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8) 555 r.begin += debugLogSyncSize 556 } 557 558 // Peek tick delta. 
559 if r.begin+size > r.end { 560 return ^uint64(0) 561 } 562 pos := r.begin + debugLogHeaderSize 563 var u uint64 564 for i := uint(0); ; i += 7 { 565 b := r.data[pos%uint64(len(r.data))] 566 pos++ 567 u |= uint64(b&^0x80) << i 568 if b&0x80 == 0 { 569 break 570 } 571 } 572 if pos > r.begin+size { 573 return ^uint64(0) 574 } 575 return r.tick + u 576} 577 578func (r *debugLogReader) header() (end, tick, nano uint64, p int) { 579 // Read size. We've already skipped sync packets and checked 580 // bounds in peek. 581 size := uint64(r.readUint16LEAt(r.begin)) 582 end = r.begin + size 583 r.begin += debugLogHeaderSize 584 585 // Read tick, nano, and p. 586 tick = r.uvarint() + r.tick 587 nano = r.uvarint() + r.nano 588 p = int(r.varint()) 589 590 return 591} 592 593func (r *debugLogReader) uvarint() uint64 { 594 var u uint64 595 for i := uint(0); ; i += 7 { 596 b := r.data[r.begin%uint64(len(r.data))] 597 r.begin++ 598 u |= uint64(b&^0x80) << i 599 if b&0x80 == 0 { 600 break 601 } 602 } 603 return u 604} 605 606func (r *debugLogReader) varint() int64 { 607 u := r.uvarint() 608 var v int64 609 if u&1 == 0 { 610 v = int64(u >> 1) 611 } else { 612 v = ^int64(u >> 1) 613 } 614 return v 615} 616 617func (r *debugLogReader) printVal() bool { 618 typ := r.data[r.begin%uint64(len(r.data))] 619 r.begin++ 620 621 switch typ { 622 default: 623 print("<unknown field type ", hex(typ), " pos ", r.begin-1, " end ", r.end, ">\n") 624 return false 625 626 case debugLogUnknown: 627 print("<unknown kind>") 628 629 case debugLogBoolTrue: 630 print(true) 631 632 case debugLogBoolFalse: 633 print(false) 634 635 case debugLogInt: 636 print(r.varint()) 637 638 case debugLogUint: 639 print(r.uvarint()) 640 641 case debugLogHex, debugLogPtr: 642 print(hex(r.uvarint())) 643 644 case debugLogString: 645 sl := r.uvarint() 646 if r.begin+sl > r.end { 647 r.begin = r.end 648 print("<string length corrupted>") 649 break 650 } 651 for sl > 0 { 652 b := r.data[r.begin%uint64(len(r.data)):] 653 if 
uint64(len(b)) > sl { 654 b = b[:sl] 655 } 656 r.begin += uint64(len(b)) 657 sl -= uint64(len(b)) 658 gwrite(b) 659 } 660 661 case debugLogConstString: 662 len, ptr := int(r.uvarint()), uintptr(r.uvarint()) 663 ptr += getText() 664 str := stringStruct{ 665 str: unsafe.Pointer(ptr), 666 len: len, 667 } 668 s := *(*string)(unsafe.Pointer(&str)) 669 print(s) 670 671 case debugLogStringOverflow: 672 print("..(", r.uvarint(), " more bytes)..") 673 674 case debugLogPC: 675 printDebugLogPC(uintptr(r.uvarint())) 676 677 case debugLogTraceback: 678 n := int(r.uvarint()) 679 for i := 0; i < n; i++ { 680 print("\n\t") 681 printDebugLogPC(uintptr(r.uvarint())) 682 } 683 } 684 685 return true 686} 687 688// printDebugLog prints the debug log. 689func printDebugLog() { 690 if !dlogEnabled { 691 return 692 } 693 694 // This function should not panic or throw since it is used in 695 // the fatal panic path and this may deadlock. 696 697 printlock() 698 699 // Get the list of all debug logs. 700 allp := (*uintptr)(unsafe.Pointer(&allDloggers)) 701 all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp))) 702 703 // Count the logs. 704 n := 0 705 for l := all; l != nil; l = l.allLink { 706 n++ 707 } 708 if n == 0 { 709 printunlock() 710 return 711 } 712 713 // Prepare read state for all logs. 714 type readState struct { 715 debugLogReader 716 first bool 717 lost uint64 718 nextTick uint64 719 } 720 state1 := sysAlloc(unsafe.Sizeof(readState{})*uintptr(n), nil) 721 if state1 == nil { 722 println("failed to allocate read state for", n, "logs") 723 printunlock() 724 return 725 } 726 state := (*[1 << 20]readState)(state1)[:n] 727 { 728 l := all 729 for i := range state { 730 s := &state[i] 731 s.debugLogReader = l.w.r 732 s.first = true 733 s.lost = l.w.r.begin 734 s.nextTick = s.peek() 735 l = l.allLink 736 } 737 } 738 739 // Print records. 740 for { 741 // Find the next record. 
742 var best struct { 743 tick uint64 744 i int 745 } 746 best.tick = ^uint64(0) 747 for i := range state { 748 if state[i].nextTick < best.tick { 749 best.tick = state[i].nextTick 750 best.i = i 751 } 752 } 753 if best.tick == ^uint64(0) { 754 break 755 } 756 757 // Print record. 758 s := &state[best.i] 759 if s.first { 760 print(">> begin log ", best.i) 761 if s.lost != 0 { 762 print("; lost first ", s.lost>>10, "KB") 763 } 764 print(" <<\n") 765 s.first = false 766 } 767 768 end, _, nano, p := s.header() 769 oldEnd := s.end 770 s.end = end 771 772 print("[") 773 var tmpbuf [21]byte 774 pnano := int64(nano) - runtimeInitTime 775 if pnano < 0 { 776 // Logged before runtimeInitTime was set. 777 pnano = 0 778 } 779 print(string(itoaDiv(tmpbuf[:], uint64(pnano), 9))) 780 print(" P ", p, "] ") 781 782 for i := 0; s.begin < s.end; i++ { 783 if i > 0 { 784 print(" ") 785 } 786 if !s.printVal() { 787 // Abort this P log. 788 print("<aborting P log>") 789 end = oldEnd 790 break 791 } 792 } 793 println() 794 795 // Move on to the next record. 796 s.begin = end 797 s.end = oldEnd 798 s.nextTick = s.peek() 799 } 800 801 printunlock() 802} 803 804func printDebugLogPC(pc uintptr) { 805 print(hex(pc)) 806 name, file, line, _ := funcfileline(pc, -1, false) 807 if name == "" { 808 print(" [unknown PC]") 809 } else { 810 print(" [", name, "+", hex(pc-funcentry(pc)), 811 " ", file, ":", line, "]") 812 } 813} 814