// SPDX-License-Identifier: GPL-2.0
/*
 * kdb helper for dumping the ftrace buffer
 *
 * Copyright (C) 2010 Jason Wessel <jason.wessel@windriver.com>
 *
 * ftrace_dump_buf based on ftrace_dump:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 */
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/ftrace.h>

#include "trace.h"
#include "trace_output.h"

/*
 * File-scope iterator state shared between kdb_ftdump() and
 * ftrace_dump_buf().  NOTE(review): there is no locking here —
 * presumably safe because kdb commands run with the system stopped;
 * confirm against the kdb execution model before reusing elsewhere.
 */
static struct trace_iterator iter;
static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];

/*
 * ftrace_dump_buf - walk the trace buffer and print entries via kdb_printf.
 * @skip_entries: number of leading entries to step over without printing
 *                (they are still counted toward @cnt)
 * @cpu_file:     a single CPU number, or RING_BUFFER_ALL_CPUS to dump
 *                the per-cpu buffers of every tracing CPU
 *
 * Relies on the file-scope @iter having already been initialized by the
 * caller (kdb_ftdump() calls trace_init_global_iter() and attaches
 * buffer_iter[] before calling here).
 */
static void ftrace_dump_buf(int skip_entries, long cpu_file)
{
	struct trace_array *tr;
	unsigned int old_userobj;
	int cnt = 0, cpu;

	tr = iter.tr;

	/* Save the flags so the SYM_USEROBJ clear below can be undone. */
	old_userobj = tr->trace_flags;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	kdb_printf("Dumping ftrace buffer:\n");
	if (skip_entries)
		kdb_printf("(skipping %d entries)\n", skip_entries);

	/* reset all but tr, trace, and overruns */
	memset(&iter.seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	iter.iter_flags |= TRACE_FILE_LAT_FMT;
	iter.pos = -1;

	/*
	 * Prepare a ring-buffer iterator per requested CPU.  GFP_ATOMIC
	 * because this runs from the debugger, where sleeping allocations
	 * are not allowed.
	 */
	if (cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter.buffer_iter[cpu] =
			ring_buffer_read_prepare(iter.trace_buffer->buffer,
						 cpu, GFP_ATOMIC);
			ring_buffer_read_start(iter.buffer_iter[cpu]);
			tracing_iter_reset(&iter, cpu);
		}
	} else {
		iter.cpu_file = cpu_file;
		iter.buffer_iter[cpu_file] =
			ring_buffer_read_prepare(iter.trace_buffer->buffer,
						 cpu_file, GFP_ATOMIC);
		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
		tracing_iter_reset(&iter, cpu_file);
	}

	/* Print (or skip) each entry until the buffer is exhausted. */
	while (trace_find_next_entry_inc(&iter)) {
		if (!cnt)
			kdb_printf("---------------------------------\n");
		cnt++;

		if (!skip_entries) {
			print_trace_line(&iter);
			trace_printk_seq(&iter.seq);
		} else {
			skip_entries--;
		}

		/* Bail out early if the kdb user interrupted the command. */
		if (KDB_FLAG(CMD_INTERRUPT))
			goto out;
	}

	if (!cnt)
		kdb_printf("   (ftrace buffer empty)\n");
	else
		kdb_printf("---------------------------------\n");

out:
	/* Restore the flags saved (and modified) at the top. */
	tr->trace_flags = old_userobj;

	/*
	 * Release every iterator that was set up above; entries left NULL
	 * (e.g. the single-CPU case) are simply skipped.
	 */
	for_each_tracing_cpu(cpu) {
		if (iter.buffer_iter[cpu]) {
			ring_buffer_read_finish(iter.buffer_iter[cpu]);
			iter.buffer_iter[cpu] = NULL;
		}
	}
}

/*
 * kdb_ftdump - Dump the ftrace log buffer
 *
 * Usage: ftdump [skip_#entries] [cpu]
 *   argv[1] (optional): number of entries to skip; a negative value
 *                       means "dump only the last |n| entries".
 *   argv[2] (optional): CPU to dump; defaults to all tracing CPUs.
 *
 * Returns 0 on success, or a KDB_* error code for bad arguments.
 */
static int kdb_ftdump(int argc, const char **argv)
{
	int skip_entries = 0;
	long cpu_file;
	char *cp;
	int cnt;
	int cpu;

	if (argc > 2)
		return KDB_ARGCOUNT;

	/* An unparsable skip count silently falls back to 0 (skip nothing). */
	if (argc) {
		skip_entries = simple_strtol(argv[1], &cp, 0);
		if (*cp)
			skip_entries = 0;
	}

	/* Validate an explicit CPU argument; otherwise dump all CPUs. */
	if (argc == 2) {
		cpu_file = simple_strtol(argv[2], &cp, 0);
		if (*cp || cpu_file >= NR_CPUS || cpu_file < 0 ||
		    !cpu_online(cpu_file))
			return KDB_BADINT;
	} else {
		cpu_file = RING_BUFFER_ALL_CPUS;
	}

	/*
	 * NOTE(review): kdb_trap_printk presumably routes printk output
	 * through kdb_printf while the dump runs — confirm against kdb core.
	 */
	kdb_trap_printk++;

	trace_init_global_iter(&iter);
	iter.buffer_iter = buffer_iter;

	/* Stop new entries from being recorded while we read the buffer. */
	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	/* A negative skip_entries means skip all but the last entries */
	if (skip_entries < 0) {
		if (cpu_file == RING_BUFFER_ALL_CPUS)
			cnt = trace_total_entries(NULL);
		else
			cnt = trace_total_entries_cpu(NULL, cpu_file);
		skip_entries = max(cnt + skip_entries, 0);
	}

	ftrace_dump_buf(skip_entries, cpu_file);

	/* Re-enable recording, mirroring the atomic_inc loop above. */
	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	kdb_trap_printk--;

	return 0;
}

/*
 * Register the "ftdump" command with kdb at late_initcall time.
 * KDB_ENABLE_ALWAYS_SAFE marks the command as usable regardless of the
 * configured kdb command restrictions.
 */
static __init int kdb_ftrace_register(void)
{
	kdb_register_flags("ftdump", kdb_ftdump, "[skip_#entries] [cpu]",
			    "Dump ftrace log; -skip dumps last #entries", 0,
			    KDB_ENABLE_ALWAYS_SAFE);
	return 0;
}

late_initcall(kdb_ftrace_register);