xref: /dragonfly/sys/kern/kern_ktr.c (revision c2cd059b)
1 /*
2  * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 /*
35  * The following copyright applies to the DDB command code:
36  *
37  * Copyright (c) 2000 John Baldwin <jhb@FreeBSD.org>
38  * All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. Neither the name of the author nor the names of any co-contributors
49  *    may be used to endorse or promote products derived from this software
50  *    without specific prior written permission.
51  *
52  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
53  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
56  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62  * SUCH DAMAGE.
63  */
64 /*
65  * $DragonFly: src/sys/kern/kern_ktr.c,v 1.13 2006/03/25 21:28:07 swildner Exp $
66  */
67 /*
68  * Kernel tracepoint facility.
69  */
70 
71 #include "opt_ddb.h"
72 #include "opt_ktr.h"
73 
74 #include <sys/param.h>
75 #include <sys/cons.h>
76 #include <sys/kernel.h>
77 #include <sys/libkern.h>
78 #include <sys/proc.h>
79 #include <sys/sysctl.h>
80 #include <sys/ktr.h>
81 #include <sys/systm.h>
82 #include <sys/time.h>
83 #include <sys/malloc.h>
84 #include <sys/spinlock.h>
85 #include <sys/thread2.h>
86 #include <sys/spinlock2.h>
87 #include <sys/ctype.h>
88 
89 #include <machine/cpu.h>
90 #include <machine/cpufunc.h>
91 #include <machine/specialreg.h>
92 #include <machine/md_var.h>
93 
94 #include <ddb/ddb.h>
95 
96 #ifndef KTR_ENTRIES
97 #define	KTR_ENTRIES		2048
98 #endif
99 #define KTR_ENTRIES_MASK	(KTR_ENTRIES - 1)
100 
101 /*
102  * test logging support.  When ktr_testlogcnt is non-zero each synchronization
103  * interrupt will issue six back-to-back ktr logging messages on cpu 0
104  * so the user can determine KTR logging overheads.
105  */
/* KTR_TESTLOG defaults to KTR_ALL unless overridden by the kernel config */
#if !defined(KTR_TESTLOG)
#define KTR_TESTLOG	KTR_ALL
#endif
/* Master enable for the built-in test events, plus the events themselves */
KTR_INFO_MASTER(testlog);
/* test1-test3 carry four pointer-sized arguments, test4-test6 carry none */
KTR_INFO(KTR_TESTLOG, testlog, test1, 0, "test1", sizeof(void *) * 4);
KTR_INFO(KTR_TESTLOG, testlog, test2, 1, "test2", sizeof(void *) * 4);
KTR_INFO(KTR_TESTLOG, testlog, test3, 2, "test3", sizeof(void *) * 4);
KTR_INFO(KTR_TESTLOG, testlog, test4, 3, "test4", 0);
KTR_INFO(KTR_TESTLOG, testlog, test5, 4, "test5", 0);
KTR_INFO(KTR_TESTLOG, testlog, test6, 5, "test6", 0);
#ifdef SMP
/* IPI latency/pipeline test events, only meaningful with multiple cpus */
KTR_INFO(KTR_TESTLOG, testlog, pingpong, 6, "pingpong", 0);
KTR_INFO(KTR_TESTLOG, testlog, pipeline, 7, "pipeline", 0);
#endif
/* markers bracketing the critical-section and spinlock overhead loops */
KTR_INFO(KTR_TESTLOG, testlog, crit_beg, 8, "crit_beg", 0);
KTR_INFO(KTR_TESTLOG, testlog, crit_end, 9, "crit_end", 0);
KTR_INFO(KTR_TESTLOG, testlog, spin_beg, 10, "spin_beg", 0);
KTR_INFO(KTR_TESTLOG, testlog, spin_end, 11, "spin_end", 0);
/* convenience wrappers for emitting the test events declared above */
#define logtest(name)	KTR_LOG(testlog_ ## name, 0, 0, 0, 0)
#define logtest_noargs(name)	KTR_LOG(testlog_ ## name)
126 
MALLOC_DEFINE(M_KTR, "ktr", "ktr buffers");

/* debug.ktr sysctl tree */
SYSCTL_NODE(_debug, OID_AUTO, ktr, CTLFLAG_RW, 0, "ktr");

/* presumably masks which cpus may log -- not referenced in this file; verify */
static int32_t	ktr_cpumask = -1;
TUNABLE_INT("debug.ktr.cpumask", &ktr_cpumask);
SYSCTL_INT(_debug_ktr, OID_AUTO, cpumask, CTLFLAG_RW, &ktr_cpumask, 0, "");

/* number of entries in each per-cpu ring buffer (fixed at compile time) */
static int	ktr_entries = KTR_ENTRIES;
SYSCTL_INT(_debug_ktr, OID_AUTO, entries, CTLFLAG_RD, &ktr_entries, 0, "");

/* trace buffer layout version exported for userland dump tools */
static int	ktr_version = KTR_VERSION;
SYSCTL_INT(_debug_ktr, OID_AUTO, version, CTLFLAG_RD, &ktr_version, 0, "");

/* record caller PCs in each entry via cpu_ktr_caller() */
static int	ktr_stacktrace = 1;
SYSCTL_INT(_debug_ktr, OID_AUTO, stacktrace, CTLFLAG_RD, &ktr_stacktrace, 0, "");

/* non-zero enables the periodic inter-cpu TSC resynchronization */
static int	ktr_resynchronize = 0;
SYSCTL_INT(_debug_ktr, OID_AUTO, resynchronize, CTLFLAG_RW, &ktr_resynchronize, 0, "");

#if KTR_TESTLOG
/* writing a count to one of these sysctls triggers the matching overhead test
 * on the next resync callout tick (see ktr_resync_callback()) */
static int	ktr_testlogcnt = 0;
SYSCTL_INT(_debug_ktr, OID_AUTO, testlogcnt, CTLFLAG_RW, &ktr_testlogcnt, 0, "");
static int	ktr_testipicnt = 0;
static int	ktr_testipicnt_remainder;	/* ping-pong IPIs still in flight */
SYSCTL_INT(_debug_ktr, OID_AUTO, testipicnt, CTLFLAG_RW, &ktr_testipicnt, 0, "");
static int	ktr_testcritcnt = 0;
SYSCTL_INT(_debug_ktr, OID_AUTO, testcritcnt, CTLFLAG_RW, &ktr_testcritcnt, 0, "");
static int	ktr_testspincnt = 0;
SYSCTL_INT(_debug_ktr, OID_AUTO, testspincnt, CTLFLAG_RW, &ktr_testspincnt, 0, "");
#endif
158 
159 /*
160  * Give cpu0 a static buffer so the tracepoint facility can be used during
161  * early boot (note however that we still use a critical section, XXX).
162  */
163 static struct	ktr_entry ktr_buf0[KTR_ENTRIES];
164 static struct	ktr_entry *ktr_buf[MAXCPU] = { &ktr_buf0[0] };
165 static int	ktr_idx[MAXCPU];
166 #ifdef SMP
167 static int	ktr_sync_state = 0;
168 static int	ktr_sync_count;
169 static int64_t	ktr_sync_tsc;
170 #endif
171 struct callout	ktr_resync_callout;
172 
173 #ifdef KTR_VERBOSE
174 int	ktr_verbose = KTR_VERBOSE;
175 TUNABLE_INT("debug.ktr.verbose", &ktr_verbose);
176 SYSCTL_INT(_debug_ktr, OID_AUTO, verbose, CTLFLAG_RW, &ktr_verbose, 0, "");
177 #endif
178 
179 extern int64_t tsc_offsets[];
180 
181 #if KTR_TESTLOG || KTR_ALL
182 
183 static void
184 ktr_sysinit(void *dummy)
185 {
186 	int i;
187 
188 	for(i = 1; i < ncpus; ++i) {
189 		ktr_buf[i] = malloc(KTR_ENTRIES * sizeof(struct ktr_entry),
190 				    M_KTR, M_WAITOK | M_ZERO);
191 	}
192 }
193 SYSINIT(ktr_sysinit, SI_SUB_INTRINSIC, SI_ORDER_FIRST, ktr_sysinit, NULL);
194 
195 #endif
196 
197 /*
198  * Try to resynchronize the TSC's for all cpus.  This is really, really nasty.
199  * We have to send an IPIQ message to all remote cpus, wait until they
200  * get into their IPIQ processing code loop, then do an even stricter hard
201  * loop to get the cpus as close to synchronized as we can to get the most
202  * accurate reading.
203  *
204  * This callback occurs on cpu0.
205  */
206 static void ktr_resync_callback(void *dummy);
207 static void ktr_pingpong_remote(void *dummy);
208 static void ktr_pipeline_remote(void *dummy);
209 
210 static void
211 ktr_resyncinit(void *dummy)
212 {
213 	callout_init(&ktr_resync_callout);
214 	callout_reset(&ktr_resync_callout, hz / 10, ktr_resync_callback, NULL);
215 }
216 SYSINIT(ktr_resync, SI_SUB_FINISH_SMP+1, SI_ORDER_ANY, ktr_resyncinit, NULL);
217 
218 #ifdef SMP
219 
220 static void ktr_resync_remote(void *dummy);
221 extern cpumask_t smp_active_mask;
222 
223 /*
224  * We use a callout callback instead of a systimer because we cannot afford
225  * to preempt anyone to do this, or we might deadlock a spin-lock or
226  * serializer between two cpus.
227  */
228 static
229 void
230 ktr_resync_callback(void *dummy __unused)
231 {
232 	int count;
233 
234 	KKASSERT(mycpu->gd_cpuid == 0);
235 
236 #if KTR_TESTLOG
237 	/*
238 	 * Test logging
239 	 */
240 	if (ktr_testlogcnt) {
241 		--ktr_testlogcnt;
242 		cpu_disable_intr();
243 		logtest(test1);
244 		logtest(test2);
245 		logtest(test3);
246 		logtest_noargs(test4);
247 		logtest_noargs(test5);
248 		logtest_noargs(test6);
249 		cpu_enable_intr();
250 	}
251 
252 	/*
253 	 * Test IPI messaging
254 	 */
255 	if (ktr_testipicnt && ktr_testipicnt_remainder == 0 && ncpus > 1) {
256 		ktr_testipicnt_remainder = ktr_testipicnt;
257 		ktr_testipicnt = 0;
258 		lwkt_send_ipiq_bycpu(1, ktr_pingpong_remote, NULL);
259 	}
260 
261 	/*
262 	 * Test critical sections
263 	 */
264 	if (ktr_testcritcnt) {
265 		crit_enter();
266 		crit_exit();
267 		logtest_noargs(crit_beg);
268 		for (count = ktr_testcritcnt; count; --count) {
269 			crit_enter();
270 			crit_exit();
271 		}
272 		logtest_noargs(crit_end);
273 		ktr_testcritcnt = 0;
274 	}
275 
276 	/*
277 	 * Test spinlock sections
278 	 */
279 	if (ktr_testspincnt) {
280 		struct spinlock spin;
281 
282 		spin_init(&spin);
283 		spin_lock_quick(&spin);
284 		spin_unlock_quick(&spin);
285 		logtest_noargs(spin_beg);
286 		for (count = ktr_testspincnt; count; --count) {
287 			spin_lock_quick(&spin);
288 			spin_unlock_quick(&spin);
289 		}
290 		logtest_noargs(spin_end);
291 		ktr_testspincnt = 0;
292 	}
293 #endif
294 
295 	/*
296 	 * Resynchronize the TSC
297 	 */
298 	if (ktr_resynchronize == 0)
299 		goto done;
300 	if ((cpu_feature & CPUID_TSC) == 0)
301 		return;
302 
303 	/*
304 	 * Send the synchronizing IPI and wait for all cpus to get into
305 	 * their spin loop.  We must process incoming IPIs while waiting
306 	 * to avoid a deadlock.
307 	 */
308 	crit_enter();
309 	ktr_sync_count = 0;
310 	ktr_sync_state = 1;
311 	ktr_sync_tsc = rdtsc();
312 	count = lwkt_send_ipiq_mask(mycpu->gd_other_cpus & smp_active_mask,
313 				    (ipifunc1_t)ktr_resync_remote, NULL);
314 	while (ktr_sync_count != count)
315 		lwkt_process_ipiq();
316 
317 	/*
318 	 * Continuously update the TSC for cpu 0 while waiting for all other
319 	 * cpus to finish stage 2.
320 	 */
321 	cpu_disable_intr();
322 	ktr_sync_tsc = rdtsc();
323 	cpu_sfence();
324 	ktr_sync_state = 2;
325 	cpu_sfence();
326 	while (ktr_sync_count != 0) {
327 		ktr_sync_tsc = rdtsc();
328 		cpu_lfence();
329 		cpu_nop();
330 	}
331 	cpu_enable_intr();
332 	crit_exit();
333 	ktr_sync_state = 0;
334 done:
335 	callout_reset(&ktr_resync_callout, hz / 10, ktr_resync_callback, NULL);
336 }
337 
338 /*
339  * The remote-end of the KTR synchronization protocol runs on all cpus except
340  * cpu 0.  Since this is an IPI function, it is entered with the current
341  * thread in a critical section.
342  */
343 static void
344 ktr_resync_remote(void *dummy __unused)
345 {
346 	volatile int64_t tsc1 = ktr_sync_tsc;
347 	volatile int64_t tsc2;
348 
349 	/*
350 	 * Inform the master that we have entered our hard loop.
351 	 */
352 	KKASSERT(ktr_sync_state == 1);
353 	atomic_add_int(&ktr_sync_count, 1);
354 	while (ktr_sync_state == 1) {
355 		lwkt_process_ipiq();
356 	}
357 
358 	/*
359 	 * Now the master is in a hard loop, synchronize the TSC and
360 	 * we are done.
361 	 */
362 	cpu_disable_intr();
363 	KKASSERT(ktr_sync_state == 2);
364 	tsc2 = ktr_sync_tsc;
365 	if (tsc2 > tsc1)
366 		tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc2;
367 	atomic_subtract_int(&ktr_sync_count, 1);
368 	cpu_enable_intr();
369 }
370 
371 static
372 void
373 ktr_pingpong_remote(void *dummy __unused)
374 {
375 	int other_cpu;
376 
377 	logtest_noargs(pingpong);
378 	other_cpu = 1 - mycpu->gd_cpuid;
379 	if (ktr_testipicnt_remainder) {
380 		--ktr_testipicnt_remainder;
381 		lwkt_send_ipiq_bycpu(other_cpu, ktr_pingpong_remote, NULL);
382 	} else {
383 		lwkt_send_ipiq_bycpu(other_cpu, ktr_pipeline_remote, NULL);
384 		lwkt_send_ipiq_bycpu(other_cpu, ktr_pipeline_remote, NULL);
385 		lwkt_send_ipiq_bycpu(other_cpu, ktr_pipeline_remote, NULL);
386 		lwkt_send_ipiq_bycpu(other_cpu, ktr_pipeline_remote, NULL);
387 		lwkt_send_ipiq_bycpu(other_cpu, ktr_pipeline_remote, NULL);
388 	}
389 }
390 
391 static
392 void
393 ktr_pipeline_remote(void *dummy __unused)
394 {
395 	logtest_noargs(pipeline);
396 }
397 
398 #else	/* !SMP */
399 
400 /*
401  * The resync callback for UP doesn't do anything other then run the test
402  * log messages.  If test logging is not enabled, don't bother resetting
403  * the callout.
404  */
405 static
406 void
407 ktr_resync_callback(void *dummy __unused)
408 {
409 #if KTR_TESTLOG
410 	/*
411 	 * Test logging
412 	 */
413 	if (ktr_testlogcnt) {
414 		--ktr_testlogcnt;
415 		cpu_disable_intr();
416 		logtest(test1);
417 		logtest(test2);
418 		logtest(test3);
419 		logtest_noargs(test4);
420 		logtest_noargs(test5);
421 		logtest_noargs(test6);
422 		cpu_enable_intr();
423 	}
424 	callout_reset(&ktr_resync_callout, hz / 10, ktr_resync_callback, NULL);
425 #endif
426 }
427 
428 #endif
429 
430 /*
431  * KTR_WRITE_ENTRY - Primary entry point for kernel trace logging
432  */
433 static __inline
434 void
435 ktr_write_entry(struct ktr_info *info, const char *file, int line,
436 		const void *ptr)
437 {
438 	struct ktr_entry *entry;
439 	int cpu;
440 
441 	cpu = mycpu->gd_cpuid;
442 	if (!ktr_buf[cpu])
443 		return;
444 
445 	crit_enter();
446 	entry = ktr_buf[cpu] + (ktr_idx[cpu] & KTR_ENTRIES_MASK);
447 	++ktr_idx[cpu];
448 	if (cpu_feature & CPUID_TSC) {
449 #ifdef SMP
450 		entry->ktr_timestamp = rdtsc() - tsc_offsets[cpu];
451 #else
452 		entry->ktr_timestamp = rdtsc();
453 #endif
454 	} else {
455 		entry->ktr_timestamp = get_approximate_time_t();
456 	}
457 	entry->ktr_info = info;
458 	entry->ktr_file = file;
459 	entry->ktr_line = line;
460 	crit_exit();
461 	if (info->kf_data_size > KTR_BUFSIZE)
462 		bcopyi(ptr, entry->ktr_data, KTR_BUFSIZE);
463 	else if (info->kf_data_size)
464 		bcopyi(ptr, entry->ktr_data, info->kf_data_size);
465 	if (ktr_stacktrace)
466 		cpu_ktr_caller(entry);
467 #ifdef KTR_VERBOSE
468 	if (ktr_verbose && info->kf_format) {
469 #ifdef SMP
470 		printf("cpu%d ", cpu);
471 #endif
472 		if (ktr_verbose > 1) {
473 			printf("%s.%d\t", entry->ktr_file, entry->ktr_line);
474 		}
475 		vprintf(info->kf_format, ptr);
476 		printf("\n");
477 	}
478 #endif
479 }
480 
481 void
482 ktr_log(struct ktr_info *info, const char *file, int line, ...)
483 {
484 	__va_list va;
485 
486 	if (panicstr == NULL) {
487 		__va_start(va, line);
488 		ktr_write_entry(info, file, line, va);
489 		__va_end(va);
490 	}
491 }
492 
493 void
494 ktr_log_ptr(struct ktr_info *info, const char *file, int line, const void *ptr)
495 {
496 	if (panicstr == NULL) {
497 		ktr_write_entry(info, file, line, ptr);
498 	}
499 }
500 
501 #ifdef DDB
502 
/* page length for DDB output pagination (not referenced in this chunk) */
#define	NUM_LINES_PER_PAGE	19

/*
 * Per-cpu iteration state for the "show ktr" command.
 *	cur	index of the next entry to consider, -1 once exhausted
 *	first	index of the first entry visited, used to detect wrap
 */
struct tstate {
	int	cur;
	int	first;
};

static	int db_ktr_verbose;	/* set by the 'v' command modifier */
static	int db_mach_vtrace(int cpu, struct ktr_entry *kp, int idx);
512 
513 DB_SHOW_COMMAND(ktr, db_ktr_all)
514 {
515 	int a_flag = 0;
516 	int c;
517 	int nl = 0;
518 	int i;
519 	struct tstate tstate[MAXCPU];
520 	int printcpu = -1;
521 
522 	for(i = 0; i < ncpus; i++) {
523 		tstate[i].first = -1;
524 		tstate[i].cur = ktr_idx[i] & KTR_ENTRIES_MASK;
525 	}
526 	db_ktr_verbose = 0;
527 	while ((c = *(modif++)) != '\0') {
528 		if (c == 'v') {
529 			db_ktr_verbose = 1;
530 		}
531 		else if (c == 'a') {
532 			a_flag = 1;
533 		}
534 		else if (c == 'c') {
535 			printcpu = 0;
536 			while ((c = *(modif++)) != '\0') {
537 				if (isdigit(c)) {
538 					printcpu *= 10;
539 					printcpu += c - '0';
540 				}
541 				else {
542 					modif++;
543 					break;
544 				}
545 			}
546 			modif--;
547 		}
548 	}
549 	if (printcpu > ncpus - 1) {
550 		db_printf("Invalid cpu number\n");
551 		return;
552 	}
553 	/*
554 	 * Lopp throug all the buffers and print the content of them, sorted
555 	 * by the timestamp.
556 	 */
557 	while (1) {
558 		int counter;
559 		u_int64_t highest_ts;
560 		int highest_cpu;
561 		struct ktr_entry *kp;
562 
563 		if (a_flag == 1 && cncheckc() != -1)
564 			return;
565 		highest_ts = 0;
566 		highest_cpu = -1;
567 		/*
568 		 * Find the lowest timestamp
569 		 */
570 		for (i = 0, counter = 0; i < ncpus; i++) {
571 			if (ktr_buf[i] == NULL)
572 				continue;
573 			if (printcpu != -1 && printcpu != i)
574 				continue;
575 			if (tstate[i].cur == -1) {
576 				counter++;
577 				if (counter == ncpus) {
578 					db_printf("--- End of trace buffer ---\n");
579 					return;
580 				}
581 				continue;
582 			}
583 			if (ktr_buf[i][tstate[i].cur].ktr_timestamp > highest_ts) {
584 				highest_ts = ktr_buf[i][tstate[i].cur].ktr_timestamp;
585 				highest_cpu = i;
586 			}
587 		}
588 		i = highest_cpu;
589 		KKASSERT(i != -1);
590 		kp = &ktr_buf[i][tstate[i].cur];
591 		if (tstate[i].first == -1)
592 			tstate[i].first = tstate[i].cur;
593 		if (--tstate[i].cur < 0)
594 			tstate[i].cur = KTR_ENTRIES - 1;
595 		if (tstate[i].first == tstate[i].cur) {
596 			db_mach_vtrace(i, kp, tstate[i].cur + 1);
597 			tstate[i].cur = -1;
598 			continue;
599 		}
600 		if (ktr_buf[i][tstate[i].cur].ktr_info == NULL)
601 			tstate[i].cur = -1;
602 		if (db_more(&nl) == -1)
603 			break;
604 		if (db_mach_vtrace(i, kp, tstate[i].cur + 1) == 0)
605 			tstate[i].cur = -1;
606 	}
607 }
608 
/*
 * Print a single trace entry to the debugger console.  Returns 0 if the
 * entry slot was empty (ktr_info == NULL), 1 if something was printed.
 */
static int
db_mach_vtrace(int cpu, struct ktr_entry *kp, int idx)
{
	if (kp->ktr_info == NULL)
		return(0);
#ifdef SMP
	db_printf("cpu%d ", cpu);
#endif
	db_printf("%d: ", idx);
	if (db_ktr_verbose) {
		db_printf("%10.10lld %s.%d\t", (long long)kp->ktr_timestamp,
		    kp->ktr_file, kp->ktr_line);
	}
	db_printf("%s\t", kp->ktr_info->kf_name);
	db_printf("from(%p,%p) ", kp->ktr_caller1, kp->ktr_caller2);
	if (kp->ktr_info->kf_format) {
		/*
		 * Argument data is passed as up to twelve int32_t's; the
		 * event's format string consumes only as many as it needs.
		 */
		int32_t *args = kp->ktr_data;
		db_printf(kp->ktr_info->kf_format,
			  args[0], args[1], args[2], args[3],
			  args[4], args[5], args[6], args[7],
			  args[8], args[9], args[10], args[11]);

	}
	db_printf("\n");

	return(1);
}
636 
637 #endif	/* DDB */
638