xref: /dragonfly/sys/kern/kern_ktr.c (revision 60233e58)
1 /*
2  * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 /*
35  * The following copyright applies to the DDB command code:
36  *
37  * Copyright (c) 2000 John Baldwin <jhb@FreeBSD.org>
38  * All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. Neither the name of the author nor the names of any co-contributors
49  *    may be used to endorse or promote products derived from this software
50  *    without specific prior written permission.
51  *
52  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
53  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
56  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62  * SUCH DAMAGE.
63  */
64 /*
65  * $DragonFly: src/sys/kern/kern_ktr.c,v 1.23 2008/02/12 23:33:23 corecode Exp $
66  */
67 /*
68  * Kernel tracepoint facility.
69  */
70 
71 #include "opt_ddb.h"
72 #include "opt_ktr.h"
73 
74 #include <sys/param.h>
75 #include <sys/cons.h>
76 #include <sys/kernel.h>
77 #include <sys/libkern.h>
78 #include <sys/proc.h>
79 #include <sys/sysctl.h>
80 #include <sys/ktr.h>
81 #include <sys/systm.h>
82 #include <sys/time.h>
83 #include <sys/malloc.h>
84 #include <sys/spinlock.h>
85 #include <sys/thread2.h>
86 #include <sys/spinlock2.h>
87 #include <sys/ctype.h>
88 
89 #include <machine/cpu.h>
90 #include <machine/cpufunc.h>
91 #include <machine/specialreg.h>
92 #include <machine/md_var.h>
93 
94 #include <ddb/ddb.h>
95 
/*
 * Size of each per-cpu trace ring, overridable via the KTR_ENTRIES kernel
 * option.  Must be a power of 2 so KTR_ENTRIES_MASK works as a wrap mask.
 */
#ifndef KTR_ENTRIES
#define	KTR_ENTRIES		2048
#endif
#define KTR_ENTRIES_MASK	(KTR_ENTRIES - 1)

/*
 * test logging support.  When ktr_testlogcnt is non-zero each synchronization
 * interrupt will issue six back-to-back ktr logging messages on cpu 0
 * so the user can determine KTR logging overheads.
 */
#if !defined(KTR_TESTLOG)
#define KTR_TESTLOG	KTR_ALL
#endif
KTR_INFO_MASTER(testlog);
#if KTR_TESTLOG
/* test1-test3 carry four pointer-sized arguments, test4-test6 carry none */
KTR_INFO(KTR_TESTLOG, testlog, test1, 0, "test1", sizeof(void *) * 4);
KTR_INFO(KTR_TESTLOG, testlog, test2, 1, "test2", sizeof(void *) * 4);
KTR_INFO(KTR_TESTLOG, testlog, test3, 2, "test3", sizeof(void *) * 4);
KTR_INFO(KTR_TESTLOG, testlog, test4, 3, "test4", 0);
KTR_INFO(KTR_TESTLOG, testlog, test5, 4, "test5", 0);
KTR_INFO(KTR_TESTLOG, testlog, test6, 5, "test6", 0);
#ifdef SMP
/* events logged by the cpu0<->cpu1 IPI latency/pipelining tests */
KTR_INFO(KTR_TESTLOG, testlog, pingpong, 6, "pingpong", 0);
KTR_INFO(KTR_TESTLOG, testlog, pipeline, 7, "pipeline", 0);
#endif
/* bracketing events for the critical-section and spinlock timing loops */
KTR_INFO(KTR_TESTLOG, testlog, crit_beg, 8, "crit_beg", 0);
KTR_INFO(KTR_TESTLOG, testlog, crit_end, 9, "crit_end", 0);
KTR_INFO(KTR_TESTLOG, testlog, spin_beg, 10, "spin_beg", 0);
KTR_INFO(KTR_TESTLOG, testlog, spin_end, 11, "spin_end", 0);
/* convenience wrappers for emitting testlog events */
#define logtest(name)	KTR_LOG(testlog_ ## name, 0, 0, 0, 0)
#define logtest_noargs(name)	KTR_LOG(testlog_ ## name)
#endif
128 
MALLOC_DEFINE(M_KTR, "ktr", "ktr buffers");

SYSCTL_NODE(_debug, OID_AUTO, ktr, CTLFLAG_RW, 0, "ktr");

/* debug.ktr.entries: per-cpu trace ring size (read-only) */
int		ktr_entries = KTR_ENTRIES;
SYSCTL_INT(_debug_ktr, OID_AUTO, entries, CTLFLAG_RD, &ktr_entries, 0, "");

/* debug.ktr.version: trace record format version (read-only) */
int		ktr_version = KTR_VERSION;
SYSCTL_INT(_debug_ktr, OID_AUTO, version, CTLFLAG_RD, &ktr_version, 0, "");

/* debug.ktr.stacktrace: record caller return addresses with each entry */
static int	ktr_stacktrace = 1;
SYSCTL_INT(_debug_ktr, OID_AUTO, stacktrace, CTLFLAG_RD, &ktr_stacktrace, 0, "");

/* debug.ktr.resynchronize: enable the periodic cross-cpu TSC resync */
static int	ktr_resynchronize = 0;
SYSCTL_INT(_debug_ktr, OID_AUTO, resynchronize, CTLFLAG_RW, &ktr_resynchronize, 0, "");

#if KTR_TESTLOG
/* counters armed via sysctl; consumed by ktr_resync_callback's test code */
static int	ktr_testlogcnt = 0;
SYSCTL_INT(_debug_ktr, OID_AUTO, testlogcnt, CTLFLAG_RW, &ktr_testlogcnt, 0, "");
static int	ktr_testipicnt = 0;
static int	ktr_testipicnt_remainder;	/* IPIs left in the current run */
SYSCTL_INT(_debug_ktr, OID_AUTO, testipicnt, CTLFLAG_RW, &ktr_testipicnt, 0, "");
static int	ktr_testcritcnt = 0;
SYSCTL_INT(_debug_ktr, OID_AUTO, testcritcnt, CTLFLAG_RW, &ktr_testcritcnt, 0, "");
static int	ktr_testspincnt = 0;
SYSCTL_INT(_debug_ktr, OID_AUTO, testspincnt, CTLFLAG_RW, &ktr_testspincnt, 0, "");
#endif

/*
 * Give cpu0 a static buffer so the tracepoint facility can be used during
 * early boot (note however that we still use a critical section, XXX).
 */
static struct	ktr_entry ktr_buf0[KTR_ENTRIES];

/* per-cpu trace state; cpu0 points at the static buffer above, the other
 * cpus get kmalloc'd buffers in ktr_sysinit() */
__cachealign struct ktr_cpu ktr_cpu[MAXCPU] = {
	{ .core.ktr_buf = &ktr_buf0[0] }
};

#ifdef SMP
/* handshake state shared between cpu0's resync callback and remote cpus */
static int	ktr_sync_state = 0;
static int	ktr_sync_count;
static int64_t	ktr_sync_tsc;
#endif
struct callout	ktr_resync_callout;

#ifdef KTR_VERBOSE
/* debug.ktr.verbose: echo trace entries to the console as they are logged */
int	ktr_verbose = KTR_VERBOSE;
TUNABLE_INT("debug.ktr.verbose", &ktr_verbose);
SYSCTL_INT(_debug_ktr, OID_AUTO, verbose, CTLFLAG_RW, &ktr_verbose, 0, "");
#endif

static void ktr_resync_callback(void *dummy __unused);

/* per-cpu TSC offsets relative to cpu0, written by the resync protocol */
extern int64_t tsc_offsets[];
183 
184 static void
185 ktr_sysinit(void *dummy)
186 {
187 	struct ktr_cpu_core *kcpu;
188 	int i;
189 
190 	for(i = 1; i < ncpus; ++i) {
191 		kcpu = &ktr_cpu[i].core;
192 		kcpu->ktr_buf = kmalloc(KTR_ENTRIES * sizeof(struct ktr_entry),
193 					M_KTR, M_WAITOK | M_ZERO);
194 	}
195 	callout_init(&ktr_resync_callout);
196 	callout_reset(&ktr_resync_callout, hz / 10, ktr_resync_callback, NULL);
197 }
198 SYSINIT(ktr_sysinit, SI_BOOT2_KLD, SI_ORDER_ANY, ktr_sysinit, NULL);
199 
/*
 * Try to resynchronize the TSC's for all cpus.  This is really, really nasty.
 * We have to send an IPIQ message to all remote cpus, wait until they
 * get into their IPIQ processing code loop, then do an even stricter hard
 * loop to get the cpus as close to synchronized as we can to get the most
 * accurate reading.
 *
 * This callback occurs on cpu0.
 */
#if KTR_TESTLOG
/* forward declarations for the IPI test handlers below */
static void ktr_pingpong_remote(void *dummy);
static void ktr_pipeline_remote(void *dummy);
#endif

#if defined(SMP) && defined(_RDTSC_SUPPORTED_)

static void ktr_resync_remote(void *dummy);
/* mask of cpus that are up and participating in IPI traffic */
extern cpumask_t smp_active_mask;

/*
 * We use a callout callback instead of a systimer because we cannot afford
 * to preempt anyone to do this, or we might deadlock a spin-lock or
 * serializer between two cpus.
 */
224 static
225 void
226 ktr_resync_callback(void *dummy __unused)
227 {
228 	int count;
229 
230 	KKASSERT(mycpu->gd_cpuid == 0);
231 
232 #if KTR_TESTLOG
233 	/*
234 	 * Test logging
235 	 */
236 	if (ktr_testlogcnt) {
237 		--ktr_testlogcnt;
238 		cpu_disable_intr();
239 		logtest(test1);
240 		logtest(test2);
241 		logtest(test3);
242 		logtest_noargs(test4);
243 		logtest_noargs(test5);
244 		logtest_noargs(test6);
245 		cpu_enable_intr();
246 	}
247 
248 	/*
249 	 * Test IPI messaging
250 	 */
251 	if (ktr_testipicnt && ktr_testipicnt_remainder == 0 && ncpus > 1) {
252 		ktr_testipicnt_remainder = ktr_testipicnt;
253 		ktr_testipicnt = 0;
254 		lwkt_send_ipiq_bycpu(1, ktr_pingpong_remote, NULL);
255 	}
256 
257 	/*
258 	 * Test critical sections
259 	 */
260 	if (ktr_testcritcnt) {
261 		crit_enter();
262 		crit_exit();
263 		logtest_noargs(crit_beg);
264 		for (count = ktr_testcritcnt; count; --count) {
265 			crit_enter();
266 			crit_exit();
267 		}
268 		logtest_noargs(crit_end);
269 		ktr_testcritcnt = 0;
270 	}
271 
272 	/*
273 	 * Test spinlock sections
274 	 */
275 	if (ktr_testspincnt) {
276 		struct spinlock spin;
277 
278 		spin_init(&spin);
279 		spin_lock_wr(&spin);
280 		spin_unlock_wr(&spin);
281 		logtest_noargs(spin_beg);
282 		for (count = ktr_testspincnt; count; --count) {
283 			spin_lock_wr(&spin);
284 			spin_unlock_wr(&spin);
285 		}
286 		logtest_noargs(spin_end);
287 		logtest_noargs(spin_beg);
288 		for (count = ktr_testspincnt; count; --count) {
289 			spin_lock_rd(&spin);
290 			spin_unlock_rd(&spin);
291 		}
292 		logtest_noargs(spin_end);
293 		ktr_testspincnt = 0;
294 	}
295 #endif
296 
297 	/*
298 	 * Resynchronize the TSC
299 	 */
300 	if (ktr_resynchronize == 0)
301 		goto done;
302 	if ((cpu_feature & CPUID_TSC) == 0)
303 		return;
304 
305 	/*
306 	 * Send the synchronizing IPI and wait for all cpus to get into
307 	 * their spin loop.  We must process incoming IPIs while waiting
308 	 * to avoid a deadlock.
309 	 */
310 	crit_enter();
311 	ktr_sync_count = 0;
312 	ktr_sync_state = 1;
313 	ktr_sync_tsc = rdtsc();
314 	count = lwkt_send_ipiq_mask(mycpu->gd_other_cpus & smp_active_mask,
315 				    (ipifunc1_t)ktr_resync_remote, NULL);
316 	while (ktr_sync_count != count)
317 		lwkt_process_ipiq();
318 
319 	/*
320 	 * Continuously update the TSC for cpu 0 while waiting for all other
321 	 * cpus to finish stage 2.
322 	 */
323 	cpu_disable_intr();
324 	ktr_sync_tsc = rdtsc();
325 	cpu_sfence();
326 	ktr_sync_state = 2;
327 	cpu_sfence();
328 	while (ktr_sync_count != 0) {
329 		ktr_sync_tsc = rdtsc();
330 		cpu_lfence();
331 		cpu_nop();
332 	}
333 	cpu_enable_intr();
334 	crit_exit();
335 	ktr_sync_state = 0;
336 done:
337 	callout_reset(&ktr_resync_callout, hz / 10, ktr_resync_callback, NULL);
338 }
339 
/*
 * The remote-end of the KTR synchronization protocol runs on all cpus except
 * cpu 0.  Since this is an IPI function, it is entered with the current
 * thread in a critical section.
 */
static void
ktr_resync_remote(void *dummy __unused)
{
	/* cpu0's TSC sample published just before the IPI was sent */
	volatile int64_t tsc1 = ktr_sync_tsc;
	volatile int64_t tsc2;

	/*
	 * Inform the master that we have entered our hard loop.  Keep
	 * processing incoming IPIs while spinning so we cannot deadlock
	 * against other cpus waiting on us.
	 */
	KKASSERT(ktr_sync_state == 1);
	atomic_add_int(&ktr_sync_count, 1);
	while (ktr_sync_state == 1) {
		lwkt_process_ipiq();
	}

	/*
	 * Now the master is in a hard loop, synchronize the TSC and
	 * we are done.  The master continuously refreshes ktr_sync_tsc;
	 * our offset is our own TSC minus the master's latest sample.
	 * Only record an offset if the master's value advanced past the
	 * stage-1 sample (tsc2 > tsc1), i.e. stage 2 was really observed.
	 */
	cpu_disable_intr();
	KKASSERT(ktr_sync_state == 2);
	tsc2 = ktr_sync_tsc;
	if (tsc2 > tsc1)
		tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc2;
	atomic_subtract_int(&ktr_sync_count, 1);
	cpu_enable_intr();
}
372 
373 #if KTR_TESTLOG
374 
375 static
376 void
377 ktr_pingpong_remote(void *dummy __unused)
378 {
379 	int other_cpu;
380 
381 	logtest_noargs(pingpong);
382 	other_cpu = 1 - mycpu->gd_cpuid;
383 	if (ktr_testipicnt_remainder) {
384 		--ktr_testipicnt_remainder;
385 		lwkt_send_ipiq_bycpu(other_cpu, ktr_pingpong_remote, NULL);
386 	} else {
387 		lwkt_send_ipiq_bycpu(other_cpu, ktr_pipeline_remote, NULL);
388 		lwkt_send_ipiq_bycpu(other_cpu, ktr_pipeline_remote, NULL);
389 		lwkt_send_ipiq_bycpu(other_cpu, ktr_pipeline_remote, NULL);
390 		lwkt_send_ipiq_bycpu(other_cpu, ktr_pipeline_remote, NULL);
391 		lwkt_send_ipiq_bycpu(other_cpu, ktr_pipeline_remote, NULL);
392 	}
393 }
394 
/*
 * IPI pipeline test target: log a single 'pipeline' event.  Five of
 * these are queued back-to-back by ktr_pingpong_remote() when a
 * ping-pong run completes.
 */
static
void
ktr_pipeline_remote(void *dummy __unused)
{
	logtest_noargs(pipeline);
}
401 
402 #endif
403 
404 #else	/* !SMP */
405 
406 /*
 * The resync callback for UP doesn't do anything other than run the test
408  * log messages.  If test logging is not enabled, don't bother resetting
409  * the callout.
410  */
static
void
ktr_resync_callback(void *dummy __unused)
{
#if KTR_TESTLOG
	/*
	 * Test logging: emit six back-to-back entries with interrupts
	 * disabled so per-entry logging overhead can be measured.
	 */
	if (ktr_testlogcnt) {
		--ktr_testlogcnt;
		cpu_disable_intr();
		logtest(test1);
		logtest(test2);
		logtest(test3);
		logtest_noargs(test4);
		logtest_noargs(test5);
		logtest_noargs(test6);
		cpu_enable_intr();
	}
	/*
	 * Intentionally only rearm the callout when test logging is
	 * compiled in; on UP there is no TSC resync work to do.
	 */
	callout_reset(&ktr_resync_callout, hz / 10, ktr_resync_callback, NULL);
#endif
}
433 
434 #endif
435 
/*
 * KTR_WRITE_ENTRY - Primary entry point for kernel trace logging.
 *
 * Reserves the next slot in the calling cpu's ring, timestamps it, and
 * copies up to KTR_BUFSIZE bytes of event arguments.  'va' points at the
 * caller's variadic arguments, which are copied raw into the entry.
 */
static __inline
void
ktr_write_entry(struct ktr_info *info, const char *file, int line, __va_list va)
{
	struct ktr_cpu_core *kcpu;
	struct ktr_entry *entry;
	int cpu;

	cpu = mycpu->gd_cpuid;
	kcpu = &ktr_cpu[cpu].core;
	/* nothing to do if this cpu's buffer was never allocated */
	if (kcpu->ktr_buf == NULL)
		return;

	/*
	 * Reserve a slot and fill in the header fields.  The critical
	 * section keeps local preemption from racing the index update.
	 */
	crit_enter();
	entry = kcpu->ktr_buf + (kcpu->ktr_idx & KTR_ENTRIES_MASK);
	++kcpu->ktr_idx;
#ifdef _RDTSC_SUPPORTED_
	if (cpu_feature & CPUID_TSC) {
#ifdef SMP
		/* normalize to cpu0's clock using the resync'd offsets */
		entry->ktr_timestamp = rdtsc() - tsc_offsets[cpu];
#else
		entry->ktr_timestamp = rdtsc();
#endif
	} else
#endif
	{
		/* no TSC available: fall back to a coarse time stamp */
		entry->ktr_timestamp = get_approximate_time_t();
	}
	entry->ktr_info = info;
	entry->ktr_file = file;
	entry->ktr_line = line;
	crit_exit();
	/* copy the event arguments, clamped to the per-entry data size */
	if (info->kf_data_size > KTR_BUFSIZE)
		bcopy(va, entry->ktr_data, KTR_BUFSIZE);
	else if (info->kf_data_size)
		bcopy(va, entry->ktr_data, info->kf_data_size);
	if (ktr_stacktrace)
		cpu_ktr_caller(entry);
#ifdef KTR_VERBOSE
	/* optionally echo the event to the console as it is logged */
	if (ktr_verbose && info->kf_format) {
#ifdef SMP
		kprintf("cpu%d ", cpu);
#endif
		if (ktr_verbose > 1) {
			kprintf("%s.%d\t", entry->ktr_file, entry->ktr_line);
		}
		kvprintf(info->kf_format, va);
		kprintf("\n");
	}
#endif
}
491 
492 void
493 ktr_log(struct ktr_info *info, const char *file, int line, ...)
494 {
495 	__va_list va;
496 
497 	if (panicstr == NULL) {
498 		__va_start(va, line);
499 		ktr_write_entry(info, file, line, va);
500 		__va_end(va);
501 	}
502 }
503 
#ifdef DDB

/* lines shown per page by the debugger pager while dumping traces */
#define	NUM_LINES_PER_PAGE	19

/*
 * Per-cpu cursor state for the merge walk done by the 'show ktr' command.
 */
struct tstate {
	int	cur;	/* current ring index; -1 once this cpu is exhausted */
	int	first;	/* first index visited, used as the wrap stop marker */
};

static	int db_ktr_verbose;
static	int db_mach_vtrace(int cpu, struct ktr_entry *kp, int idx);
/*
 * 'show ktr' DDB command: merge-print the per-cpu trace rings, most
 * recent entries first.  Modifiers: 'v' verbose (timestamp/file/line),
 * 'a' dump everything without paging (any keypress aborts), 'cN'
 * restrict output to cpu N.
 */
DB_SHOW_COMMAND(ktr, db_ktr_all)
{
	struct ktr_cpu_core *kcpu;
	int a_flag = 0;
	int c;
	int nl = 0;
	int i;
	struct tstate tstate[MAXCPU];
	int printcpu = -1;

	/* start each cpu's cursor at its most recently written entry */
	for(i = 0; i < ncpus; i++) {
		kcpu = &ktr_cpu[i].core;
		tstate[i].first = -1;
		tstate[i].cur = (kcpu->ktr_idx - 1) & KTR_ENTRIES_MASK;
	}
	db_ktr_verbose = 0;
	/* parse the command modifiers */
	while ((c = *(modif++)) != '\0') {
		if (c == 'v') {
			db_ktr_verbose = 1;
		}
		else if (c == 'a') {
			a_flag = 1;
		}
		else if (c == 'c') {
			/* 'c' is followed by a decimal cpu number */
			printcpu = 0;
			while ((c = *(modif++)) != '\0') {
				if (isdigit(c)) {
					printcpu *= 10;
					printcpu += c - '0';
				}
				else {
					modif++;
					break;
				}
			}
			modif--;
		}
	}
	if (printcpu > ncpus - 1) {
		db_printf("Invalid cpu number\n");
		return;
	}
	/*
	 * Loop through all the buffers and print their content,
	 * sorted by timestamp (newest entries first).
	 */
	while (1) {
		int counter;
		u_int64_t highest_ts;
		int highest_cpu;
		struct ktr_entry *kp;

		/* in 'a' mode any keypress aborts the dump */
		if (a_flag == 1 && cncheckc() != -1)
			return;
		highest_ts = 0;
		highest_cpu = -1;
		/*
		 * Find the cpu whose current entry has the highest (most
		 * recent) timestamp; that entry is printed next.
		 */
		for (i = 0, counter = 0; i < ncpus; i++) {
			kcpu = &ktr_cpu[i].core;
			if (kcpu->ktr_buf == NULL)
				continue;
			if (printcpu != -1 && printcpu != i)
				continue;
			if (tstate[i].cur == -1) {
				/* this cpu's ring is exhausted */
				counter++;
				if (counter == ncpus) {
					db_printf("--- End of trace buffer ---\n");
					return;
				}
				continue;
			}
			if (kcpu->ktr_buf[tstate[i].cur].ktr_timestamp > highest_ts) {
				highest_ts = kcpu->ktr_buf[tstate[i].cur].ktr_timestamp;
				highest_cpu = i;
			}
		}
		if (highest_cpu < 0) {
			db_printf("no KTR data available\n");
			break;
		}
		i = highest_cpu;
		kcpu = &ktr_cpu[i].core;
		kp = &kcpu->ktr_buf[tstate[i].cur];
		/* remember where we started so we stop after one full lap */
		if (tstate[i].first == -1)
			tstate[i].first = tstate[i].cur;
		if (--tstate[i].cur < 0)
			tstate[i].cur = KTR_ENTRIES - 1;
		if (tstate[i].first == tstate[i].cur) {
			/* wrapped all the way around: print then retire cpu */
			db_mach_vtrace(i, kp, tstate[i].cur + 1);
			tstate[i].cur = -1;
			continue;
		}
		/* a never-written (NULL info) entry ends this cpu's data */
		if (kcpu->ktr_buf[tstate[i].cur].ktr_info == NULL)
			tstate[i].cur = -1;
		if (db_more(&nl) == -1)
			break;
		if (db_mach_vtrace(i, kp, tstate[i].cur + 1) == 0)
			tstate[i].cur = -1;
	}
}
618 
619 static int
620 db_mach_vtrace(int cpu, struct ktr_entry *kp, int idx)
621 {
622 	if (kp->ktr_info == NULL)
623 		return(0);
624 #ifdef SMP
625 	db_printf("cpu%d ", cpu);
626 #endif
627 	db_printf("%d: ", idx);
628 	if (db_ktr_verbose) {
629 		db_printf("%10.10lld %s.%d\t", (long long)kp->ktr_timestamp,
630 		    kp->ktr_file, kp->ktr_line);
631 	}
632 	db_printf("%s\t", kp->ktr_info->kf_name);
633 	db_printf("from(%p,%p) ", kp->ktr_caller1, kp->ktr_caller2);
634 	if (kp->ktr_info->kf_format)
635 		db_vprintf(kp->ktr_info->kf_format, (__va_list)kp->ktr_data);
636 	db_printf("\n");
637 
638 	return(1);
639 }
640 
641 #endif	/* DDB */
642