/* $OpenBSD: kern_clockintr.c,v 1.70 2024/02/25 19:15:50 cheloha Exp $ */
/*
 * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
 * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
 * Copyright (c) 2020-2024 Scott Cheloha <cheloha@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/clockintr.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/time.h>

void clockintr_cancel_locked(struct clockintr *);
void clockintr_hardclock(struct clockrequest *, void *, void *);
void clockintr_schedule_locked(struct clockintr *, uint64_t);
void clockqueue_intrclock_install(struct clockqueue *,
    const struct intrclock *);
void clockqueue_intrclock_reprogram(struct clockqueue *);
uint64_t clockqueue_next(const struct clockqueue *);
void clockqueue_pend_delete(struct clockqueue *, struct clockintr *);
void clockqueue_pend_insert(struct clockqueue *, struct clockintr *,
    uint64_t);
void intrclock_rearm(struct intrclock *, uint64_t);
void intrclock_trigger(struct intrclock *);
uint64_t nsec_advance(uint64_t *, uint64_t, uint64_t);

/*
 * Ready the calling CPU for clockintr_dispatch().  If this is our
 * first time here, install the intrclock, if any, and set necessary
 * flags.  Advance the schedule as needed.
 */
void
clockintr_cpu_init(const struct intrclock *ic)
{
	uint64_t multiplier = 0;
	struct cpu_info *ci = curcpu();
	struct clockqueue *cq = &ci->ci_queue;
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	int reset_cq_intrclock = 0;

	if (ic != NULL)
		clockqueue_intrclock_install(cq, ic);

	/* TODO: Remove this from struct clockqueue. */
	if (CPU_IS_PRIMARY(ci) && cq->cq_hardclock.cl_expiration == 0) {
		clockintr_bind(&cq->cq_hardclock, ci, clockintr_hardclock,
		    NULL);
	}

	/*
	 * Mask CQ_INTRCLOCK while we're advancing the internal clock
	 * interrupts.  We don't want the intrclock to fire until this
	 * thread reaches clockintr_trigger().
	 */
	if (ISSET(cq->cq_flags, CQ_INTRCLOCK)) {
		CLR(cq->cq_flags, CQ_INTRCLOCK);
		reset_cq_intrclock = 1;
	}

	/*
	 * Until we understand scheduler lock contention better, stagger
	 * the hardclock and statclock so they don't all happen at once.
	 * If we have no intrclock it doesn't matter, we have no control
	 * anyway.  The primary CPU's starting offset is always zero, so
	 * leave the multiplier zero.
	 */
	if (!CPU_IS_PRIMARY(ci) && reset_cq_intrclock)
		multiplier = CPU_INFO_UNIT(ci);

	/*
	 * The first time we do this, the primary CPU cannot skip any
	 * hardclocks.  We can skip hardclocks on subsequent calls because
	 * the global tick value is advanced during inittodr(9) on our
	 * behalf.
	 */
	if (CPU_IS_PRIMARY(ci)) {
		if (cq->cq_hardclock.cl_expiration == 0)
			clockintr_schedule(&cq->cq_hardclock, 0);
		else
			clockintr_advance(&cq->cq_hardclock, hardclock_period);
	}

	/*
	 * We can always advance the statclock.  There is no reason to
	 * stagger a randomized statclock.
	 */
	if (!statclock_is_randomized) {
		if (spc->spc_statclock.cl_expiration == 0) {
			clockintr_stagger(&spc->spc_statclock, statclock_avg,
			    multiplier, MAXCPUS);
		}
	}
	clockintr_advance(&spc->spc_statclock, statclock_avg);

	/*
	 * XXX Need to find a better place to do this.  We can't do it in
	 * sched_init_cpu() because initclocks() runs after it.
	 */
	if (spc->spc_itimer.cl_expiration == 0) {
		clockintr_stagger(&spc->spc_itimer, hardclock_period,
		    multiplier, MAXCPUS);
	}
	if (spc->spc_profclock.cl_expiration == 0) {
		clockintr_stagger(&spc->spc_profclock, profclock_period,
		    multiplier, MAXCPUS);
	}
	if (spc->spc_roundrobin.cl_expiration == 0) {
		clockintr_stagger(&spc->spc_roundrobin, hardclock_period,
		    multiplier, MAXCPUS);
	}
	clockintr_advance(&spc->spc_roundrobin, roundrobin_period);

	if (reset_cq_intrclock)
		SET(cq->cq_flags, CQ_INTRCLOCK);
}

/*
 * If we have an intrclock, trigger it to start the dispatch cycle.
 */
void
clockintr_trigger(void)
{
	struct clockqueue *cq = &curcpu()->ci_queue;

	KASSERT(ISSET(cq->cq_flags, CQ_INIT));

	if (ISSET(cq->cq_flags, CQ_INTRCLOCK))
		intrclock_trigger(&cq->cq_intrclock);
}

/*
 * Run all expired events scheduled on the calling CPU.
 */
int
clockintr_dispatch(void *frame)
{
	uint64_t lateness, run = 0, start;
	struct cpu_info *ci = curcpu();
	struct clockintr *cl;
	struct clockqueue *cq = &ci->ci_queue;
	struct clockrequest *request = &cq->cq_request;
	void *arg;
	void (*func)(struct clockrequest *, void *, void *);
	uint32_t ogen;

	if (cq->cq_dispatch != 0)
		panic("%s: recursive dispatch", __func__);
	cq->cq_dispatch = 1;

	splassert(IPL_CLOCK);
	KASSERT(ISSET(cq->cq_flags, CQ_INIT));

	mtx_enter(&cq->cq_mtx);

	/*
	 * If nothing is scheduled or we arrived too early, we have
	 * nothing to do.
	 */
	start = nsecuptime();
	cq->cq_uptime = start;
	if (TAILQ_EMPTY(&cq->cq_pend))
		goto stats;
	if (cq->cq_uptime < clockqueue_next(cq))
		goto rearm;
	lateness = start - clockqueue_next(cq);

	/*
	 * Dispatch expired events.
	 */
	for (;;) {
		cl = TAILQ_FIRST(&cq->cq_pend);
		if (cl == NULL)
			break;
		if (cq->cq_uptime < cl->cl_expiration) {
			/* Double-check the time before giving up. */
			cq->cq_uptime = nsecuptime();
			if (cq->cq_uptime < cl->cl_expiration)
				break;
		}

		/*
		 * This clockintr has expired.  Execute it.
		 */
		clockqueue_pend_delete(cq, cl);
		request->cr_expiration = cl->cl_expiration;
		arg = cl->cl_arg;
		func = cl->cl_func;
		cq->cq_running = cl;
		mtx_leave(&cq->cq_mtx);

		func(request, frame, arg);

		mtx_enter(&cq->cq_mtx);
		cq->cq_running = NULL;
		if (ISSET(cq->cq_flags, CQ_IGNORE_REQUEST)) {
			CLR(cq->cq_flags, CQ_IGNORE_REQUEST);
			CLR(request->cr_flags, CR_RESCHEDULE);
		}
		if (ISSET(request->cr_flags, CR_RESCHEDULE)) {
			CLR(request->cr_flags, CR_RESCHEDULE);
			clockqueue_pend_insert(cq, cl, request->cr_expiration);
		}
		if (ISSET(cq->cq_flags, CQ_NEED_WAKEUP)) {
			CLR(cq->cq_flags, CQ_NEED_WAKEUP);
			mtx_leave(&cq->cq_mtx);
			wakeup(&cq->cq_running);
			mtx_enter(&cq->cq_mtx);
		}
		run++;
	}

	/*
	 * Dispatch complete.
	 */
rearm:
	/* Rearm the interrupt clock if we have one. */
	if (ISSET(cq->cq_flags, CQ_INTRCLOCK)) {
		if (!TAILQ_EMPTY(&cq->cq_pend)) {
			intrclock_rearm(&cq->cq_intrclock,
			    clockqueue_next(cq) - cq->cq_uptime);
		}
	}
stats:
	/* Update our stats. */
	ogen = cq->cq_gen;
	cq->cq_gen = 0;
	membar_producer();
	cq->cq_stat.cs_dispatched += cq->cq_uptime - start;
	if (run > 0) {
		cq->cq_stat.cs_lateness += lateness;
		cq->cq_stat.cs_prompt++;
		cq->cq_stat.cs_run += run;
	} else if (!TAILQ_EMPTY(&cq->cq_pend)) {
		cq->cq_stat.cs_early++;
		cq->cq_stat.cs_earliness += clockqueue_next(cq) - cq->cq_uptime;
	} else
		cq->cq_stat.cs_spurious++;
	membar_producer();
	cq->cq_gen = MAX(1, ogen + 1);

	mtx_leave(&cq->cq_mtx);

	if (cq->cq_dispatch != 1)
		panic("%s: unexpected value: %u", __func__, cq->cq_dispatch);
	cq->cq_dispatch = 0;

	return run > 0;
}

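/*
 * Advance cl's expiration in steps of period until it lies in the
 * future, (re)schedule the event, and return the number of periods
 * that have elapsed.
 */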
uint64_t
clockintr_advance(struct clockintr *cl, uint64_t period)
{
	uint64_t count, expiration;
	struct clockqueue *cq = cl->cl_queue;

	mtx_enter(&cq->cq_mtx);
	expiration = cl->cl_expiration;
	count = nsec_advance(&expiration, period, nsecuptime());
	clockintr_schedule_locked(cl, expiration);
	mtx_leave(&cq->cq_mtx);

	return count;
}

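/*
 * Like clockintr_advance(), but for the clockrequest handed to an
 * expiration callback: advance the request's expiration past the
 * dispatch uptime and flag the event for rescheduling.  Returns the
 * number of periods that have elapsed.
 */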
uint64_t
clockrequest_advance(struct clockrequest *cr, uint64_t period)
{
	struct clockqueue *cq = cr->cr_queue;

	KASSERT(cr == &cq->cq_request);

	SET(cr->cr_flags, CR_RESCHEDULE);
	return nsec_advance(&cr->cr_expiration, period, cq->cq_uptime);
}

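/*
 * Advance the request's expiration in pseudorandom steps of min plus
 * a nonzero offset no larger than mask until it exceeds the dispatch
 * uptime, then flag the event for rescheduling.  Returns the number
 * of steps taken.
 */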
uint64_t
clockrequest_advance_random(struct clockrequest *cr, uint64_t min,
    uint32_t mask)
{
	uint64_t count = 0;
	struct clockqueue *cq = cr->cr_queue;
	uint32_t off;

	KASSERT(cr == &cq->cq_request);

	while (cr->cr_expiration <= cq->cq_uptime) {
		while ((off = (random() & mask)) == 0)
			continue;
		cr->cr_expiration += min + off;
		count++;
	}
	SET(cr->cr_flags, CR_RESCHEDULE);
	return count;
}

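/*
 * Cancel cl: remove it from its queue's pending list and, if its
 * callback is currently running, discard any reschedule request the
 * callback makes.
 */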
void
clockintr_cancel(struct clockintr *cl)
{
	struct clockqueue *cq = cl->cl_queue;

	mtx_enter(&cq->cq_mtx);
	clockintr_cancel_locked(cl);
	mtx_leave(&cq->cq_mtx);
}

void
clockintr_cancel_locked(struct clockintr *cl)
{
	struct clockqueue *cq = cl->cl_queue;
	int was_next;

	MUTEX_ASSERT_LOCKED(&cq->cq_mtx);

	if (ISSET(cl->cl_flags, CLST_PENDING)) {
		was_next = cl == TAILQ_FIRST(&cq->cq_pend);
		clockqueue_pend_delete(cq, cl);
		if (ISSET(cq->cq_flags, CQ_INTRCLOCK)) {
			if (was_next && !TAILQ_EMPTY(&cq->cq_pend)) {
				if (cq == &curcpu()->ci_queue)
					clockqueue_intrclock_reprogram(cq);
			}
		}
	}
	if (cl == cq->cq_running)
		SET(cq->cq_flags, CQ_IGNORE_REQUEST);
}

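/*
 * Initialize cl, set its callback and argument, and attach it to
 * ci's clock interrupt queue.  The event is not scheduled until
 * clockintr_schedule() or clockintr_advance() is called.
 */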
void
clockintr_bind(struct clockintr *cl, struct cpu_info *ci,
    void (*func)(struct clockrequest *, void *, void *), void *arg)
{
	struct clockqueue *cq = &ci->ci_queue;

	splassert(IPL_NONE);
	KASSERT(cl->cl_queue == NULL);

	mtx_enter(&cq->cq_mtx);
	cl->cl_arg = arg;
	cl->cl_func = func;
	cl->cl_queue = cq;
	TAILQ_INSERT_TAIL(&cq->cq_all, cl, cl_alink);
	mtx_leave(&cq->cq_mtx);
}

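/*
 * Cancel cl and detach it from its queue.  With CL_BARRIER, sleep
 * until cl's callback has returned if it is currently running.
 */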
void
clockintr_unbind(struct clockintr *cl, uint32_t flags)
{
	struct clockqueue *cq = cl->cl_queue;

	KASSERT(!ISSET(flags, ~CL_FLAG_MASK));

	mtx_enter(&cq->cq_mtx);

	clockintr_cancel_locked(cl);

	cl->cl_arg = NULL;
	cl->cl_func = NULL;
	cl->cl_queue = NULL;
	TAILQ_REMOVE(&cq->cq_all, cl, cl_alink);

	if (ISSET(flags, CL_BARRIER) && cl == cq->cq_running) {
		SET(cq->cq_flags, CQ_NEED_WAKEUP);
		msleep_nsec(&cq->cq_running, &cq->cq_mtx, PWAIT | PNORELOCK,
		    "clkbar", INFSLP);
	} else
		mtx_leave(&cq->cq_mtx);
}

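/*
 * Schedule cl to expire at the given absolute uptime, in nanoseconds.
 * Any previously scheduled expiration is replaced.
 */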
void
clockintr_schedule(struct clockintr *cl, uint64_t expiration)
{
	struct clockqueue *cq = cl->cl_queue;

	mtx_enter(&cq->cq_mtx);
	clockintr_schedule_locked(cl, expiration);
	mtx_leave(&cq->cq_mtx);
}

void
clockintr_schedule_locked(struct clockintr *cl, uint64_t expiration)
{
	struct clockqueue *cq = cl->cl_queue;

	MUTEX_ASSERT_LOCKED(&cq->cq_mtx);

	if (ISSET(cl->cl_flags, CLST_PENDING))
		clockqueue_pend_delete(cq, cl);
	clockqueue_pend_insert(cq, cl, expiration);
	if (ISSET(cq->cq_flags, CQ_INTRCLOCK)) {
		if (cl == TAILQ_FIRST(&cq->cq_pend)) {
			if (cq == &curcpu()->ci_queue)
				clockqueue_intrclock_reprogram(cq);
		}
	}
	if (cl == cq->cq_running)
		SET(cq->cq_flags, CQ_IGNORE_REQUEST);
}

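/*
 * Set cl's initial expiration to (numer / denom) of period so that
 * periodic events are spread out across CPUs instead of all expiring
 * at the same moment.  cl must not already be scheduled.
 */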
void
clockintr_stagger(struct clockintr *cl, uint64_t period, uint32_t numer,
    uint32_t denom)
{
	struct clockqueue *cq = cl->cl_queue;

	KASSERT(numer < denom);

	mtx_enter(&cq->cq_mtx);
	if (ISSET(cl->cl_flags, CLST_PENDING))
		panic("%s: clock interrupt pending", __func__);
	cl->cl_expiration = period / denom * numer;
	mtx_leave(&cq->cq_mtx);
}

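/*
 * Expiration callback for the hardclock: run hardclock(9) once for
 * every hardclock_period that has elapsed since the last expiration.
 */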
void
clockintr_hardclock(struct clockrequest *cr, void *frame, void *arg)
{
	uint64_t count, i;

	count = clockrequest_advance(cr, hardclock_period);
	for (i = 0; i < count; i++)
		hardclock(frame);
}

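/*
 * One-time initialization of a CPU's clock interrupt queue.
 */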
void
clockqueue_init(struct clockqueue *cq)
{
	if (ISSET(cq->cq_flags, CQ_INIT))
		return;

	cq->cq_request.cr_queue = cq;
	mtx_init(&cq->cq_mtx, IPL_CLOCK);
	TAILQ_INIT(&cq->cq_all);
	TAILQ_INIT(&cq->cq_pend);
	cq->cq_gen = 1;
	SET(cq->cq_flags, CQ_INIT);
}

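/*
 * Install ic as cq's interrupt clock unless one is already installed.
 */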
void
clockqueue_intrclock_install(struct clockqueue *cq,
    const struct intrclock *ic)
{
	mtx_enter(&cq->cq_mtx);
	if (!ISSET(cq->cq_flags, CQ_INTRCLOCK)) {
		cq->cq_intrclock = *ic;
		SET(cq->cq_flags, CQ_INTRCLOCK);
	}
	mtx_leave(&cq->cq_mtx);
}

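/*
 * Return the expiration time of the next pending event on cq.
 * The pending queue must not be empty.
 */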
uint64_t
clockqueue_next(const struct clockqueue *cq)
{
	MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
	return TAILQ_FIRST(&cq->cq_pend)->cl_expiration;
}

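/*
 * Remove cl from cq's pending queue and clear CLST_PENDING.
 */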
void
clockqueue_pend_delete(struct clockqueue *cq, struct clockintr *cl)
{
	MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
	KASSERT(ISSET(cl->cl_flags, CLST_PENDING));

	TAILQ_REMOVE(&cq->cq_pend, cl, cl_plink);
	CLR(cl->cl_flags, CLST_PENDING);
}

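/*
 * Insert cl into cq's pending queue, which is kept sorted by
 * expiration time, and set CLST_PENDING.
 */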
void
clockqueue_pend_insert(struct clockqueue *cq, struct clockintr *cl,
    uint64_t expiration)
{
	struct clockintr *elm;

	MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
	KASSERT(!ISSET(cl->cl_flags, CLST_PENDING));

	cl->cl_expiration = expiration;
	TAILQ_FOREACH(elm, &cq->cq_pend, cl_plink) {
		if (cl->cl_expiration < elm->cl_expiration)
			break;
	}
	if (elm == NULL)
		TAILQ_INSERT_TAIL(&cq->cq_pend, cl, cl_plink);
	else
		TAILQ_INSERT_BEFORE(elm, cl, cl_plink);
	SET(cl->cl_flags, CLST_PENDING);
}

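/*
 * Rearm cq's interrupt clock to fire at the next pending expiration,
 * or trigger it immediately if that expiration has already passed.
 */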
void
clockqueue_intrclock_reprogram(struct clockqueue *cq)
{
	uint64_t exp, now;

	MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
	KASSERT(ISSET(cq->cq_flags, CQ_INTRCLOCK));

	exp = clockqueue_next(cq);
	now = nsecuptime();
	if (now < exp)
		intrclock_rearm(&cq->cq_intrclock, exp - now);
	else
		intrclock_trigger(&cq->cq_intrclock);
}

void
intrclock_rearm(struct intrclock *ic, uint64_t nsecs)
{
	ic->ic_rearm(ic->ic_cookie, nsecs);
}

void
intrclock_trigger(struct intrclock *ic)
{
	ic->ic_trigger(ic->ic_cookie);
}

/*
 * Advance *next in increments of period until it exceeds now.
 * Returns the number of increments *next was advanced.
 *
 * We check the common cases first to avoid division if possible.
 * This does no overflow checking.
 */
uint64_t
nsec_advance(uint64_t *next, uint64_t period, uint64_t now)
{
	uint64_t elapsed;

	if (now < *next)
		return 0;

	if (now < *next + period) {
		*next += period;
		return 1;
	}

	elapsed = (now - *next) / period + 1;
	*next += period * elapsed;
	return elapsed;
}

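/*
 * sysctl(2) handler for the clockintr subtree.  KERN_CLOCKINTR_STATS
 * sums the per-CPU dispatch statistics and returns the read-only
 * total.
 */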
int
sysctl_clockintr(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	struct clockintr_stat sum, tmp;
	struct clockqueue *cq;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	uint32_t gen;

	if (namelen != 1)
		return ENOTDIR;

	switch (name[0]) {
	case KERN_CLOCKINTR_STATS:
		memset(&sum, 0, sizeof sum);
		CPU_INFO_FOREACH(cii, ci) {
			cq = &ci->ci_queue;
			if (!ISSET(cq->cq_flags, CQ_INIT))
				continue;
			do {
				gen = cq->cq_gen;
				membar_consumer();
				tmp = cq->cq_stat;
				membar_consumer();
			} while (gen == 0 || gen != cq->cq_gen);
			sum.cs_dispatched += tmp.cs_dispatched;
			sum.cs_early += tmp.cs_early;
			sum.cs_earliness += tmp.cs_earliness;
			sum.cs_lateness += tmp.cs_lateness;
			sum.cs_prompt += tmp.cs_prompt;
			sum.cs_run += tmp.cs_run;
			sum.cs_spurious += tmp.cs_spurious;
		}
		return sysctl_rdstruct(oldp, oldlenp, newp, &sum, sizeof sum);
	default:
		break;
	}

	return EINVAL;
}

#ifdef DDB

#include <machine/db_machdep.h>

#include <ddb/db_interface.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>

void db_show_clockintr(const struct clockintr *, const char *, u_int);
void db_show_clockintr_cpu(struct cpu_info *);

void
db_show_all_clockintr(db_expr_t addr, int haddr, db_expr_t count, char *modif)
{
	struct timespec now;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	int width = sizeof(long) * 2 + 2;	/* +2 for "0x" prefix */

	nanouptime(&now);
	db_printf("%20s\n", "UPTIME");
	db_printf("%10lld.%09ld\n", now.tv_sec, now.tv_nsec);
	db_printf("\n");
	db_printf("%20s  %5s  %3s  %*s  %s\n",
	    "EXPIRATION", "STATE", "CPU", width, "ARG", "NAME");
	CPU_INFO_FOREACH(cii, ci) {
		if (ISSET(ci->ci_queue.cq_flags, CQ_INIT))
			db_show_clockintr_cpu(ci);
	}
}

void
db_show_clockintr_cpu(struct cpu_info *ci)
{
	struct clockintr *elm;
	struct clockqueue *cq = &ci->ci_queue;
	u_int cpu = CPU_INFO_UNIT(ci);

	if (cq->cq_running != NULL)
		db_show_clockintr(cq->cq_running, "run", cpu);
	TAILQ_FOREACH(elm, &cq->cq_pend, cl_plink)
		db_show_clockintr(elm, "pend", cpu);
	TAILQ_FOREACH(elm, &cq->cq_all, cl_alink) {
		if (!ISSET(elm->cl_flags, CLST_PENDING))
			db_show_clockintr(elm, "idle", cpu);
	}
}

void
db_show_clockintr(const struct clockintr *cl, const char *state, u_int cpu)
{
	struct timespec ts;
	char *name;
	db_expr_t offset;
	int width = sizeof(long) * 2;

	NSEC_TO_TIMESPEC(cl->cl_expiration, &ts);
	db_find_sym_and_offset((vaddr_t)cl->cl_func, &name, &offset);
	if (name == NULL)
		name = "?";
	db_printf("%10lld.%09ld  %5s  %3u  0x%0*lx  %s\n",
	    ts.tv_sec, ts.tv_nsec, state, cpu,
	    width, (unsigned long)cl->cl_arg, name);
}

#endif /* DDB */