/*	$NetBSD: kern_timeout.c,v 1.51 2015/11/24 15:48:23 christos Exp $	*/

/*-
 * Copyright (c) 2003, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
 * Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.51 2015/11/24 15:48:23 christos Exp $");

/*
 * Timeouts are kept in a hierarchical timing wheel.  The c_time is the
 * value of c_cpu->cc_ticks when the timeout should be called.  There are
 * four levels with 256 buckets each.  See 'Scheme 7' in "Hashed and
 * Hierarchical Timing Wheels: Efficient Data Structures for Implementing
 * a Timer Facility" by George Varghese and Tony Lauck.
 *
 * Some of the "math" in here is a bit tricky.  We have to beware of
 * wrapping ints.
 *
 * We use the fact that any element added to the queue must be added with
 * a positive time.  That means that any element `to' on the queue cannot
 * be scheduled to timeout further in time than INT_MAX, but c->c_time can
 * be positive or negative so comparing it with anything is dangerous.
 * The only way we can use the c->c_time value in any predictable way is
 * when we calculate how far in the future `to' will timeout - "c->c_time
 * - c->c_cpu->cc_ticks".  The result will always be positive for future
 * timeouts and 0 or negative for due timeouts.
 */

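/*
 * A worked example of the wrap-safe comparison (an illustrative sketch,
 * assuming the two's-complement wraparound the kernel relies on; the
 * variables below are not part of this file):
 */
#if 0
	int ticks = INT_MAX - 4;	/* current cc_ticks */
	int time = ticks + 10;		/* new c_time; wraps to INT_MIN + 5 */
	int delta = time - ticks;	/* == 10: still "10 ticks away" */
#endif
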
#define	_CALLOUT_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepq.h>
#include <sys/syncobj.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/cpu.h>
#include <sys/kmem.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_access.h>
#include <ddb/db_cpu.h>
#include <ddb/db_sym.h>
#include <ddb/db_output.h>
#endif

#define BUCKETS		1024
#define WHEELSIZE	256
#define WHEELMASK	255
#define WHEELBITS	8

#define MASKWHEEL(wheel, time) (((time) >> ((wheel)*WHEELBITS)) & WHEELMASK)

#define BUCKET(cc, rel, abs)						\
    (((rel) <= (1 << (2*WHEELBITS)))					\
	? ((rel) <= (1 << WHEELBITS))					\
            ? &(cc)->cc_wheel[MASKWHEEL(0, (abs))]			\
            : &(cc)->cc_wheel[MASKWHEEL(1, (abs)) + WHEELSIZE]		\
        : ((rel) <= (1 << (3*WHEELBITS)))				\
            ? &(cc)->cc_wheel[MASKWHEEL(2, (abs)) + 2*WHEELSIZE]	\
            : &(cc)->cc_wheel[MASKWHEEL(3, (abs)) + 3*WHEELSIZE])

#define MOVEBUCKET(cc, wheel, time)					\
    CIRCQ_APPEND(&(cc)->cc_todo,					\
        &(cc)->cc_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE])

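/*
 * A worked example (illustrative only): a callout due 1000 ticks from
 * now misses the first wheel (1000 > 1 << WHEELBITS) but fits the
 * second (1000 <= 1 << (2*WHEELBITS)), so with abs == c->c_time,
 * BUCKET() selects &cc->cc_wheel[MASKWHEEL(1, abs) + WHEELSIZE], i.e.
 * slot ((abs >> 8) & 255) in the second group of WHEELSIZE buckets.
 */
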
/*
 * Circular queue definitions.
 */

#define CIRCQ_INIT(list)						\
do {									\
        (list)->cq_next_l = (list);					\
        (list)->cq_prev_l = (list);					\
} while (/*CONSTCOND*/0)

#define CIRCQ_INSERT(elem, list)					\
do {									\
        (elem)->cq_prev_e = (list)->cq_prev_e;				\
        (elem)->cq_next_l = (list);					\
        (list)->cq_prev_l->cq_next_l = (elem);				\
        (list)->cq_prev_l = (elem);					\
} while (/*CONSTCOND*/0)

#define CIRCQ_APPEND(fst, snd)						\
do {									\
        if (!CIRCQ_EMPTY(snd)) {					\
                (fst)->cq_prev_l->cq_next_l = (snd)->cq_next_l;		\
                (snd)->cq_next_l->cq_prev_l = (fst)->cq_prev_l;		\
                (snd)->cq_prev_l->cq_next_l = (fst);			\
                (fst)->cq_prev_l = (snd)->cq_prev_l;			\
                CIRCQ_INIT(snd);					\
        }								\
} while (/*CONSTCOND*/0)

#define CIRCQ_REMOVE(elem)						\
do {									\
        (elem)->cq_next_l->cq_prev_e = (elem)->cq_prev_e;		\
        (elem)->cq_prev_l->cq_next_e = (elem)->cq_next_e;		\
} while (/*CONSTCOND*/0)

#define CIRCQ_FIRST(list)	((list)->cq_next_e)
#define CIRCQ_NEXT(elem)	((elem)->cq_next_e)
#define CIRCQ_LAST(elem,list)	((elem)->cq_next_l == (list))
#define CIRCQ_EMPTY(list)	((list)->cq_next_l == (list))

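/*
 * A minimal traversal sketch (for illustration; 'cc' is hypothetical
 * here): the head links through the _l union members and the elements
 * through the matching _e members, so a walk ends when an element's
 * cq_next_l points back at the head.
 */
#if 0
	struct callout_circq *list = &cc->cc_todo;
	callout_impl_t *c;

	if (!CIRCQ_EMPTY(list)) {
		for (c = CIRCQ_FIRST(list);; c = CIRCQ_NEXT(&c->c_list)) {
			/* ... examine c ... */
			if (CIRCQ_LAST(&c->c_list, list))
				break;
		}
	}
#endif
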
struct callout_cpu {
	kmutex_t	*cc_lock;
	sleepq_t	cc_sleepq;
	u_int		cc_nwait;
	u_int		cc_ticks;
	lwp_t		*cc_lwp;
	callout_impl_t	*cc_active;
	callout_impl_t	*cc_cancel;
	struct evcnt	cc_ev_late;
	struct evcnt	cc_ev_block;
	struct callout_circq cc_todo;		/* Worklist */
	struct callout_circq cc_wheel[BUCKETS];	/* Queues of timeouts */
	char		cc_name1[12];
	char		cc_name2[12];
};

#ifndef CRASH

static void	callout_softclock(void *);
static struct callout_cpu callout_cpu0;
static void *callout_sih;

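/*
 * callout_lock:
 *
 *	Lock the CPU that the callout is currently assigned to,
 *	re-checking c_cpu after taking the mutex because the callout
 *	can migrate to another CPU while we wait for the lock.
 */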
static inline kmutex_t *
callout_lock(callout_impl_t *c)
{
	struct callout_cpu *cc;
	kmutex_t *lock;

	for (;;) {
		cc = c->c_cpu;
		lock = cc->cc_lock;
		mutex_spin_enter(lock);
		if (__predict_true(cc == c->c_cpu))
			return lock;
		mutex_spin_exit(lock);
	}
}

/*
 * callout_startup:
 *
 *	Initialize the callout facility, called at system startup time.
 *	Do just enough to allow callouts to be safely registered.
 */
void
callout_startup(void)
{
	struct callout_cpu *cc;
	int b;

	KASSERT(curcpu()->ci_data.cpu_callout == NULL);

	cc = &callout_cpu0;
	cc->cc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
	CIRCQ_INIT(&cc->cc_todo);
	for (b = 0; b < BUCKETS; b++)
		CIRCQ_INIT(&cc->cc_wheel[b]);
	curcpu()->ci_data.cpu_callout = cc;
}

/*
 * callout_init_cpu:
 *
 *	Per-CPU initialization.
 */
CTASSERT(sizeof(callout_impl_t) <= sizeof(callout_t));

void
callout_init_cpu(struct cpu_info *ci)
{
	struct callout_cpu *cc;
	int b;

	if ((cc = ci->ci_data.cpu_callout) == NULL) {
		cc = kmem_zalloc(sizeof(*cc), KM_SLEEP);
		if (cc == NULL)
			panic("callout_init_cpu (1)");
		cc->cc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		CIRCQ_INIT(&cc->cc_todo);
		for (b = 0; b < BUCKETS; b++)
			CIRCQ_INIT(&cc->cc_wheel[b]);
	} else {
		/* Boot CPU, one time only. */
		callout_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
		    callout_softclock, NULL);
		if (callout_sih == NULL)
			panic("callout_init_cpu (2)");
	}

	sleepq_init(&cc->cc_sleepq);

	snprintf(cc->cc_name1, sizeof(cc->cc_name1), "late/%u",
	    cpu_index(ci));
	evcnt_attach_dynamic(&cc->cc_ev_late, EVCNT_TYPE_MISC,
	    NULL, "callout", cc->cc_name1);

	snprintf(cc->cc_name2, sizeof(cc->cc_name2), "wait/%u",
	    cpu_index(ci));
	evcnt_attach_dynamic(&cc->cc_ev_block, EVCNT_TYPE_MISC,
	    NULL, "callout", cc->cc_name2);

	ci->ci_data.cpu_callout = cc;
}

/*
 * callout_init:
 *
 *	Initialize a callout structure.  This must be quick, so we fill
 *	only the minimum number of fields.
 */
void
callout_init(callout_t *cs, u_int flags)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;

	KASSERT((flags & ~CALLOUT_FLAGMASK) == 0);

	cc = curcpu()->ci_data.cpu_callout;
	c->c_func = NULL;
	c->c_magic = CALLOUT_MAGIC;
	if (__predict_true((flags & CALLOUT_MPSAFE) != 0 && cc != NULL)) {
		c->c_flags = flags;
		c->c_cpu = cc;
		return;
	}
	c->c_flags = flags | CALLOUT_BOUND;
	c->c_cpu = &callout_cpu0;
}

/*
 * callout_destroy:
 *
 *	Destroy a callout structure.  The callout must be stopped.
 */
void
callout_destroy(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;

	/*
	 * It's not necessary to lock in order to see the correct value
	 * of c->c_flags.  If the callout could potentially have been
	 * running, the current thread should have stopped it.
	 */
	KASSERTMSG((c->c_flags & CALLOUT_PENDING) == 0,
	    "callout %p: c_func (%p) c_flags (%#x) destroyed from %p",
	    c, c->c_func, c->c_flags, __builtin_return_address(0));
	KASSERT(c->c_cpu->cc_lwp == curlwp || c->c_cpu->cc_active != c);
	KASSERTMSG(c->c_magic == CALLOUT_MAGIC,
	    "callout %p: c_magic (%#x) != CALLOUT_MAGIC (%#x)",
	    c, c->c_magic, CALLOUT_MAGIC);
	c->c_magic = 0;
}

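/*
 * A typical life cycle, as a hedged sketch (the softc, field, and
 * handler names below are hypothetical, not part of this file):
 */
#if 0
	static void
	foo_tick(void *arg)
	{
		struct foo_softc *sc = arg;

		/* ... periodic work ... */
		callout_schedule(&sc->sc_tick, hz);	/* re-arm */
	}

	/* Attach: */
	callout_init(&sc->sc_tick, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_tick, foo_tick, sc);
	callout_schedule(&sc->sc_tick, hz);

	/* Detach: cancel, wait out any in-flight handler, then destroy. */
	callout_halt(&sc->sc_tick, NULL);
	callout_destroy(&sc->sc_tick);
#endif
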
/*
 * callout_schedule_locked:
 *
 *	Schedule a callout to run.  The function and argument must
 *	already be set in the callout structure.  Must be called with
 *	callout_lock.
 */
static void
callout_schedule_locked(callout_impl_t *c, kmutex_t *lock, int to_ticks)
{
	struct callout_cpu *cc, *occ;
	int old_time;

	KASSERT(to_ticks >= 0);
	KASSERT(c->c_func != NULL);

	/* Initialize the time here, it won't change. */
	occ = c->c_cpu;
	c->c_flags &= ~(CALLOUT_FIRED | CALLOUT_INVOKING);

	/*
	 * If this timeout is already scheduled and now is moved
	 * earlier, reschedule it now.  Otherwise leave it in place
	 * and let it be rescheduled later.
	 */
	if ((c->c_flags & CALLOUT_PENDING) != 0) {
		/* Leave on existing CPU. */
		old_time = c->c_time;
		c->c_time = to_ticks + occ->cc_ticks;
		if (c->c_time - old_time < 0) {
			CIRCQ_REMOVE(&c->c_list);
			CIRCQ_INSERT(&c->c_list, &occ->cc_todo);
		}
		mutex_spin_exit(lock);
		return;
	}

	cc = curcpu()->ci_data.cpu_callout;
	if ((c->c_flags & CALLOUT_BOUND) != 0 || cc == occ ||
	    !mutex_tryenter(cc->cc_lock)) {
		/* Leave on existing CPU. */
		c->c_time = to_ticks + occ->cc_ticks;
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &occ->cc_todo);
	} else {
		/* Move to this CPU. */
		c->c_cpu = cc;
		c->c_time = to_ticks + cc->cc_ticks;
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &cc->cc_todo);
		mutex_spin_exit(cc->cc_lock);
	}
	mutex_spin_exit(lock);
}

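/*
 * For example: a pending callout with 100 ticks remaining that is reset
 * to 10 ticks satisfies "c->c_time - old_time < 0" above and is pulled
 * onto cc_todo for immediate re-sorting, while one reset to 200 ticks
 * is left where it is; when its old bucket is cascaded onto cc_todo,
 * callout_softclock() re-buckets it by the new c_time.
 */
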
/*
 * callout_reset:
 *
 *	Reset a callout structure with a new function and argument, and
 *	schedule it to run.
 */
void
callout_reset(callout_t *cs, int to_ticks, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	lock = callout_lock(c);
	c->c_func = func;
	c->c_arg = arg;
	callout_schedule_locked(c, lock, to_ticks);
}

/*
 * callout_schedule:
 *
 *	Schedule a callout to run.  The function and argument must
 *	already be set in the callout structure.
 */
void
callout_schedule(callout_t *cs, int to_ticks)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	callout_schedule_locked(c, lock, to_ticks);
}

/*
 * callout_stop:
 *
 *	Try to cancel a pending callout.  It may be too late: the callout
 *	could be running on another CPU.  If called from interrupt context,
 *	the callout could already be in progress at a lower priority.
 */
bool
callout_stop(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	kmutex_t *lock;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);

	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);
	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	cc = c->c_cpu;
	if (cc->cc_active == c) {
		/*
		 * This is for non-MPSAFE callouts only.  To synchronize
		 * effectively we must be called with kernel_lock held.
		 * It's also taken in callout_softclock.
		 */
		cc->cc_cancel = c;
	}

	mutex_spin_exit(lock);

	return expired;
}

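/*
 * Note the contrast with callout_halt() below: callout_stop() only
 * prevents a future firing and does not wait for a handler already
 * dispatched on another CPU, so by itself it is not enough to make
 * freeing the callout's argument safe.
 */
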
/*
 * callout_halt:
 *
 *	Cancel a pending callout.  If in-flight, block until it completes.
 *	May not be called from a hard interrupt handler.  If the callout
 *	can take locks, the caller of callout_halt() must not hold any of
 *	those locks, otherwise the two could deadlock.  If 'interlock' is
 *	non-NULL and we must wait for the callout to complete, it will be
 *	released and re-acquired before returning.
 */
bool
callout_halt(callout_t *cs, void *interlock)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	struct lwp *l;
	kmutex_t *lock, *relock;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(!cpu_intr_p());

	lock = callout_lock(c);
	relock = NULL;

	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	l = curlwp;
	for (;;) {
		cc = c->c_cpu;
		if (__predict_true(cc->cc_active != c || cc->cc_lwp == l))
			break;
		if (interlock != NULL) {
			/*
			 * Avoid potential scheduler lock order problems by
			 * dropping the interlock without the callout lock
			 * held.
			 */
			mutex_spin_exit(lock);
			mutex_exit(interlock);
			relock = interlock;
			interlock = NULL;
		} else {
			/* XXX Better to do priority inheritance. */
			KASSERT(l->l_wchan == NULL);
			cc->cc_nwait++;
			cc->cc_ev_block.ev_count++;
			l->l_kpriority = true;
			sleepq_enter(&cc->cc_sleepq, l, cc->cc_lock);
			sleepq_enqueue(&cc->cc_sleepq, cc, "callout",
			    &sleep_syncobj);
			sleepq_block(0, false);
		}
		lock = callout_lock(c);
	}

	mutex_spin_exit(lock);
	if (__predict_false(relock != NULL))
		mutex_enter(relock);

	return expired;
}

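/*
 * A hedged usage sketch of the interlock argument (the softc and its
 * members are hypothetical): a driver whose handler takes sc_lock
 * passes that lock here, so callout_halt() can release and re-acquire
 * it while waiting instead of deadlocking against the handler.
 */
#if 0
	mutex_enter(&sc->sc_lock);
	sc->sc_dying = true;
	/* sc_lock is dropped and retaken if we must wait. */
	callout_halt(&sc->sc_tick, &sc->sc_lock);
	mutex_exit(&sc->sc_lock);
#endif
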
#ifdef notyet
/*
 * callout_bind:
 *
 *	Bind a callout so that it will only execute on one CPU.
 *	The callout must be stopped, and must be MPSAFE.
 *
 *	XXX Disabled for now until it is decided how to handle
 *	offlined CPUs.  We may want weak+strong binding.
 */
void
callout_bind(callout_t *cs, struct cpu_info *ci)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	kmutex_t *lock;

	KASSERT((c->c_flags & CALLOUT_PENDING) == 0);
	KASSERT(c->c_cpu->cc_active != c);
	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT((c->c_flags & CALLOUT_MPSAFE) != 0);

	lock = callout_lock(c);
	cc = ci->ci_data.cpu_callout;
	c->c_flags |= CALLOUT_BOUND;
	if (c->c_cpu != cc) {
		/*
		 * Assigning c_cpu effectively unlocks the callout
		 * structure, as we don't hold the new CPU's lock.
		 * Issue memory barrier to prevent accesses being
		 * reordered.
		 */
		membar_exit();
		c->c_cpu = cc;
	}
	mutex_spin_exit(lock);
}
#endif

void
callout_setfunc(callout_t *cs, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	lock = callout_lock(c);
	c->c_func = func;
	c->c_arg = arg;
	mutex_spin_exit(lock);
}

bool
callout_expired(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_FIRED) != 0);
	mutex_spin_exit(lock);

	return rv;
}

bool
callout_active(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & (CALLOUT_PENDING|CALLOUT_FIRED)) != 0);
	mutex_spin_exit(lock);

	return rv;
}

bool
callout_pending(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_PENDING) != 0);
	mutex_spin_exit(lock);

	return rv;
}

bool
callout_invoking(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_INVOKING) != 0);
	mutex_spin_exit(lock);

	return rv;
}

void
callout_ack(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	c->c_flags &= ~CALLOUT_INVOKING;
	mutex_spin_exit(lock);
}

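/*
 * A sketch of the CALLOUT_INVOKING/callout_ack() protocol (handler and
 * softc names hypothetical): callout_softclock() sets the flag just
 * before dispatch, so a handler acknowledges it once it is running and
 * holds its own lock; other code can then use callout_invoking() to
 * detect a handler that has been dispatched but not yet begun its work.
 */
#if 0
	static void
	foo_handler(void *arg)
	{
		struct foo_softc *sc = arg;

		mutex_enter(&sc->sc_lock);
		callout_ack(&sc->sc_tick);	/* clear CALLOUT_INVOKING */
		/* ... work; possibly callout_schedule() to re-arm ... */
		mutex_exit(&sc->sc_lock);
	}
#endif
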
/*
 * callout_hardclock:
 *
 *	Called from hardclock() once every tick.  We schedule a soft
 *	interrupt if there is work to be done.
 */
void
callout_hardclock(void)
{
	struct callout_cpu *cc;
	int needsoftclock, ticks;

	cc = curcpu()->ci_data.cpu_callout;
	mutex_spin_enter(cc->cc_lock);

	ticks = ++cc->cc_ticks;

	MOVEBUCKET(cc, 0, ticks);
	if (MASKWHEEL(0, ticks) == 0) {
		MOVEBUCKET(cc, 1, ticks);
		if (MASKWHEEL(1, ticks) == 0) {
			MOVEBUCKET(cc, 2, ticks);
			if (MASKWHEEL(2, ticks) == 0)
				MOVEBUCKET(cc, 3, ticks);
		}
	}

	needsoftclock = !CIRCQ_EMPTY(&cc->cc_todo);
	mutex_spin_exit(cc->cc_lock);

	if (needsoftclock)
		softint_schedule(callout_sih);
}

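/*
 * A worked example of the cascade above: when cc_ticks advances to 256
 * (0x100), MASKWHEEL(0, 256) == 0, so wheel 1's bucket 1 is dumped onto
 * cc_todo; callout_softclock() below then runs the entries that are due
 * and re-buckets the rest into wheel 0.
 */
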
/*
 * callout_softclock:
 *
 *	Soft interrupt handler, scheduled above if there is work to
 *	be done.  Callouts are made in soft interrupt context.
 */
static void
callout_softclock(void *v)
{
	callout_impl_t *c;
	struct callout_cpu *cc;
	void (*func)(void *);
	void *arg;
	int mpsafe, count, ticks, delta;
	lwp_t *l;

	l = curlwp;
	KASSERT(l->l_cpu == curcpu());
	cc = l->l_cpu->ci_data.cpu_callout;

	mutex_spin_enter(cc->cc_lock);
	cc->cc_lwp = l;
	while (!CIRCQ_EMPTY(&cc->cc_todo)) {
		c = CIRCQ_FIRST(&cc->cc_todo);
		KASSERT(c->c_magic == CALLOUT_MAGIC);
		KASSERT(c->c_func != NULL);
		KASSERT(c->c_cpu == cc);
		KASSERT((c->c_flags & CALLOUT_PENDING) != 0);
		KASSERT((c->c_flags & CALLOUT_FIRED) == 0);
		CIRCQ_REMOVE(&c->c_list);

		/* If due, run it; otherwise insert it into the right bucket. */
		ticks = cc->cc_ticks;
		delta = c->c_time - ticks;
		if (delta > 0) {
			CIRCQ_INSERT(&c->c_list, BUCKET(cc, delta, c->c_time));
			continue;
		}
		if (delta < 0)
			cc->cc_ev_late.ev_count++;

		c->c_flags = (c->c_flags & ~CALLOUT_PENDING) |
		    (CALLOUT_FIRED | CALLOUT_INVOKING);
		mpsafe = (c->c_flags & CALLOUT_MPSAFE);
		func = c->c_func;
		arg = c->c_arg;
		cc->cc_active = c;

		mutex_spin_exit(cc->cc_lock);
		KASSERT(func != NULL);
		if (__predict_false(!mpsafe)) {
			KERNEL_LOCK(1, NULL);
			(*func)(arg);
			KERNEL_UNLOCK_ONE(NULL);
		} else
			(*func)(arg);
		mutex_spin_enter(cc->cc_lock);

		/*
		 * We can't touch 'c' here because it might be
		 * freed already.  If LWPs are waiting for the
		 * callout to complete, awaken them.
		 */
		cc->cc_active = NULL;
		if ((count = cc->cc_nwait) != 0) {
			cc->cc_nwait = 0;
			/* sleepq_wake() drops the lock. */
			sleepq_wake(&cc->cc_sleepq, cc, count, cc->cc_lock);
			mutex_spin_enter(cc->cc_lock);
		}
	}
	cc->cc_lwp = NULL;
	mutex_spin_exit(cc->cc_lock);
}
#endif

#ifdef DDB
static void
db_show_callout_bucket(struct callout_cpu *cc, struct callout_circq *kbucket,
    struct callout_circq *bucket)
{
	callout_impl_t *c, ci;
	db_expr_t offset;
	const char *name;
	static char question[] = "?";
	int b;

	if (CIRCQ_LAST(bucket, kbucket))
		return;

	for (c = CIRCQ_FIRST(bucket); /*nothing*/; c = CIRCQ_NEXT(&c->c_list)) {
		db_read_bytes((db_addr_t)c, sizeof(ci), (char *)&ci);
		c = &ci;
		db_find_sym_and_offset((db_addr_t)(intptr_t)c->c_func, &name,
		    &offset);
		name = name ? name : question;
		b = (bucket - cc->cc_wheel);
		if (b < 0)
			b = -WHEELSIZE;
		db_printf("%9d %2d/%-4d %16lx  %s\n",
		    c->c_time - cc->cc_ticks, b / WHEELSIZE, b,
		    (u_long)c->c_arg, name);
		if (CIRCQ_LAST(&c->c_list, kbucket))
			break;
	}
}

void
db_show_callout(db_expr_t addr, bool haddr, db_expr_t count, const char *modif)
{
	struct callout_cpu *cc, ccb;
	struct cpu_info *ci, cib;
	int b;

#ifndef CRASH
	db_printf("hardclock_ticks now: %d\n", hardclock_ticks);
#endif
	db_printf("    ticks  wheel               arg  func\n");

	/*
	 * Don't lock the callwheel; all the other CPUs are paused
	 * anyhow, and we might be called in a circumstance where
	 * some other CPU was paused while holding the lock.
	 */
	for (ci = db_cpu_first(); ci != NULL; ci = db_cpu_next(ci)) {
		db_read_bytes((db_addr_t)ci, sizeof(cib), (char *)&cib);
		cc = cib.ci_data.cpu_callout;
		db_read_bytes((db_addr_t)cc, sizeof(ccb), (char *)&ccb);
		db_show_callout_bucket(&ccb, &cc->cc_todo, &ccb.cc_todo);
	}
	for (b = 0; b < BUCKETS; b++) {
		for (ci = db_cpu_first(); ci != NULL; ci = db_cpu_next(ci)) {
			db_read_bytes((db_addr_t)ci, sizeof(cib), (char *)&cib);
			cc = cib.ci_data.cpu_callout;
			db_read_bytes((db_addr_t)cc, sizeof(ccb), (char *)&ccb);
			db_show_callout_bucket(&ccb, &cc->cc_wheel[b],
			    &ccb.cc_wheel[b]);
		}
	}
}
#endif /* DDB */