/*	$OpenBSD: kern_timeout.c,v 1.100 2024/11/07 16:02:29 miod Exp $	*/
/*
 * Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
 * Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/timeout.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/queue.h>			/* _Q_INVALIDATE */
#include <sys/sysctl.h>
#include <sys/witness.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_sym.h>
#include <ddb/db_output.h>
#endif

#include "kcov.h"
#if NKCOV > 0
#include <sys/kcov.h>
#endif

/*
 * Locks used to protect global variables in this file:
 *
 *	I	immutable after initialization
 *	T	timeout_mutex
 */
struct mutex timeout_mutex = MUTEX_INITIALIZER(IPL_HIGH);

void *softclock_si;			/* [I] softclock() interrupt handle */
struct timeoutstat tostat;		/* [T] statistics and totals */

/*
 * Timeouts are kept in a hierarchical timing wheel.  The to_time is the value
 * of the global variable "ticks" when the timeout should be called.  There are
 * four levels with 256 buckets each.
 */
#define WHEELCOUNT 4
#define WHEELSIZE 256
#define WHEELMASK 255
#define WHEELBITS 8
#define BUCKETS (WHEELCOUNT * WHEELSIZE)

struct circq timeout_wheel[BUCKETS];	/* [T] Tick-based timeouts */
struct circq timeout_wheel_kc[BUCKETS];	/* [T] Clock-based timeouts */
struct circq timeout_new;		/* [T] New, unscheduled timeouts */
struct circq timeout_todo;		/* [T] Due or needs rescheduling */
struct circq timeout_proc;		/* [T] Due + needs process context */
#ifdef MULTIPROCESSOR
struct circq timeout_proc_mp;		/* [T] Process ctx + no kernel lock */
#endif

time_t timeout_level_width[WHEELCOUNT];	/* [I] Wheel level width (seconds) */
struct timespec tick_ts;		/* [I] Length of a tick (1/hz secs) */

struct kclock {
	struct timespec kc_lastscan;	/* [T] Clock time at last wheel scan */
	struct timespec kc_late;	/* [T] Late if due prior */
	struct timespec kc_offset;	/* [T] Offset from primary kclock */
} timeout_kclock[KCLOCK_MAX];

#define MASKWHEEL(wheel, time) (((time) >> ((wheel)*WHEELBITS)) & WHEELMASK)

#define BUCKET(rel, abs)						\
    (timeout_wheel[							\
	((rel) <= (1 << (2*WHEELBITS)))					\
	    ? ((rel) <= (1 << WHEELBITS))				\
		? MASKWHEEL(0, (abs))					\
		: MASKWHEEL(1, (abs)) + WHEELSIZE			\
	    : ((rel) <= (1 << (3*WHEELBITS)))				\
		? MASKWHEEL(2, (abs)) + 2*WHEELSIZE			\
		: MASKWHEEL(3, (abs)) + 3*WHEELSIZE])

#define MOVEBUCKET(wheel, time)						\
    CIRCQ_CONCAT(&timeout_todo,						\
	&timeout_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE])
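
/*
 * Illustrative example (not part of the original file): with ticks at
 * 1000, a timeout whose to_time is 1030 is 30 ticks away, so
 * BUCKET(30, 1030) stays on wheel level 0 and picks bucket
 * MASKWHEEL(0, 1030), i.e. 1030 & 255 = 6.  A timeout 1000 ticks away
 * exceeds 1 << WHEELBITS, so it lands on level 1 in bucket
 * MASKWHEEL(1, to_time) + WHEELSIZE instead.
 */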

/*
 * Circular queue definitions.
 */

#define CIRCQ_INIT(elem) do {			\
	(elem)->next = (elem);			\
	(elem)->prev = (elem);			\
} while (0)

#define CIRCQ_INSERT_TAIL(list, elem) do {	\
	(elem)->prev = (list)->prev;		\
	(elem)->next = (list);			\
	(list)->prev->next = (elem);		\
	(list)->prev = (elem);			\
	tostat.tos_pending++;			\
} while (0)

#define CIRCQ_CONCAT(fst, snd) do {		\
	if (!CIRCQ_EMPTY(snd)) {		\
		(fst)->prev->next = (snd)->next;\
		(snd)->next->prev = (fst)->prev;\
		(snd)->prev->next = (fst);	\
		(fst)->prev = (snd)->prev;	\
		CIRCQ_INIT(snd);		\
	}					\
} while (0)

#define CIRCQ_REMOVE(elem) do {			\
	(elem)->next->prev = (elem)->prev;	\
	(elem)->prev->next = (elem)->next;	\
	_Q_INVALIDATE((elem)->prev);		\
	_Q_INVALIDATE((elem)->next);		\
	tostat.tos_pending--;			\
} while (0)

#define CIRCQ_FIRST(elem) ((elem)->next)

#define CIRCQ_EMPTY(elem) (CIRCQ_FIRST(elem) == (elem))

#define CIRCQ_FOREACH(elem, list)		\
	for ((elem) = CIRCQ_FIRST(list);	\
	    (elem) != (list);			\
	    (elem) = CIRCQ_FIRST(elem))

#ifdef WITNESS
struct lock_object timeout_sleeplock_obj = {
	.lo_name = "timeout",
	.lo_flags = LO_WITNESS | LO_INITIALIZED | LO_SLEEPABLE |
	    (LO_CLASS_RWLOCK << LO_CLASSSHIFT)
};
struct lock_object timeout_spinlock_obj = {
	.lo_name = "timeout",
	.lo_flags = LO_WITNESS | LO_INITIALIZED |
	    (LO_CLASS_MUTEX << LO_CLASSSHIFT)
};
struct lock_type timeout_sleeplock_type = {
	.lt_name = "timeout"
};
struct lock_type timeout_spinlock_type = {
	.lt_name = "timeout"
};
#define TIMEOUT_LOCK_OBJ(needsproc) \
	((needsproc) ? &timeout_sleeplock_obj : &timeout_spinlock_obj)
#endif

void softclock(void *);
void softclock_create_thread(void *);
void softclock_process_kclock_timeout(struct timeout *, int);
void softclock_process_tick_timeout(struct timeout *, int);
void softclock_thread(void *);
#ifdef MULTIPROCESSOR
void softclock_thread_mp(void *);
#endif
void timeout_barrier_timeout(void *);
uint32_t timeout_bucket(const struct timeout *);
uint32_t timeout_maskwheel(uint32_t, const struct timespec *);
void timeout_run(struct timeout *);

/*
 * The first thing in a struct timeout is its struct circq, so we
 * can get back from a pointer to the latter to a pointer to the
 * whole timeout with just a cast.
 */
static inline struct timeout *
timeout_from_circq(struct circq *p)
{
	return ((struct timeout *)(p));
}

static inline void
timeout_sync_order(int needsproc)
{
	WITNESS_CHECKORDER(TIMEOUT_LOCK_OBJ(needsproc), LOP_NEWORDER, NULL);
}

static inline void
timeout_sync_enter(int needsproc)
{
	timeout_sync_order(needsproc);
	WITNESS_LOCK(TIMEOUT_LOCK_OBJ(needsproc), 0);
}

static inline void
timeout_sync_leave(int needsproc)
{
	WITNESS_UNLOCK(TIMEOUT_LOCK_OBJ(needsproc), 0);
}

/*
 * Some of the "math" in here is a bit tricky.
 *
 * We have to beware of wrapping ints.
 * We use the fact that any element added to the queue must be added with a
 * positive time.  That means that any element `to' on the queue cannot be
 * scheduled to timeout further in time than INT_MAX, but to->to_time can
 * be positive or negative so comparing it with anything is dangerous.
 * The only way we can use the to->to_time value in any predictable way
 * is when we calculate how far in the future `to' will timeout -
 * "to->to_time - ticks".  The result will always be positive for future
 * timeouts and 0 or negative for due timeouts.
 */
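
/*
 * Illustrative example (not part of the original file): if ticks is
 * INT_MAX - 100 and a timeout is added with to_ticks = 200, to->to_time
 * wraps past INT_MAX and ends up negative.  Comparing it directly
 * against ticks would then give the wrong answer, but the difference
 * "to->to_time - ticks" still evaluates to 200, so the "future or due"
 * test described above keeps working across the wrap.
 */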

void
timeout_startup(void)
{
	int b, level;

	CIRCQ_INIT(&timeout_new);
	CIRCQ_INIT(&timeout_todo);
	CIRCQ_INIT(&timeout_proc);
#ifdef MULTIPROCESSOR
	CIRCQ_INIT(&timeout_proc_mp);
#endif
	for (b = 0; b < nitems(timeout_wheel); b++)
		CIRCQ_INIT(&timeout_wheel[b]);
	for (b = 0; b < nitems(timeout_wheel_kc); b++)
		CIRCQ_INIT(&timeout_wheel_kc[b]);

	for (level = 0; level < nitems(timeout_level_width); level++)
		timeout_level_width[level] = 2 << (level * WHEELBITS);
	NSEC_TO_TIMESPEC(tick_nsec, &tick_ts);
}

void
timeout_proc_init(void)
{
	softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL);
	if (softclock_si == NULL)
		panic("%s: unable to register softclock interrupt", __func__);

	WITNESS_INIT(&timeout_sleeplock_obj, &timeout_sleeplock_type);
	WITNESS_INIT(&timeout_spinlock_obj, &timeout_spinlock_type);

	kthread_create_deferred(softclock_create_thread, NULL);
}

void
timeout_set(struct timeout *new, void (*fn)(void *), void *arg)
{
	timeout_set_flags(new, fn, arg, KCLOCK_NONE, 0);
}

void
timeout_set_flags(struct timeout *to, void (*fn)(void *), void *arg, int kclock,
    int flags)
{
	KASSERT(!ISSET(flags, ~(TIMEOUT_PROC | TIMEOUT_MPSAFE)));
	KASSERT(kclock >= KCLOCK_NONE && kclock < KCLOCK_MAX);

	to->to_func = fn;
	to->to_arg = arg;
	to->to_kclock = kclock;
	to->to_flags = flags | TIMEOUT_INITIALIZED;

	/* For now, only process context timeouts may be marked MP-safe. */
	if (ISSET(to->to_flags, TIMEOUT_MPSAFE))
		KASSERT(ISSET(to->to_flags, TIMEOUT_PROC));
}

void
timeout_set_proc(struct timeout *new, void (*fn)(void *), void *arg)
{
	timeout_set_flags(new, fn, arg, KCLOCK_NONE, TIMEOUT_PROC);
}

int
timeout_add(struct timeout *new, int to_ticks)
{
	int old_time;
	int ret = 1;

	KASSERT(ISSET(new->to_flags, TIMEOUT_INITIALIZED));
	KASSERT(new->to_kclock == KCLOCK_NONE);
	KASSERT(to_ticks >= 0);

	mtx_enter(&timeout_mutex);

	/* Initialize the time here; it won't change. */
	old_time = new->to_time;
	new->to_time = to_ticks + ticks;
	CLR(new->to_flags, TIMEOUT_TRIGGERED);

	/*
	 * If this timeout is already scheduled and is now moved
	 * earlier, reschedule it now.  Otherwise leave it in place
	 * and let it be rescheduled later.
	 */
	if (ISSET(new->to_flags, TIMEOUT_ONQUEUE)) {
		if (new->to_time - ticks < old_time - ticks) {
			CIRCQ_REMOVE(&new->to_list);
			CIRCQ_INSERT_TAIL(&timeout_new, &new->to_list);
		}
		tostat.tos_readded++;
		ret = 0;
	} else {
		SET(new->to_flags, TIMEOUT_ONQUEUE);
		CIRCQ_INSERT_TAIL(&timeout_new, &new->to_list);
	}
#if NKCOV > 0
	if (!kcov_cold)
		new->to_process = curproc->p_p;
#endif
	tostat.tos_added++;
	mtx_leave(&timeout_mutex);

	return ret;
}
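
/*
 * Illustrative use of the tick-based API (a sketch, not part of the
 * original file; "mydrv_tick", "sc", and "sc_tmo" are hypothetical
 * names):
 *
 *	timeout_set(&sc->sc_tmo, mydrv_tick, sc);
 *	timeout_add(&sc->sc_tmo, hz / 10);
 *
 * hz / 10 ticks is roughly 100 milliseconds.  Calling timeout_add()
 * again while the timeout is still pending returns 0 and only moves
 * the timeout if the new expiry is earlier than the old one.
 */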

static inline int
timeout_add_ticks(struct timeout *to, uint64_t to_ticks, int notzero)
{
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	else if (to_ticks == 0 && notzero)
		to_ticks = 1;

	return timeout_add(to, (int)to_ticks);
}

int
timeout_add_tv(struct timeout *to, const struct timeval *tv)
{
	uint64_t to_ticks;

	to_ticks = (uint64_t)hz * tv->tv_sec + tv->tv_usec / tick;

	return timeout_add_ticks(to, to_ticks, tv->tv_usec > 0);
}

int
timeout_add_sec(struct timeout *to, int secs)
{
	uint64_t to_ticks;

	to_ticks = (uint64_t)hz * secs;

	return timeout_add_ticks(to, to_ticks, 1);
}

int
timeout_add_msec(struct timeout *to, uint64_t msecs)
{
	uint64_t to_ticks;

	to_ticks = msecs * 1000 / tick;

	return timeout_add_ticks(to, to_ticks, msecs > 0);
}

int
timeout_add_usec(struct timeout *to, uint64_t usecs)
{
	uint64_t to_ticks;

	to_ticks = usecs / tick;

	return timeout_add_ticks(to, to_ticks, usecs > 0);
}

int
timeout_add_nsec(struct timeout *to, uint64_t nsecs)
{
	uint64_t to_ticks;

	to_ticks = nsecs / (tick * 1000);

	return timeout_add_ticks(to, to_ticks, nsecs > 0);
}
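
/*
 * Worked example for the conversions above (illustrative, assuming
 * hz = 100 so that tick = 10000 microseconds): timeout_add_msec(to, 25)
 * computes 25 * 1000 / 10000 = 2 ticks, while timeout_add_msec(to, 5)
 * truncates to 0 ticks and is bumped back up to 1 by timeout_add_ticks()
 * so that a nonzero request never becomes a zero-tick timeout.
 */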

int
timeout_abs_ts(struct timeout *to, const struct timespec *abstime)
{
	struct timespec old_abstime;
	int ret = 1;

	mtx_enter(&timeout_mutex);

	KASSERT(ISSET(to->to_flags, TIMEOUT_INITIALIZED));
	KASSERT(to->to_kclock == KCLOCK_UPTIME);

	old_abstime = to->to_abstime;
	to->to_abstime = *abstime;
	CLR(to->to_flags, TIMEOUT_TRIGGERED);

	if (ISSET(to->to_flags, TIMEOUT_ONQUEUE)) {
		if (timespeccmp(abstime, &old_abstime, <)) {
			CIRCQ_REMOVE(&to->to_list);
			CIRCQ_INSERT_TAIL(&timeout_new, &to->to_list);
		}
		tostat.tos_readded++;
		ret = 0;
	} else {
		SET(to->to_flags, TIMEOUT_ONQUEUE);
		CIRCQ_INSERT_TAIL(&timeout_new, &to->to_list);
	}
#if NKCOV > 0
	if (!kcov_cold)
		to->to_process = curproc->p_p;
#endif
	tostat.tos_added++;

	mtx_leave(&timeout_mutex);

	return ret;
}
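
/*
 * Illustrative use of the kclock API (a sketch, not part of the
 * original file; "to", "myfn", and "arg" are hypothetical): arm a
 * timeout at an absolute point on the uptime clock, one second from now.
 *
 *	struct timespec now, delta = { 1, 0 }, when;
 *
 *	timeout_set_flags(&to, myfn, arg, KCLOCK_UPTIME, 0);
 *	nanouptime(&now);
 *	timespecadd(&now, &delta, &when);
 *	timeout_abs_ts(&to, &when);
 */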

int
timeout_del(struct timeout *to)
{
	int ret = 0;

	mtx_enter(&timeout_mutex);
	if (ISSET(to->to_flags, TIMEOUT_ONQUEUE)) {
		CIRCQ_REMOVE(&to->to_list);
		CLR(to->to_flags, TIMEOUT_ONQUEUE);
		tostat.tos_cancelled++;
		ret = 1;
	}
	CLR(to->to_flags, TIMEOUT_TRIGGERED);
	tostat.tos_deleted++;
	mtx_leave(&timeout_mutex);

	return ret;
}

int
timeout_del_barrier(struct timeout *to)
{
	int removed;

	timeout_sync_order(ISSET(to->to_flags, TIMEOUT_PROC));

	removed = timeout_del(to);
	if (!removed)
		timeout_barrier(to);

	return removed;
}
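
/*
 * Illustrative teardown pattern (a sketch, not part of the original
 * file; "sc" and "sc_tmo" are hypothetical): before freeing memory the
 * callback dereferences, cancel the timeout and wait out any run that
 * is already in progress.
 *
 *	timeout_del_barrier(&sc->sc_tmo);
 *	free(sc, M_DEVBUF, sizeof(*sc));
 */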

void
timeout_barrier(struct timeout *to)
{
	struct timeout barrier;
	struct cond c;
	int flags;

	flags = to->to_flags & (TIMEOUT_PROC | TIMEOUT_MPSAFE);
	timeout_sync_order(ISSET(flags, TIMEOUT_PROC));

	timeout_set_flags(&barrier, timeout_barrier_timeout, &c, KCLOCK_NONE,
	    flags);
	barrier.to_process = curproc->p_p;
	cond_init(&c);

	mtx_enter(&timeout_mutex);

	barrier.to_time = ticks;
	SET(barrier.to_flags, TIMEOUT_ONQUEUE);
	if (ISSET(flags, TIMEOUT_PROC)) {
#ifdef MULTIPROCESSOR
		if (ISSET(flags, TIMEOUT_MPSAFE))
			CIRCQ_INSERT_TAIL(&timeout_proc_mp, &barrier.to_list);
		else
#endif
			CIRCQ_INSERT_TAIL(&timeout_proc, &barrier.to_list);
	} else
		CIRCQ_INSERT_TAIL(&timeout_todo, &barrier.to_list);

	mtx_leave(&timeout_mutex);

	if (ISSET(flags, TIMEOUT_PROC)) {
#ifdef MULTIPROCESSOR
		if (ISSET(flags, TIMEOUT_MPSAFE))
			wakeup_one(&timeout_proc_mp);
		else
#endif
			wakeup_one(&timeout_proc);
	} else
		softintr_schedule(softclock_si);

	cond_wait(&c, "tmobar");
}

void
timeout_barrier_timeout(void *arg)
{
	struct cond *c = arg;

	cond_signal(c);
}

uint32_t
timeout_bucket(const struct timeout *to)
{
	struct timespec diff, shifted_abstime;
	struct kclock *kc;
	uint32_t level;

	KASSERT(to->to_kclock == KCLOCK_UPTIME);
	kc = &timeout_kclock[to->to_kclock];

	KASSERT(timespeccmp(&kc->kc_lastscan, &to->to_abstime, <));
	timespecsub(&to->to_abstime, &kc->kc_lastscan, &diff);
	for (level = 0; level < nitems(timeout_level_width) - 1; level++) {
		if (diff.tv_sec < timeout_level_width[level])
			break;
	}
	timespecadd(&to->to_abstime, &kc->kc_offset, &shifted_abstime);
	return level * WHEELSIZE + timeout_maskwheel(level, &shifted_abstime);
}

/*
 * Hash the absolute time into a bucket on a given level of the wheel.
 *
 * The complete hash is 32 bits.  The upper 25 bits are seconds, the
 * lower 7 bits are nanoseconds.  tv_nsec is a positive value less
 * than one billion so we need to divide it to isolate the desired
 * bits.  We can't just shift it.
 *
 * The level is used to isolate an 8-bit portion of the hash.  The
 * resulting number indicates which bucket the absolute time belongs
 * in on the given level of the wheel.
 */
uint32_t
timeout_maskwheel(uint32_t level, const struct timespec *abstime)
{
	uint32_t hi, lo;

	hi = abstime->tv_sec << 7;
	lo = abstime->tv_nsec / 7812500;

	return ((hi | lo) >> (level * WHEELBITS)) & WHEELMASK;
}
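
/*
 * Worked example (illustrative, not part of the original file): for an
 * absolute time of 4.500000000 seconds, hi = 4 << 7 = 512 and
 * lo = 500000000 / 7812500 = 64, so the hash is 576.  On level 0 that
 * selects bucket 576 & 255 = 64; on level 1 it selects bucket
 * (576 >> 8) & 255 = 2.
 */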

/*
 * This is called from hardclock() on the primary CPU at the start of
 * every tick.
 */
void
timeout_hardclock_update(void)
{
	struct timespec elapsed, now;
	struct kclock *kc;
	struct timespec *lastscan = &timeout_kclock[KCLOCK_UPTIME].kc_lastscan;
	int b, done, first, i, last, level, need_softclock = 1, off;

	mtx_enter(&timeout_mutex);

	MOVEBUCKET(0, ticks);
	if (MASKWHEEL(0, ticks) == 0) {
		MOVEBUCKET(1, ticks);
		if (MASKWHEEL(1, ticks) == 0) {
			MOVEBUCKET(2, ticks);
			if (MASKWHEEL(2, ticks) == 0)
				MOVEBUCKET(3, ticks);
		}
	}

	/*
	 * Dump the buckets that expired while we were away.
	 *
	 * If the elapsed time has exceeded a level's limit then we need
	 * to dump every bucket in the level.  We have necessarily completed
	 * a lap of that level, too, so we need to process buckets in the
	 * next level.
	 *
	 * Otherwise we need to compare indices: if the index of the first
	 * expired bucket is greater than that of the last then we have
	 * completed a lap of the level and need to process buckets in the
	 * next level.
	 */
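	/*
	 * Illustrative numbers (not part of the original file):
	 * timeout_startup() sets the level widths to 2, 512, 131072, and
	 * 33554432 seconds (2 << (level * WHEELBITS)).  So if more than
	 * 2 seconds have elapsed since the last scan, every level-0 bucket
	 * is dumped and the scan continues into level 1.
	 */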
	nanouptime(&now);
	timespecsub(&now, lastscan, &elapsed);
	for (level = 0; level < nitems(timeout_level_width); level++) {
		first = timeout_maskwheel(level, lastscan);
		if (elapsed.tv_sec >= timeout_level_width[level]) {
			last = (first == 0) ? WHEELSIZE - 1 : first - 1;
			done = 0;
		} else {
			last = timeout_maskwheel(level, &now);
			done = first <= last;
		}
		off = level * WHEELSIZE;
		for (b = first;; b = (b + 1) % WHEELSIZE) {
			CIRCQ_CONCAT(&timeout_todo, &timeout_wheel_kc[off + b]);
			if (b == last)
				break;
		}
		if (done)
			break;
	}

	/*
	 * Update the cached state for each kclock.
	 */
	for (i = 0; i < nitems(timeout_kclock); i++) {
		kc = &timeout_kclock[i];
		timespecadd(&now, &kc->kc_offset, &kc->kc_lastscan);
		timespecsub(&kc->kc_lastscan, &tick_ts, &kc->kc_late);
	}

	if (CIRCQ_EMPTY(&timeout_new) && CIRCQ_EMPTY(&timeout_todo))
		need_softclock = 0;

	mtx_leave(&timeout_mutex);

	if (need_softclock)
		softintr_schedule(softclock_si);
}

void
timeout_run(struct timeout *to)
{
	void (*fn)(void *);
	void *arg;
	int needsproc;

	MUTEX_ASSERT_LOCKED(&timeout_mutex);

	CLR(to->to_flags, TIMEOUT_ONQUEUE);
	SET(to->to_flags, TIMEOUT_TRIGGERED);

	fn = to->to_func;
	arg = to->to_arg;
	needsproc = ISSET(to->to_flags, TIMEOUT_PROC);
#if NKCOV > 0
	struct process *kcov_process = to->to_process;
#endif

	mtx_leave(&timeout_mutex);
	timeout_sync_enter(needsproc);
#if NKCOV > 0
	kcov_remote_enter(KCOV_REMOTE_COMMON, kcov_process);
#endif
	fn(arg);
#if NKCOV > 0
	kcov_remote_leave(KCOV_REMOTE_COMMON, kcov_process);
#endif
	timeout_sync_leave(needsproc);
	mtx_enter(&timeout_mutex);
}

void
softclock_process_kclock_timeout(struct timeout *to, int new)
{
	struct kclock *kc = &timeout_kclock[to->to_kclock];

	if (timespeccmp(&to->to_abstime, &kc->kc_lastscan, >)) {
		tostat.tos_scheduled++;
		if (!new)
			tostat.tos_rescheduled++;
		CIRCQ_INSERT_TAIL(&timeout_wheel_kc[timeout_bucket(to)],
		    &to->to_list);
		return;
	}
	if (!new && timespeccmp(&to->to_abstime, &kc->kc_late, <=))
		tostat.tos_late++;
	if (ISSET(to->to_flags, TIMEOUT_PROC)) {
#ifdef MULTIPROCESSOR
		if (ISSET(to->to_flags, TIMEOUT_MPSAFE))
			CIRCQ_INSERT_TAIL(&timeout_proc_mp, &to->to_list);
		else
#endif
			CIRCQ_INSERT_TAIL(&timeout_proc, &to->to_list);
		return;
	}
	timeout_run(to);
	tostat.tos_run_softclock++;
}

void
softclock_process_tick_timeout(struct timeout *to, int new)
{
	int delta = to->to_time - ticks;

	if (delta > 0) {
		tostat.tos_scheduled++;
		if (!new)
			tostat.tos_rescheduled++;
		CIRCQ_INSERT_TAIL(&BUCKET(delta, to->to_time), &to->to_list);
		return;
	}
	if (!new && delta < 0)
		tostat.tos_late++;
	if (ISSET(to->to_flags, TIMEOUT_PROC)) {
#ifdef MULTIPROCESSOR
		if (ISSET(to->to_flags, TIMEOUT_MPSAFE))
			CIRCQ_INSERT_TAIL(&timeout_proc_mp, &to->to_list);
		else
#endif
			CIRCQ_INSERT_TAIL(&timeout_proc, &to->to_list);
		return;
	}
	timeout_run(to);
	tostat.tos_run_softclock++;
}

/*
 * Timeouts are processed here instead of timeout_hardclock_update()
 * to avoid doing any more work at IPL_CLOCK than absolutely necessary.
 * Down here at IPL_SOFTCLOCK other interrupts can be serviced promptly
 * so the system remains responsive even if there is a surge of timeouts.
 */
void
softclock(void *arg)
{
	struct timeout *first_new, *to;
	int needsproc, new;
#ifdef MULTIPROCESSOR
	int need_proc_mp;
#endif

	first_new = NULL;
	new = 0;

	mtx_enter(&timeout_mutex);
	if (!CIRCQ_EMPTY(&timeout_new))
		first_new = timeout_from_circq(CIRCQ_FIRST(&timeout_new));
	CIRCQ_CONCAT(&timeout_todo, &timeout_new);
	while (!CIRCQ_EMPTY(&timeout_todo)) {
		to = timeout_from_circq(CIRCQ_FIRST(&timeout_todo));
		CIRCQ_REMOVE(&to->to_list);
		if (to == first_new)
			new = 1;
		if (to->to_kclock == KCLOCK_NONE)
			softclock_process_tick_timeout(to, new);
		else if (to->to_kclock == KCLOCK_UPTIME)
			softclock_process_kclock_timeout(to, new);
		else {
			panic("%s: invalid to_clock: %d",
			    __func__, to->to_kclock);
		}
	}
	tostat.tos_softclocks++;
	needsproc = !CIRCQ_EMPTY(&timeout_proc);
#ifdef MULTIPROCESSOR
	need_proc_mp = !CIRCQ_EMPTY(&timeout_proc_mp);
#endif
	mtx_leave(&timeout_mutex);

	if (needsproc)
		wakeup(&timeout_proc);
#ifdef MULTIPROCESSOR
	if (need_proc_mp)
		wakeup(&timeout_proc_mp);
#endif
}

void
softclock_create_thread(void *arg)
{
	if (kthread_create(softclock_thread, NULL, NULL, "softclock"))
		panic("fork softclock");
#ifdef MULTIPROCESSOR
	if (kthread_create(softclock_thread_mp, NULL, NULL, "softclockmp"))
		panic("kthread_create softclock_thread_mp");
#endif
}

void
softclock_thread(void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct timeout *to;
	int s;

	KERNEL_ASSERT_LOCKED();

	/* Be conservative for the moment */
	CPU_INFO_FOREACH(cii, ci) {
		if (CPU_IS_PRIMARY(ci))
			break;
	}
	KASSERT(ci != NULL);
	sched_peg_curproc(ci);

	s = splsoftclock();
	mtx_enter(&timeout_mutex);
	for (;;) {
		while (!CIRCQ_EMPTY(&timeout_proc)) {
			to = timeout_from_circq(CIRCQ_FIRST(&timeout_proc));
			CIRCQ_REMOVE(&to->to_list);
			timeout_run(to);
			tostat.tos_run_thread++;
		}
		tostat.tos_thread_wakeups++;
		msleep_nsec(&timeout_proc, &timeout_mutex, PSWP, "tmoslp",
		    INFSLP);
	}
	splx(s);
}

#ifdef MULTIPROCESSOR
void
softclock_thread_mp(void *arg)
{
	struct timeout *to;

	KERNEL_ASSERT_LOCKED();
	KERNEL_UNLOCK();

	mtx_enter(&timeout_mutex);
	for (;;) {
		while (!CIRCQ_EMPTY(&timeout_proc_mp)) {
			to = timeout_from_circq(CIRCQ_FIRST(&timeout_proc_mp));
			CIRCQ_REMOVE(&to->to_list);
			timeout_run(to);
			tostat.tos_run_thread++;
		}
		tostat.tos_thread_wakeups++;
		msleep_nsec(&timeout_proc_mp, &timeout_mutex, PSWP, "tmoslp",
		    INFSLP);
	}
}
#endif /* MULTIPROCESSOR */

#ifndef SMALL_KERNEL
void
timeout_adjust_ticks(int adj)
{
	struct timeout *to;
	struct circq *p;
	int new_ticks, b;

	/* adjusting the monotonic clock backwards would be a Bad Thing */
	if (adj <= 0)
		return;

	mtx_enter(&timeout_mutex);
	new_ticks = ticks + adj;
	for (b = 0; b < nitems(timeout_wheel); b++) {
		p = CIRCQ_FIRST(&timeout_wheel[b]);
		while (p != &timeout_wheel[b]) {
			to = timeout_from_circq(p);
			p = CIRCQ_FIRST(p);

			/* when moving a timeout forward need to reinsert it */
			if (to->to_time - ticks < adj)
				to->to_time = new_ticks;
			CIRCQ_REMOVE(&to->to_list);
			CIRCQ_INSERT_TAIL(&timeout_todo, &to->to_list);
		}
	}
	ticks = new_ticks;
	mtx_leave(&timeout_mutex);
}
#endif

int
timeout_sysctl(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	struct timeoutstat status;

	mtx_enter(&timeout_mutex);
	memcpy(&status, &tostat, sizeof(status));
	mtx_leave(&timeout_mutex);

	return sysctl_rdstruct(oldp, oldlenp, newp, &status, sizeof(status));
}

#ifdef DDB
const char *db_kclock(int);
void db_show_callout_bucket(struct circq *);
void db_show_timeout(struct timeout *, struct circq *);
const char *db_timespec(const struct timespec *);

const char *
db_kclock(int kclock)
{
	switch (kclock) {
	case KCLOCK_UPTIME:
		return "uptime";
	default:
		return "invalid";
	}
}

const char *
db_timespec(const struct timespec *ts)
{
	static char buf[32];
	struct timespec tmp, zero;

	if (ts->tv_sec >= 0) {
		snprintf(buf, sizeof(buf), "%lld.%09ld",
		    ts->tv_sec, ts->tv_nsec);
		return buf;
	}

	timespecclear(&zero);
	timespecsub(&zero, ts, &tmp);
	snprintf(buf, sizeof(buf), "-%lld.%09ld", tmp.tv_sec, tmp.tv_nsec);
	return buf;
}

void
db_show_callout_bucket(struct circq *bucket)
{
	struct circq *p;

	CIRCQ_FOREACH(p, bucket)
		db_show_timeout(timeout_from_circq(p), bucket);
}

void
db_show_timeout(struct timeout *to, struct circq *bucket)
{
	struct timespec remaining;
	struct kclock *kc;
	char buf[8];
	db_expr_t offset;
	struct circq *wheel;
	const char *name, *where;
	int width = sizeof(long) * 2;

	db_find_sym_and_offset((vaddr_t)to->to_func, &name, &offset);
	name = name ? name : "?";
	if (bucket == &timeout_new)
		where = "new";
	else if (bucket == &timeout_todo)
		where = "softint";
	else if (bucket == &timeout_proc)
		where = "thread";
#ifdef MULTIPROCESSOR
	else if (bucket == &timeout_proc_mp)
		where = "thread-mp";
#endif
	else {
		if (to->to_kclock == KCLOCK_UPTIME)
			wheel = timeout_wheel_kc;
		else if (to->to_kclock == KCLOCK_NONE)
			wheel = timeout_wheel;
		else
			goto invalid;
		snprintf(buf, sizeof(buf), "%3ld/%1ld",
		    (bucket - wheel) % WHEELSIZE,
		    (bucket - wheel) / WHEELSIZE);
		where = buf;
	}
	if (to->to_kclock == KCLOCK_UPTIME) {
		kc = &timeout_kclock[to->to_kclock];
		timespecsub(&to->to_abstime, &kc->kc_lastscan, &remaining);
		db_printf("%20s %8s %9s 0x%0*lx %s\n",
		    db_timespec(&remaining), db_kclock(to->to_kclock), where,
		    width, (ulong)to->to_arg, name);
	} else if (to->to_kclock == KCLOCK_NONE) {
		db_printf("%20d %8s %9s 0x%0*lx %s\n",
		    to->to_time - ticks, "ticks", where,
		    width, (ulong)to->to_arg, name);
	} else
		goto invalid;
	return;

invalid:
	db_printf("%s: timeout 0x%p: invalid to_kclock: %d",
	    __func__, to, to->to_kclock);
}

void
db_show_callout(db_expr_t addr, int haddr, db_expr_t count, char *modif)
{
	struct kclock *kc;
	int width = sizeof(long) * 2 + 2;
	int b, i;

	db_printf("%20s %8s\n", "lastscan", "clock");
	db_printf("%20d %8s\n", ticks, "ticks");
	for (i = 0; i < nitems(timeout_kclock); i++) {
		kc = &timeout_kclock[i];
		db_printf("%20s %8s\n",
		    db_timespec(&kc->kc_lastscan), db_kclock(i));
	}
	db_printf("\n");
	db_printf("%20s %8s %9s %*s %s\n",
	    "remaining", "clock", "wheel", width, "arg", "func");
	db_show_callout_bucket(&timeout_new);
	db_show_callout_bucket(&timeout_todo);
	db_show_callout_bucket(&timeout_proc);
#ifdef MULTIPROCESSOR
	db_show_callout_bucket(&timeout_proc_mp);
#endif
	for (b = 0; b < nitems(timeout_wheel); b++)
		db_show_callout_bucket(&timeout_wheel[b]);
	for (b = 0; b < nitems(timeout_wheel_kc); b++)
		db_show_callout_bucket(&timeout_wheel_kc[b]);
}
#endif