/*	$OpenBSD: kern_time.c,v 1.170 2024/10/03 10:18:29 claudio Exp $	*/
/*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/clockintr.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/stdint.h>
#include <sys/pledge.h>
#include <sys/task.h>
#include <sys/time.h>
#include <sys/timeout.h>
#include <sys/timetc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <dev/clock_subr.h>

int itimerfix(struct itimerval *);
void process_reset_itimer_flag(struct process *);

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
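
/*
 * For reference, a minimal sketch of the timespec arithmetic macros
 * from <sys/time.h> used throughout this file; the result argument
 * may alias an input, e.g. timespecsub(&req, &elapsed, &req):
 *
 *	struct timespec a, b, res;
 *
 *	timespecadd(&a, &b, &res);	res = a + b
 *	timespecsub(&a, &b, &res);	res = a - b
 */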

/* This function is used by clock_settime and settimeofday */
int
settime(const struct timespec *ts)
{
	struct timespec now;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 * how to deal with the hardware RTCs.
	 */
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back).  This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <=)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	tc_setrealtimeclock(ts);
	KERNEL_LOCK();
	resettodr();
	KERNEL_UNLOCK();

	return (0);
}

int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct tusage tu;
	struct proc *q;
	int error = 0;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		nanoruntime(tp);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
		nanouptime(tp);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		nanouptime(tp);
		tuagg_get_process(&tu, p->p_p);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &tu.tu_runtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		nanouptime(tp);
		tuagg_get_proc(&tu, p);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &tu.tu_runtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind_user(__CLOCK_PTID(clock_id), p->p_p);
			if (q == NULL)
				error = ESRCH;
			else {
				tuagg_get_proc(&tu, q);
				*tp = tu.tu_runtime;
			}
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}
	return (error);
}

int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	memset(&ats, 0, sizeof(ats));
	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktrabstimespec(p, &ats);
#endif
	return (error);
}
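
/*
 * Userland usage, a minimal sketch (see clock_gettime(2)); the err(3)
 * error handling is illustrative:
 *
 *	struct timespec ts;
 *
 *	if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
 *		err(1, "clock_gettime");
 */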

int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	if ((error = suser(p)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		if (!timespecisvalid(&ats))
			return (EINVAL);
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only */
		return (EINVAL);
	}

	return (0);
}

int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct bintime bt;
	struct timespec ts;
	struct proc *q;
	u_int64_t scale;
	int error = 0;

	memset(&ts, 0, sizeof(ts));
	clock_id = SCARG(uap, clock_id);

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
	case CLOCK_UPTIME:
		memset(&bt, 0, sizeof(bt));
		rw_enter_read(&tc_lock);
		scale = ((1ULL << 63) / tc_getfrequency()) * 2;
		bt.frac = tc_getprecision() * scale;
		rw_exit_read(&tc_lock);
		BINTIME_TO_TIMESPEC(&bt, &ts);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		ts.tv_nsec = 1000000000 / stathz;
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind_user(__CLOCK_PTID(clock_id), p->p_p);
			if (q == NULL)
				error = ESRCH;
			else
				ts.tv_nsec = 1000000000 / stathz;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}

	if (error == 0 && SCARG(uap, tp)) {
		ts.tv_nsec = MAX(ts.tv_nsec, 1);
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
	}

	return error;
}

int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	struct sys_nanosleep_args/* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec elapsed, remainder, request, start, stop;
	uint64_t nsecs;
	struct timespec *rmtp;
	int copyout_error, error;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &request, sizeof(request));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrreltimespec(p, &request);
#endif

	if (request.tv_sec < 0 || !timespecisvalid(&request))
		return (EINVAL);

	do {
		getnanouptime(&start);
		nsecs = MAX(1, MIN(TIMESPEC_TO_NSEC(&request), MAXTSLP));
		error = tsleep_nsec(&nowake, PWAIT | PCATCH, "nanoslp", nsecs);
		getnanouptime(&stop);
		timespecsub(&stop, &start, &elapsed);
		timespecsub(&request, &elapsed, &request);
		if (request.tv_sec < 0)
			timespecclear(&request);
		if (error != EWOULDBLOCK)
			break;
	} while (timespecisset(&request));

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		memset(&remainder, 0, sizeof(remainder));
		remainder = request;
		copyout_error = copyout(&remainder, rmtp, sizeof(remainder));
		if (copyout_error)
			error = copyout_error;
#ifdef KTRACE
		if (copyout_error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &remainder);
#endif
	}

	return error;
}
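
/*
 * Userland usage, a minimal sketch (see nanosleep(2)).  If the sleep is
 * interrupted by a signal, the unslept time is returned through the
 * second argument, so the caller can resume:
 *
 *	struct timespec req = { 1, 500000000 }, rem;
 *
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;
 */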

int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	static const struct timezone zerotz = { 0, 0 };
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof(atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimeval(p, &atv);
#endif
	}
	if (tzp)
		error = copyout(&zerotz, tzp, sizeof(zerotz));
	return (error);
}

int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p)))
		return (error);
	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
	if (tv) {
		struct timespec ts;

#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimeval(p, &atv);
#endif
		if (!timerisvalid(&atv))
			return (EINVAL);
		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}

	return (0);
}

#define ADJFREQ_MAX	(500000000LL << 32)
#define ADJFREQ_MIN	(-ADJFREQ_MAX)

int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error = 0;
	int64_t f, oldf;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);

	if (freq) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
		if (f < ADJFREQ_MIN || f > ADJFREQ_MAX)
			return (EINVAL);
	}

	rw_enter(&tc_lock, (freq == NULL) ? RW_READ : RW_WRITE);
	if (oldfreq) {
		tc_adjfreq(&oldf, NULL);
		if ((error = copyout(&oldf, oldfreq, sizeof(oldf))))
			goto out;
	}
	if (freq)
		tc_adjfreq(NULL, &f);
out:
	rw_exit(&tc_lock);
	return (error);
}
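
/*
 * Userland usage, a minimal sketch (see adjfreq(2)).  Consistent with
 * ADJFREQ_MAX above, the frequency offset is expressed in units of
 * nanoseconds per second shifted left 32 bits, i.e. at most +/- 500 ms
 * of adjustment per second:
 *
 *	int64_t freq = 50LL << 32, old;
 *
 *	if (adjfreq(&freq, &old) == -1)
 *		err(1, "adjfreq");
 */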

int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	struct timeval atv;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	int64_t adjustment, remaining;
	int error;

	error = pledge_adjtime(p, delta);
	if (error)
		return error;

	if (delta) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrreltimeval(p, &atv);
#endif
		if (!timerisvalid(&atv))
			return (EINVAL);

		if (atv.tv_sec > INT64_MAX / 1000000)
			return EINVAL;
		if (atv.tv_sec < INT64_MIN / 1000000)
			return EINVAL;
		adjustment = atv.tv_sec * 1000000;
		if (adjustment > INT64_MAX - atv.tv_usec)
			return EINVAL;
		adjustment += atv.tv_usec;

		rw_enter_write(&tc_lock);
	}

	if (olddelta) {
		tc_adjtime(&remaining, NULL);
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec = remaining / 1000000;
		atv.tv_usec = remaining % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			goto out;
	}

	if (delta)
		tc_adjtime(NULL, &adjustment);
out:
	if (delta)
		rw_exit_write(&tc_lock);
	return (error);
}
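
/*
 * Userland usage, a minimal sketch (see adjtime(2)): skew the clock
 * forward by 0.3 seconds; any previously outstanding correction is
 * returned through olddelta:
 *
 *	struct timeval delta = { 0, 300000 }, olddelta;
 *
 *	if (adjtime(&delta, &olddelta) == -1)
 *		err(1, "adjtime");
 */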

struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

/*
 * Get or set value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given below)
 * to be delayed in real time past when it is supposed to occur.  It
 * does not suffice, therefore, to reload the real timer's .it_value from the
 * real time timer's .it_interval.  Rather, we compute the next time in
 * absolute time the timer should go off.
 */
void
setitimer(int which, const struct itimerval *itv, struct itimerval *olditv)
{
	struct itimerspec its, oldits;
	struct timespec now;
	struct itimerspec *itimer;
	struct process *pr;

	KASSERT(which >= ITIMER_REAL && which <= ITIMER_PROF);

	pr = curproc->p_p;
	itimer = &pr->ps_timer[which];

	if (itv != NULL) {
		TIMEVAL_TO_TIMESPEC(&itv->it_value, &its.it_value);
		TIMEVAL_TO_TIMESPEC(&itv->it_interval, &its.it_interval);
	}

	if (which == ITIMER_REAL) {
		mtx_enter(&pr->ps_mtx);
		nanouptime(&now);
	} else
		mtx_enter(&itimer_mtx);

	if (olditv != NULL)
		oldits = *itimer;
	if (itv != NULL) {
		if (which == ITIMER_REAL) {
			if (timespecisset(&its.it_value)) {
				timespecadd(&its.it_value, &now, &its.it_value);
				timeout_abs_ts(&pr->ps_realit_to, &its.it_value);
			} else
				timeout_del(&pr->ps_realit_to);
		}
		*itimer = its;
		if (which == ITIMER_VIRTUAL || which == ITIMER_PROF) {
			process_reset_itimer_flag(pr);
			need_resched(curcpu());
		}
	}

	if (which == ITIMER_REAL)
		mtx_leave(&pr->ps_mtx);
	else
		mtx_leave(&itimer_mtx);

	if (olditv != NULL) {
		if (which == ITIMER_REAL && timespecisset(&oldits.it_value)) {
			if (timespeccmp(&oldits.it_value, &now, <))
				timespecclear(&oldits.it_value);
			else {
				timespecsub(&oldits.it_value, &now,
				    &oldits.it_value);
			}
		}
		TIMESPEC_TO_TIMEVAL(&olditv->it_value, &oldits.it_value);
		TIMESPEC_TO_TIMEVAL(&olditv->it_interval, &oldits.it_interval);
	}
}
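
/*
 * Userland usage, a minimal sketch (see setitimer(2)): deliver SIGALRM
 * after one second and every second thereafter.  The handler name is
 * illustrative:
 *
 *	struct itimerval itv;
 *
 *	signal(SIGALRM, handler);
 *	itv.it_value.tv_sec = 1;
 *	itv.it_value.tv_usec = 0;
 *	itv.it_interval = itv.it_value;
 *	if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
 *		err(1, "setitimer");
 */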

void
cancel_all_itimers(void)
{
	struct itimerval itv;
	int i;

	timerclear(&itv.it_value);
	timerclear(&itv.it_interval);

	for (i = 0; i < nitems(curproc->p_p->ps_timer); i++)
		setitimer(i, &itv, NULL);
}

int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	int which, error;

	which = SCARG(uap, which);
	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return EINVAL;

	memset(&aitv, 0, sizeof(aitv));

	setitimer(which, NULL, &aitv);

	error = copyout(&aitv, SCARG(uap, itv), sizeof(aitv));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktritimerval(p, &aitv);
#endif
	return (error);
}

int
sys_setitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct itimerval aitv, olditv;
	struct itimerval *newitvp, *olditvp;
	int error, which;

	which = SCARG(uap, which);
	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return EINVAL;

	newitvp = olditvp = NULL;
	if (SCARG(uap, itv) != NULL) {
		error = copyin(SCARG(uap, itv), &aitv, sizeof(aitv));
		if (error)
			return error;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktritimerval(p, &aitv);
#endif
		error = itimerfix(&aitv);
		if (error)
			return error;
		newitvp = &aitv;
	}
	if (SCARG(uap, oitv) != NULL) {
		memset(&olditv, 0, sizeof(olditv));
		olditvp = &olditv;
	}
	if (newitvp == NULL && olditvp == NULL)
		return 0;

	setitimer(which, newitvp, olditvp);

	if (SCARG(uap, oitv) != NULL) {
		error = copyout(&olditv, SCARG(uap, oitv), sizeof(olditv));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktritimerval(p, &olditv);
#endif
		return error;
	}

	return 0;
}

/*
 * Real interval timer expired:
 * send the process whose timer expired an alarm signal.
 * If the timer is not set up to reload, just return.
 * Else compute the next time the timer should go off, which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(void *arg)
{
	struct timespec cts;
	struct process *pr = arg;
	struct itimerspec *tp = &pr->ps_timer[ITIMER_REAL];
	int need_signal = 0;

	mtx_enter(&pr->ps_mtx);

	/*
	 * Do nothing if the timer was cancelled or rescheduled while we
	 * were entering the mutex.
	 */
	if (!timespecisset(&tp->it_value) || timeout_pending(&pr->ps_realit_to))
		goto out;

	/* The timer expired.  We need to send the signal. */
	need_signal = 1;

	/* One-shot timers are not reloaded. */
	if (!timespecisset(&tp->it_interval)) {
		timespecclear(&tp->it_value);
		goto out;
	}

	/*
	 * Find the nearest future expiration point and restart
	 * the timeout.
	 */
	nanouptime(&cts);
	while (timespeccmp(&tp->it_value, &cts, <=))
		timespecadd(&tp->it_value, &tp->it_interval, &tp->it_value);
	if ((pr->ps_flags & PS_EXITING) == 0)
		timeout_abs_ts(&pr->ps_realit_to, &tp->it_value);

out:
	mtx_leave(&pr->ps_mtx);

	if (need_signal)
		prsignal(pr, SIGALRM);
}

/*
 * Check if the given setitimer(2) input is valid.  Clear it_interval
 * if it_value is unset.  Round it_interval up to the minimum interval
 * if necessary.
 */
int
itimerfix(struct itimerval *itv)
{
	static const struct timeval max = { .tv_sec = UINT_MAX, .tv_usec = 0 };
	struct timeval min_interval = { .tv_sec = 0, .tv_usec = tick };

	if (itv->it_value.tv_sec < 0 || !timerisvalid(&itv->it_value))
		return EINVAL;
	if (timercmp(&itv->it_value, &max, >))
		return EINVAL;
	if (itv->it_interval.tv_sec < 0 || !timerisvalid(&itv->it_interval))
		return EINVAL;
	if (timercmp(&itv->it_interval, &max, >))
		return EINVAL;

	if (!timerisset(&itv->it_value))
		timerclear(&itv->it_interval);
	if (timerisset(&itv->it_interval)) {
		if (timercmp(&itv->it_interval, &min_interval, <))
			itv->it_interval = min_interval;
	}

	return 0;
}

/*
 * Decrement an interval timer by the given duration.
 * If the timer expires and it is periodic, reload it.  When reloading
 * the timer we subtract any overrun from the next period so that the timer
 * does not drift.
 */
int
itimerdecr(struct itimerspec *itp, const struct timespec *decrement)
{
	timespecsub(&itp->it_value, decrement, &itp->it_value);
	if (itp->it_value.tv_sec >= 0 && timespecisset(&itp->it_value))
		return (1);
	if (!timespecisset(&itp->it_interval)) {
		timespecclear(&itp->it_value);
		return (0);
	}
	while (itp->it_value.tv_sec < 0 || !timespecisset(&itp->it_value))
		timespecadd(&itp->it_value, &itp->it_interval, &itp->it_value);
	return (0);
}
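
/*
 * A worked example of the reload loop above: with it_interval = 10 ms,
 * if the decrement leaves it_value at -3 ms (the timer overran by
 * 3 ms), one pass through the loop sets it_value to 7 ms, keeping the
 * next expiry on the original 10 ms grid instead of letting it drift.
 */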

void
itimer_update(struct clockrequest *cr, void *cf, void *arg)
{
	struct timespec elapsed;
	uint64_t nsecs;
	struct clockframe *frame = cf;
	struct proc *p = curproc;
	struct process *pr;

	if (p == NULL || ISSET(p->p_flag, P_SYSTEM | P_WEXIT))
		return;

	pr = p->p_p;
	if (!ISSET(pr->ps_flags, PS_ITIMER))
		return;

	nsecs = clockrequest_advance(cr, hardclock_period) * hardclock_period;
	NSEC_TO_TIMESPEC(nsecs, &elapsed);

	mtx_enter(&itimer_mtx);
	if (CLKF_USERMODE(frame) &&
	    timespecisset(&pr->ps_timer[ITIMER_VIRTUAL].it_value) &&
	    itimerdecr(&pr->ps_timer[ITIMER_VIRTUAL], &elapsed) == 0) {
		process_reset_itimer_flag(pr);
		atomic_setbits_int(&p->p_flag, P_ALRMPEND);
		need_proftick(p);
	}
	if (timespecisset(&pr->ps_timer[ITIMER_PROF].it_value) &&
	    itimerdecr(&pr->ps_timer[ITIMER_PROF], &elapsed) == 0) {
		process_reset_itimer_flag(pr);
		atomic_setbits_int(&p->p_flag, P_PROFPEND);
		need_proftick(p);
	}
	mtx_leave(&itimer_mtx);
}

void
process_reset_itimer_flag(struct process *ps)
{
	if (timespecisset(&ps->ps_timer[ITIMER_VIRTUAL].it_value) ||
	    timespecisset(&ps->ps_timer[ITIMER_PROF].it_value))
		atomic_setbits_int(&ps->ps_flags, PS_ITIMER);
	else
		atomic_clearbits_int(&ps->ps_flags, PS_ITIMER);
}

struct mutex ratecheck_mtx = MUTEX_INITIALIZER(IPL_HIGH);

/*
 * ratecheck(): simple time-based rate-limit checking.  See ratecheck(9)
 * for usage and rationale.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);

	mtx_enter(&ratecheck_mtx);
	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once, even if the interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}
	mtx_leave(&ratecheck_mtx);

	return (rv);
}
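
/*
 * In-kernel usage, a minimal sketch (see ratecheck(9)); the message is
 * illustrative:
 *
 *	static struct timeval lasttime;
 *	static const struct timeval interval = { 10, 0 };
 *
 *	if (ratecheck(&lasttime, &interval))
 *		printf("rate-limited diagnostic\n");
 */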

struct mutex ppsratecheck_mtx = MUTEX_INITIALIZER(IPL_HIGH);

/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	mtx_enter(&ppsratecheck_mtx);
	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once.  If more than one second has passed since the last update
	 * of lasttime, reset the counter.
	 *
	 * We increment *curpps even in the *curpps < maxpps case, as some
	 * callers may want to use *curpps for stat purposes as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;

	mtx_leave(&ppsratecheck_mtx);

	return (rv);
}
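
/*
 * In-kernel usage, a minimal sketch (see ppsratecheck(9)): permit at
 * most 100 events per second.  A negative maxpps permits everything
 * while still counting; a maxpps of 0 suppresses everything.
 * process_event() is illustrative:
 *
 *	static struct timeval lasttime;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttime, &curpps, 100))
 *		process_event();
 */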

todr_chip_handle_t todr_handle;
int inittodr_done;

#define MINYEAR		((OpenBSD / 100) - 1)	/* minimum plausible year */

/*
 * inittodr:
 *
 *	Initialize time from the time-of-day register.
 */
void
inittodr(time_t base)
{
	time_t deltat;
	struct timeval rtctime;
	struct timespec ts;
	int badbase;

	inittodr_done = 1;

	if (base < (MINYEAR - 1970) * SECYR) {
		printf("WARNING: preposterous time in file system\n");
		/* read the system clock anyway */
		base = (MINYEAR - 1970) * SECYR;
		badbase = 1;
	} else
		badbase = 0;

	rtctime.tv_sec = base;
	rtctime.tv_usec = 0;

	if (todr_handle == NULL ||
	    todr_gettime(todr_handle, &rtctime) != 0 ||
	    rtctime.tv_sec < (MINYEAR - 1970) * SECYR) {
		/*
		 * Believe the time in the file system for lack of
		 * anything better, resetting the TODR.
		 */
		rtctime.tv_sec = base;
		rtctime.tv_usec = 0;
		if (todr_handle != NULL && !badbase)
			printf("WARNING: bad clock chip time\n");
		ts.tv_sec = rtctime.tv_sec;
		ts.tv_nsec = rtctime.tv_usec * 1000;
		tc_setclock(&ts);
		goto bad;
	} else {
		ts.tv_sec = rtctime.tv_sec;
		ts.tv_nsec = rtctime.tv_usec * 1000;
		tc_setclock(&ts);
	}

	if (!badbase) {
		/*
		 * See if we gained/lost two or more days; if
		 * so, assume something is amiss.
		 */
		deltat = rtctime.tv_sec - base;
		if (deltat < 0)
			deltat = -deltat;
		if (deltat < 2 * SECDAY)
			return;		/* all is well */
#ifndef SMALL_KERNEL
		printf("WARNING: clock %s %lld days\n",
		    rtctime.tv_sec < base ? "lost" : "gained",
		    (long long)(deltat / SECDAY));
#endif
	}
bad:
	printf("WARNING: CHECK AND RESET THE DATE!\n");
}

/*
 * resettodr:
 *
 *	Reset the time-of-day register with the current time.
 */
void
resettodr(void)
{
	struct timeval rtctime;

	/*
	 * Skip writing the RTC if inittodr(9) never ran.  We don't
	 * want to overwrite a reasonable value with a nonsense value.
	 */
	if (!inittodr_done)
		return;

	microtime(&rtctime);

	if (todr_handle != NULL &&
	    todr_settime(todr_handle, &rtctime) != 0)
		printf("WARNING: can't update clock chip time\n");
}

void
todr_attach(struct todr_chip_handle *todr)
{
	if (todr_handle == NULL ||
	    todr->todr_quality > todr_handle->todr_quality)
		todr_handle = todr;
}

#define RESETTODR_PERIOD	1800

void periodic_resettodr(void *);
void perform_resettodr(void *);

struct timeout resettodr_to = TIMEOUT_INITIALIZER(periodic_resettodr, NULL);
struct task resettodr_task = TASK_INITIALIZER(perform_resettodr, NULL);

void
periodic_resettodr(void *arg __unused)
{
	task_add(systq, &resettodr_task);
}

void
perform_resettodr(void *arg __unused)
{
	resettodr();
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
start_periodic_resettodr(void)
{
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
stop_periodic_resettodr(void)
{
	timeout_del(&resettodr_to);
	task_del(systq, &resettodr_task);
}