/*	$OpenBSD: kern_time.c,v 1.154 2021/06/18 15:59:14 cheloha Exp $	*/
/*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/stdint.h>
#include <sys/pledge.h>
#include <sys/task.h>
#include <sys/timeout.h>
#include <sys/timetc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <dev/clock_subr.h>

int itimerfix(struct itimerval *);

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday. */
int
settime(const struct timespec *ts)
{
	struct timespec now;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 *	how to deal with the hardware RTCs.
	 */
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back).  This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <=)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	tc_setrealtimeclock(ts);
	KERNEL_LOCK();
	resettodr();
	KERNEL_UNLOCK();

	return (0);
}
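
/*
 * Worked example of the cutoff above (illustrative, not from the
 * original source): UINT_MAX is 4294967295 and one year is
 * 365*24*60*60 = 31536000 seconds, so any tv_sec greater than
 * 4294967295 - 31536000 = 4263431295 is refused.  Even if an
 * attacker lands exactly on the cutoff, adjtime(2) would have to
 * slew through a full year of seconds before the counter could
 * wrap and defeat the securelevel check that follows.
 */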

int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct proc *q;
	int error = 0;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		nanoruntime(tp);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
		nanouptime(tp);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				error = ESRCH;
			else
				*tp = q->p_tu.tu_runtime;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}
	return (error);
}

int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	memset(&ats, 0, sizeof(ats));
	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktrabstimespec(p, &ats);
#endif
	return (error);
}
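
/*
 * Userland sketch (assumed example, not part of this file): the
 * syscall is normally reached through the libc wrapper, e.g. to
 * time an operation with a clock that settime() cannot move:
 *
 *	#include <sys/time.h>
 *	#include <time.h>
 *
 *	struct timespec t0, t1, dt;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &t0);
 *	do_work();			(placeholder for the timed work)
 *	clock_gettime(CLOCK_MONOTONIC, &t1);
 *	timespecsub(&t1, &t0, &dt);
 *
 * CLOCK_MONOTONIC never jumps when the wall clock is set, which is
 * why it is preferred over CLOCK_REALTIME for measuring intervals.
 */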

int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	if ((error = suser(p)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		if (!timespecisvalid(&ats))
			return (EINVAL);
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only */
		return (EINVAL);
	}

	return (0);
}

int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct bintime bt;
	struct timespec ts;
	struct proc *q;
	u_int64_t scale;
	int error = 0, realstathz;

	memset(&ts, 0, sizeof(ts));
	realstathz = (stathz == 0) ? hz : stathz;
	clock_id = SCARG(uap, clock_id);

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
	case CLOCK_UPTIME:
		memset(&bt, 0, sizeof(bt));
		rw_enter_read(&tc_lock);
		scale = ((1ULL << 63) / tc_getfrequency()) * 2;
		bt.frac = tc_getprecision() * scale;
		rw_exit_read(&tc_lock);
		BINTIME_TO_TIMESPEC(&bt, &ts);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		ts.tv_nsec = 1000000000 / realstathz;
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				error = ESRCH;
			else
				ts.tv_nsec = 1000000000 / realstathz;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}

	if (error == 0 && SCARG(uap, tp)) {
		ts.tv_nsec = MAX(ts.tv_nsec, 1);
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
	}

	return error;
}
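
/*
 * Userland sketch (assumed example): querying the resolution
 * computed above from the timecounter's precision:
 *
 *	struct timespec res;
 *
 *	if (clock_getres(CLOCK_MONOTONIC, &res) == 0)
 *		printf("resolution: %ld ns\n", res.tv_nsec);
 *
 * The MAX(ts.tv_nsec, 1) above guarantees userland never observes
 * a resolution of zero, even on a very fine-grained timecounter.
 */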

int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	static int chan;
	struct sys_nanosleep_args/* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec elapsed, remainder, request, start, stop;
	uint64_t nsecs;
	struct timespec *rmtp;
	int copyout_error, error;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &request, sizeof(request));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrreltimespec(p, &request);
#endif

	if (request.tv_sec < 0 || !timespecisvalid(&request))
		return (EINVAL);

	do {
		getnanouptime(&start);
		nsecs = MAX(1, MIN(TIMESPEC_TO_NSEC(&request), MAXTSLP));
		error = tsleep_nsec(&chan, PWAIT | PCATCH, "nanoslp", nsecs);
		getnanouptime(&stop);
		timespecsub(&stop, &start, &elapsed);
		timespecsub(&request, &elapsed, &request);
		if (request.tv_sec < 0)
			timespecclear(&request);
		if (error != EWOULDBLOCK)
			break;
	} while (timespecisset(&request));

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		memset(&remainder, 0, sizeof(remainder));
		remainder = request;
		copyout_error = copyout(&remainder, rmtp, sizeof(remainder));
		if (copyout_error)
			error = copyout_error;
#ifdef KTRACE
		if (copyout_error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &remainder);
#endif
	}

	return error;
}
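
/*
 * Userland sketch (assumed example): because the unslept time is
 * copied out to rmtp on EINTR, a caller can resume an interrupted
 * sleep without consulting the clock itself:
 *
 *	struct timespec req = { .tv_sec = 5, .tv_nsec = 0 }, rem;
 *
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;
 */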

int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	static const struct timezone zerotz = { 0, 0 };
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof (atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimeval(p, &atv);
#endif
	}
	if (tzp)
		error = copyout(&zerotz, tzp, sizeof(zerotz));
	return (error);
}

int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p)))
		return (error);
	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
	if (tv) {
		struct timespec ts;

#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimeval(p, &atv);
#endif
		if (!timerisvalid(&atv))
			return (EINVAL);
		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}

	return (0);
}

#define ADJFREQ_MAX (500000000LL << 32)
#define ADJFREQ_MIN (-ADJFREQ_MAX)

int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error = 0;
	int64_t f, oldf;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);

	if (freq) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
		if (f < ADJFREQ_MIN || f > ADJFREQ_MAX)
			return (EINVAL);
	}

	rw_enter(&tc_lock, (freq == NULL) ? RW_READ : RW_WRITE);
	if (oldfreq) {
		tc_adjfreq(&oldf, NULL);
		if ((error = copyout(&oldf, oldfreq, sizeof(oldf))))
			goto out;
	}
	if (freq)
		tc_adjfreq(NULL, &f);
out:
	rw_exit(&tc_lock);
	return (error);
}
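
/*
 * Note on the bounds above (illustrative): adjfreq(2) expresses the
 * frequency adjustment in nanoseconds per second, shifted left 32
 * bits to form a 64.32 fixed-point value.  ADJFREQ_MAX is therefore
 * 500000000 ns/s, i.e. the clock may be asked to run at most 50%
 * fast or slow.
 */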

int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	struct timeval atv;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	int64_t adjustment, remaining;
	int error;

	error = pledge_adjtime(p, delta);
	if (error)
		return error;

	if (delta) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrreltimeval(p, &atv);
#endif
		if (!timerisvalid(&atv))
			return (EINVAL);

		if (atv.tv_sec > INT64_MAX / 1000000)
			return EINVAL;
		if (atv.tv_sec < INT64_MIN / 1000000)
			return EINVAL;
		adjustment = atv.tv_sec * 1000000;
		if (adjustment > INT64_MAX - atv.tv_usec)
			return EINVAL;
		adjustment += atv.tv_usec;

		rw_enter_write(&tc_lock);
	}

	if (olddelta) {
		tc_adjtime(&remaining, NULL);
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec = remaining / 1000000;
		atv.tv_usec = remaining % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			goto out;
	}

	if (delta)
		tc_adjtime(NULL, &adjustment);
out:
	if (delta)
		rw_exit_write(&tc_lock);
	return (error);
}
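
/*
 * Userland sketch (assumed example): slewing the clock 2.5 seconds
 * forward while reading back any adjustment still outstanding:
 *
 *	struct timeval delta = { .tv_sec = 2, .tv_usec = 500000 };
 *	struct timeval olddelta;
 *
 *	if (adjtime(&delta, &olddelta) == -1)
 *		err(1, "adjtime");
 *
 * The delta is flattened to microseconds above, which is why tv_sec
 * is bounded by INT64_MAX / 1000000 before the multiplication.
 */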


struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

/*
 * Get or set the value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer's .it_value
 * from the real time timer's .it_interval.  Rather, we compute the next
 * time in absolute time the timer should go off.
 */
void
setitimer(int which, const struct itimerval *itv, struct itimerval *olditv)
{
	struct itimerspec its, oldits;
	struct timespec now;
	struct itimerspec *itimer;
	struct process *pr;

	KASSERT(which >= ITIMER_REAL && which <= ITIMER_PROF);

	pr = curproc->p_p;
	itimer = &pr->ps_timer[which];

	if (itv != NULL) {
		TIMEVAL_TO_TIMESPEC(&itv->it_value, &its.it_value);
		TIMEVAL_TO_TIMESPEC(&itv->it_interval, &its.it_interval);
	}

	if (which == ITIMER_REAL) {
		mtx_enter(&pr->ps_mtx);
		nanouptime(&now);
	} else
		mtx_enter(&itimer_mtx);

	if (olditv != NULL)
		oldits = *itimer;
	if (itv != NULL) {
		if (which == ITIMER_REAL) {
			if (timespecisset(&its.it_value)) {
				timespecadd(&its.it_value, &now, &its.it_value);
				timeout_at_ts(&pr->ps_realit_to, &its.it_value);
			} else
				timeout_del(&pr->ps_realit_to);
		}
		*itimer = its;
	}

	if (which == ITIMER_REAL)
		mtx_leave(&pr->ps_mtx);
	else
		mtx_leave(&itimer_mtx);

	if (olditv != NULL) {
		if (which == ITIMER_REAL && timespecisset(&oldits.it_value)) {
			if (timespeccmp(&oldits.it_value, &now, <))
				timespecclear(&oldits.it_value);
			else {
				timespecsub(&oldits.it_value, &now,
				    &oldits.it_value);
			}
		}
		TIMESPEC_TO_TIMEVAL(&olditv->it_value, &oldits.it_value);
		TIMESPEC_TO_TIMEVAL(&olditv->it_interval, &oldits.it_interval);
	}
}
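
/*
 * Userland sketch (assumed example): arming a periodic real-time
 * timer that delivers SIGALRM every 100 ms, starting 100 ms from now:
 *
 *	struct itimerval itv = {
 *		.it_value    = { .tv_sec = 0, .tv_usec = 100000 },
 *		.it_interval = { .tv_sec = 0, .tv_usec = 100000 },
 *	};
 *
 *	signal(SIGALRM, handler);
 *	setitimer(ITIMER_REAL, &itv, NULL);
 *
 * As described in the comment above, the relative it_value is
 * converted to an absolute uptime before it is stored, so periodic
 * deliveries do not drift.
 */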

void
cancel_all_itimers(void)
{
	struct itimerval itv;
	int i;

	timerclear(&itv.it_value);
	timerclear(&itv.it_interval);

	for (i = 0; i < nitems(curproc->p_p->ps_timer); i++)
		setitimer(i, &itv, NULL);
}

int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	int which;

	which = SCARG(uap, which);
	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return EINVAL;

	memset(&aitv, 0, sizeof(aitv));

	setitimer(which, NULL, &aitv);

	return copyout(&aitv, SCARG(uap, itv), sizeof(aitv));
}

int
sys_setitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct itimerval aitv, olditv;
	struct itimerval *newitvp, *olditvp;
	int error, which;

	which = SCARG(uap, which);
	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return EINVAL;

	newitvp = olditvp = NULL;
	if (SCARG(uap, itv) != NULL) {
		error = copyin(SCARG(uap, itv), &aitv, sizeof(aitv));
		if (error)
			return error;
		error = itimerfix(&aitv);
		if (error)
			return error;
		newitvp = &aitv;
	}
	if (SCARG(uap, oitv) != NULL) {
		memset(&olditv, 0, sizeof(olditv));
		olditvp = &olditv;
	}
	if (newitvp == NULL && olditvp == NULL)
		return 0;

	setitimer(which, newitvp, olditvp);

	if (SCARG(uap, oitv) != NULL)
		return copyout(&olditv, SCARG(uap, oitv), sizeof(olditv));

	return 0;
}

/*
 * Real interval timer expired:
 * send the process whose timer expired an alarm signal.
 * If the timer is not set up to reload, just return.
 * Else compute the next time the timer should go off, which is
 * greater than the current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(void *arg)
{
	struct timespec cts;
	struct process *pr = arg;
	struct itimerspec *tp = &pr->ps_timer[ITIMER_REAL];
	int need_signal = 0;

	mtx_enter(&pr->ps_mtx);

	/*
	 * Do nothing if the timer was cancelled or rescheduled while we
	 * were entering the mutex.
	 */
	if (!timespecisset(&tp->it_value) || timeout_pending(&pr->ps_realit_to))
		goto out;

	/* The timer expired.  We need to send the signal. */
	need_signal = 1;

	/* One-shot timers are not reloaded. */
	if (!timespecisset(&tp->it_interval)) {
		timespecclear(&tp->it_value);
		goto out;
	}

	/*
	 * Find the nearest future expiration point and restart
	 * the timeout.
	 */
	nanouptime(&cts);
	while (timespeccmp(&tp->it_value, &cts, <=))
		timespecadd(&tp->it_value, &tp->it_interval, &tp->it_value);
	if ((pr->ps_flags & PS_EXITING) == 0)
		timeout_at_ts(&pr->ps_realit_to, &tp->it_value);

out:
	mtx_leave(&pr->ps_mtx);

	if (need_signal)
		prsignal(pr, SIGALRM);
}
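
/*
 * Worked example of the reload loop above (illustrative): with a
 * 100 ms interval, if softclock processing is late and it_value is
 * 10.0 when the current uptime is already 10.35, the loop advances
 * it_value through 10.1, 10.2, 10.3 and stops at 10.4, the nearest
 * future point on the original schedule.  The three overdue
 * expirations are compressed into the single SIGALRM sent above,
 * and the timer remains phase-locked instead of drifting by the
 * processing delay.
 */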

/*
 * Check if the given setitimer(2) input is valid.  Clear it_interval
 * if it_value is unset.  Round it_interval up to the minimum interval
 * if necessary.
 */
int
itimerfix(struct itimerval *itv)
{
	static const struct timeval max = { .tv_sec = UINT_MAX, .tv_usec = 0 };
	struct timeval min_interval = { .tv_sec = 0, .tv_usec = tick };

	if (itv->it_value.tv_sec < 0 || !timerisvalid(&itv->it_value))
		return EINVAL;
	if (timercmp(&itv->it_value, &max, >))
		return EINVAL;
	if (itv->it_interval.tv_sec < 0 || !timerisvalid(&itv->it_interval))
		return EINVAL;
	if (timercmp(&itv->it_interval, &max, >))
		return EINVAL;

	if (!timerisset(&itv->it_value))
		timerclear(&itv->it_interval);
	if (timerisset(&itv->it_interval)) {
		if (timercmp(&itv->it_interval, &min_interval, <))
			itv->it_interval = min_interval;
	}

	return 0;
}
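
/*
 * Example of the rounding above (illustrative): with hz = 100 the
 * tick is 10000 microseconds, so a request such as
 *
 *	struct itimerval itv = {
 *		.it_value    = { .tv_sec = 1, .tv_usec = 0 },
 *		.it_interval = { .tv_sec = 0, .tv_usec = 5 },
 *	};
 *
 * keeps its it_value but has the 5 us interval silently raised to
 * one tick (10 ms).  Only when it_value is unset is it_interval
 * cleared outright.
 */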

/*
 * Decrement an interval timer by the given number of nanoseconds.
 * If the timer expires and it is periodic then reload it.  When reloading
 * the timer we subtract any overrun from the next period so that the timer
 * does not drift.
 */
int
itimerdecr(struct itimerspec *itp, long nsec)
{
	struct timespec decrement;

	NSEC_TO_TIMESPEC(nsec, &decrement);

	mtx_enter(&itimer_mtx);

	/*
	 * Double-check that the timer is enabled.  A different thread
	 * in setitimer(2) may have disabled it while we were entering
	 * the mutex.
	 */
	if (!timespecisset(&itp->it_value)) {
		mtx_leave(&itimer_mtx);
		return (1);
	}

	/*
	 * The timer is enabled.  Update and reload it as needed.
	 */
	timespecsub(&itp->it_value, &decrement, &itp->it_value);
	if (itp->it_value.tv_sec >= 0 && timespecisset(&itp->it_value)) {
		mtx_leave(&itimer_mtx);
		return (1);
	}
	if (!timespecisset(&itp->it_interval)) {
		timespecclear(&itp->it_value);
		mtx_leave(&itimer_mtx);
		return (0);
	}
	while (itp->it_value.tv_sec < 0 || !timespecisset(&itp->it_value))
		timespecadd(&itp->it_value, &itp->it_interval, &itp->it_value);
	mtx_leave(&itimer_mtx);
	return (0);
}
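
/*
 * Worked example (illustrative): if it_value is 3 ms with a 10 ms
 * interval and hardclock hands us a 4 ms decrement, it_value drops
 * to -1 ms; the reload loop then adds one interval, leaving 9 ms.
 * The 1 ms overrun is thus subtracted from the next period, so the
 * timer averages exactly one expiration per interval over time.
 */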

/*
 * ratecheck(): simple time-based rate-limit checking.  See ratecheck(9)
 * for usage and rationale.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once, even if the interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}
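
/*
 * Kernel usage sketch (assumed example, in the style of the
 * ratecheck(9) manual): limit a diagnostic to one line every ten
 * seconds:
 *
 *	static struct timeval lasterr;
 *	static const struct timeval errrate = { 10, 0 };
 *
 *	if (ratecheck(&lasterr, &errrate))
 *		printf("device: transient error\n");
 */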

/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once.  If more than one second has passed since the last update
	 * of lasttime, reset the counter.
	 *
	 * We increment *curpps even in the *curpps < maxpps case, as some
	 * callers may use *curpps for stat purposes as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /*DIAGNOSTIC?*/
	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;
#else
	/*
	 * Assume that there are not too many calls to this function.
	 * It is not certain the assumption holds, as it depends on the
	 * *caller's* behavior, not the behavior of this function.
	 * IMHO it is wrong to make assumptions about the caller's
	 * behavior, so the above #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}
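
/*
 * Kernel usage sketch (assumed example): log at most five events
 * per second.  Because *curpps is incremented even when the check
 * fails, it doubles as a running event counter:
 *
 *	static struct timeval lastdrop;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lastdrop, &curpps, 5))
 *		log(LOG_INFO, "dropped packet\n");
 */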

todr_chip_handle_t todr_handle;
int inittodr_done;

#define MINYEAR		((OpenBSD / 100) - 1)	/* minimum plausible year */

/*
 * inittodr:
 *
 *      Initialize time from the time-of-day register.
 */
void
inittodr(time_t base)
{
	time_t deltat;
	struct timeval rtctime;
	struct timespec ts;
	int badbase;

	inittodr_done = 1;

	if (base < (MINYEAR - 1970) * SECYR) {
		printf("WARNING: preposterous time in file system\n");
		/* read the system clock anyway */
		base = (MINYEAR - 1970) * SECYR;
		badbase = 1;
	} else
		badbase = 0;

	rtctime.tv_sec = base;
	rtctime.tv_usec = 0;

	if (todr_handle == NULL ||
	    todr_gettime(todr_handle, &rtctime) != 0 ||
	    rtctime.tv_sec < (MINYEAR - 1970) * SECYR) {
		/*
		 * Believe the time in the file system for lack of
		 * anything better, resetting the TODR.
		 */
		rtctime.tv_sec = base;
		rtctime.tv_usec = 0;
		if (todr_handle != NULL && !badbase)
			printf("WARNING: bad clock chip time\n");
		ts.tv_sec = rtctime.tv_sec;
		ts.tv_nsec = rtctime.tv_usec * 1000;
		tc_setclock(&ts);
		goto bad;
	} else {
		ts.tv_sec = rtctime.tv_sec;
		ts.tv_nsec = rtctime.tv_usec * 1000;
		tc_setclock(&ts);
	}

	if (!badbase) {
		/*
		 * See if we gained/lost two or more days; if
		 * so, assume something is amiss.
		 */
		deltat = rtctime.tv_sec - base;
		if (deltat < 0)
			deltat = -deltat;
		if (deltat < 2 * SECDAY)
			return;         /* all is well */
#ifndef SMALL_KERNEL
		printf("WARNING: clock %s %lld days\n",
		    rtctime.tv_sec < base ? "lost" : "gained",
		    (long long)(deltat / SECDAY));
#endif
	}
 bad:
	printf("WARNING: CHECK AND RESET THE DATE!\n");
}

/*
 * resettodr:
 *
 *      Reset the time-of-day register with the current time.
 */
void
resettodr(void)
{
	struct timeval rtctime;

	/*
	 * Skip writing the RTC if inittodr(9) never ran.  We don't
	 * want to overwrite a reasonable value with a nonsense value.
	 */
	if (!inittodr_done)
		return;

	microtime(&rtctime);

	if (todr_handle != NULL &&
	    todr_settime(todr_handle, &rtctime) != 0)
		printf("WARNING: can't update clock chip time\n");
}

void
todr_attach(struct todr_chip_handle *todr)
{
	todr_handle = todr;
}

#define RESETTODR_PERIOD	1800

void periodic_resettodr(void *);
void perform_resettodr(void *);

struct timeout resettodr_to = TIMEOUT_INITIALIZER(periodic_resettodr, NULL);
struct task resettodr_task = TASK_INITIALIZER(perform_resettodr, NULL);

void
periodic_resettodr(void *arg __unused)
{
	task_add(systq, &resettodr_task);
}

void
perform_resettodr(void *arg __unused)
{
	resettodr();
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
start_periodic_resettodr(void)
{
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
stop_periodic_resettodr(void)
{
	timeout_del(&resettodr_to);
	task_del(systq, &resettodr_task);
}