xref: /openbsd/sys/kern/kern_time.c (revision 106c68c4)
/*	$OpenBSD: kern_time.c,v 1.167 2023/10/17 00:04:02 cheloha Exp $	*/
/*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/clockintr.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/signalvar.h>
#include <sys/stdint.h>
#include <sys/pledge.h>
#include <sys/task.h>
#include <sys/time.h>
#include <sys/timeout.h>
#include <sys/timetc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <dev/clock_subr.h>

int itimerfix(struct itimerval *);
void process_reset_itimer_flag(struct process *);

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday. */
int
settime(const struct timespec *ts)
{
	struct timespec now;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 *	how to deal with the hardware RTCs.
	 */
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back).  This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <=)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	tc_setrealtimeclock(ts);
	KERNEL_LOCK();
	resettodr();
	KERNEL_UNLOCK();

	return (0);
}
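
/*
 * Editor's note (not part of the original source): the cutoff above
 * works out to UINT_MAX (4294967295) minus one year (365*24*60*60 =
 * 31536000 seconds), i.e. 4263431295 -- about one year before a 32-bit
 * unsigned time_t wraps in February 2106.
 */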

int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct proc *q;
	int error = 0;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		nanoruntime(tp);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
		nanouptime(tp);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_tu.tu_runtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind_user(__CLOCK_PTID(clock_id), p->p_p);
			if (q == NULL)
				error = ESRCH;
			else
				*tp = q->p_tu.tu_runtime;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}
	return (error);
}
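
/*
 * Editor's sketch (not part of the original source): the default case
 * above services clock IDs minted by pthread_getcpuclockid(3).  A
 * minimal userland caller might look like this:
 */
#if 0	/* example only; never compiled with the kernel */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
	clockid_t cid;
	struct timespec ts;

	/* Encode the calling thread's ID into a per-thread CPU clock. */
	if (pthread_getcpuclockid(pthread_self(), &cid) != 0)
		return 1;
	/* Lands in the __CLOCK_TYPE()/__CLOCK_PTID() branch above. */
	if (clock_gettime(cid, &ts) == -1)
		return 1;
	printf("thread cpu time: %lld.%09ld\n",
	    (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
#endif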

int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	memset(&ats, 0, sizeof(ats));
	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktrabstimespec(p, &ats);
#endif
	return (error);
}

int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	if ((error = suser(p)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		if (!timespecisvalid(&ats))
			return (EINVAL);
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only */
		return (EINVAL);
	}

	return (0);
}

int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct bintime bt;
	struct timespec ts;
	struct proc *q;
	u_int64_t scale;
	int error = 0;

	memset(&ts, 0, sizeof(ts));
	clock_id = SCARG(uap, clock_id);

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
	case CLOCK_UPTIME:
		memset(&bt, 0, sizeof(bt));
		rw_enter_read(&tc_lock);
		scale = ((1ULL << 63) / tc_getfrequency()) * 2;
		bt.frac = tc_getprecision() * scale;
		rw_exit_read(&tc_lock);
		BINTIME_TO_TIMESPEC(&bt, &ts);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		ts.tv_nsec = 1000000000 / stathz;
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind_user(__CLOCK_PTID(clock_id), p->p_p);
			if (q == NULL)
				error = ESRCH;
			else
				ts.tv_nsec = 1000000000 / stathz;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}

	if (error == 0 && SCARG(uap, tp)) {
		ts.tv_nsec = MAX(ts.tv_nsec, 1);
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
	}

	return error;
}

int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	struct sys_nanosleep_args /* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec elapsed, remainder, request, start, stop;
	uint64_t nsecs;
	struct timespec *rmtp;
	int copyout_error, error;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &request, sizeof(request));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrreltimespec(p, &request);
#endif

	if (request.tv_sec < 0 || !timespecisvalid(&request))
		return (EINVAL);

	do {
		getnanouptime(&start);
		nsecs = MAX(1, MIN(TIMESPEC_TO_NSEC(&request), MAXTSLP));
		error = tsleep_nsec(&nowake, PWAIT | PCATCH, "nanoslp", nsecs);
		getnanouptime(&stop);
		timespecsub(&stop, &start, &elapsed);
		timespecsub(&request, &elapsed, &request);
		if (request.tv_sec < 0)
			timespecclear(&request);
		if (error != EWOULDBLOCK)
			break;
	} while (timespecisset(&request));

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		memset(&remainder, 0, sizeof(remainder));
		remainder = request;
		copyout_error = copyout(&remainder, rmtp, sizeof(remainder));
		if (copyout_error)
			error = copyout_error;
#ifdef KTRACE
		if (copyout_error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &remainder);
#endif
	}

	return error;
}
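
/*
 * Editor's sketch (not part of the original source): how a userland
 * program typically consumes the remainder the loop above maintains,
 * restarting after a signal until the full interval has elapsed:
 */
#if 0	/* example only; never compiled with the kernel */
#include <errno.h>
#include <time.h>

void
sleep_fully(struct timespec req)
{
	struct timespec rem;

	/* If a signal interrupts the sleep, resume with what is left. */
	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
		req = rem;
}
#endif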

int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	static const struct timezone zerotz = { 0, 0 };
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof(atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimeval(p, &atv);
#endif
	}
	if (tzp)
		error = copyout(&zerotz, tzp, sizeof(zerotz));
	return (error);
}

int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p)))
		return (error);
	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
	if (tv) {
		struct timespec ts;

#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimeval(p, &atv);
#endif
		if (!timerisvalid(&atv))
			return (EINVAL);
		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}

	return (0);
}

#define ADJFREQ_MAX (500000000LL << 32)
#define ADJFREQ_MIN (-ADJFREQ_MAX)

int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error = 0;
	int64_t f, oldf;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);

	if (freq) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
		if (f < ADJFREQ_MIN || f > ADJFREQ_MAX)
			return (EINVAL);
	}

	rw_enter(&tc_lock, (freq == NULL) ? RW_READ : RW_WRITE);
	if (oldfreq) {
		tc_adjfreq(&oldf, NULL);
		if ((error = copyout(&oldf, oldfreq, sizeof(oldf))))
			goto out;
	}
	if (freq)
		tc_adjfreq(NULL, &f);
out:
	rw_exit(&tc_lock);
	return (error);
}

int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	struct timeval atv;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	int64_t adjustment, remaining;
	int error;

	error = pledge_adjtime(p, delta);
	if (error)
		return error;

	if (delta) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrreltimeval(p, &atv);
#endif
		if (!timerisvalid(&atv))
			return (EINVAL);

		if (atv.tv_sec > INT64_MAX / 1000000)
			return EINVAL;
		if (atv.tv_sec < INT64_MIN / 1000000)
			return EINVAL;
		adjustment = atv.tv_sec * 1000000;
		if (adjustment > INT64_MAX - atv.tv_usec)
			return EINVAL;
		adjustment += atv.tv_usec;

		rw_enter_write(&tc_lock);
	}

	if (olddelta) {
		tc_adjtime(&remaining, NULL);
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec = remaining / 1000000;
		atv.tv_usec = remaining % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			goto out;
	}

	if (delta)
		tc_adjtime(NULL, &adjustment);
out:
	if (delta)
		rw_exit_write(&tc_lock);
	return (error);
}


struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

/*
 * Get or set the value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer's .it_value
 * from the real time timer's .it_interval.  Rather, we compute, in
 * absolute time, the next time the timer should go off.
 */
void
setitimer(int which, const struct itimerval *itv, struct itimerval *olditv)
{
	struct itimerspec its, oldits;
	struct timespec now;
	struct itimerspec *itimer;
	struct process *pr;

	KASSERT(which >= ITIMER_REAL && which <= ITIMER_PROF);

	pr = curproc->p_p;
	itimer = &pr->ps_timer[which];

	if (itv != NULL) {
		TIMEVAL_TO_TIMESPEC(&itv->it_value, &its.it_value);
		TIMEVAL_TO_TIMESPEC(&itv->it_interval, &its.it_interval);
	}

	if (which == ITIMER_REAL) {
		mtx_enter(&pr->ps_mtx);
		nanouptime(&now);
	} else
		mtx_enter(&itimer_mtx);

	if (olditv != NULL)
		oldits = *itimer;
	if (itv != NULL) {
		if (which == ITIMER_REAL) {
			if (timespecisset(&its.it_value)) {
				timespecadd(&its.it_value, &now, &its.it_value);
				timeout_abs_ts(&pr->ps_realit_to,
				    &its.it_value);
			} else
				timeout_del(&pr->ps_realit_to);
		}
		*itimer = its;
		if (which == ITIMER_VIRTUAL || which == ITIMER_PROF) {
			process_reset_itimer_flag(pr);
			need_resched(curcpu());
		}
	}

	if (which == ITIMER_REAL)
		mtx_leave(&pr->ps_mtx);
	else
		mtx_leave(&itimer_mtx);

	if (olditv != NULL) {
		if (which == ITIMER_REAL && timespecisset(&oldits.it_value)) {
			if (timespeccmp(&oldits.it_value, &now, <))
				timespecclear(&oldits.it_value);
			else {
				timespecsub(&oldits.it_value, &now,
				    &oldits.it_value);
			}
		}
		TIMESPEC_TO_TIMEVAL(&olditv->it_value, &oldits.it_value);
		TIMESPEC_TO_TIMEVAL(&olditv->it_interval, &oldits.it_interval);
	}
}
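
/*
 * Editor's sketch (not part of the original source): a periodic
 * ITIMER_REAL consumer.  Because the kernel keeps it_value as an
 * absolute time (see the comment above setitimer()), the SIGALRM
 * cadence below does not drift even when delivery is delayed:
 */
#if 0	/* example only; never compiled with the kernel */
#include <sys/time.h>
#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;

static void
on_alarm(int sig)
{
	ticks++;
}

int
main(void)
{
	struct itimerval itv = {
		.it_value    = { .tv_sec = 1, .tv_usec = 0 },
		.it_interval = { .tv_sec = 1, .tv_usec = 0 },
	};

	signal(SIGALRM, on_alarm);
	setitimer(ITIMER_REAL, &itv, NULL);
	for (;;)
		pause();	/* one SIGALRM per second, drift-free */
}
#endif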

void
cancel_all_itimers(void)
{
	struct itimerval itv;
	int i;

	timerclear(&itv.it_value);
	timerclear(&itv.it_interval);

	for (i = 0; i < nitems(curproc->p_p->ps_timer); i++)
		setitimer(i, &itv, NULL);
}

int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	int which;

	which = SCARG(uap, which);
	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return EINVAL;

	memset(&aitv, 0, sizeof(aitv));

	setitimer(which, NULL, &aitv);

	return copyout(&aitv, SCARG(uap, itv), sizeof(aitv));
}

int
sys_setitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct itimerval aitv, olditv;
	struct itimerval *newitvp, *olditvp;
	int error, which;

	which = SCARG(uap, which);
	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return EINVAL;

	newitvp = olditvp = NULL;
	if (SCARG(uap, itv) != NULL) {
		error = copyin(SCARG(uap, itv), &aitv, sizeof(aitv));
		if (error)
			return error;
		error = itimerfix(&aitv);
		if (error)
			return error;
		newitvp = &aitv;
	}
	if (SCARG(uap, oitv) != NULL) {
		memset(&olditv, 0, sizeof(olditv));
		olditvp = &olditv;
	}
	if (newitvp == NULL && olditvp == NULL)
		return 0;

	setitimer(which, newitvp, olditvp);

	if (SCARG(uap, oitv) != NULL)
		return copyout(&olditv, SCARG(uap, oitv), sizeof(olditv));

	return 0;
}

/*
 * Real interval timer expired:
 * send the process whose timer expired an alarm signal.
 * If the timer is not set up to reload, just return.
 * Otherwise, compute the next time the timer should go off, which is
 * later than the current time.
 * This is where a delay in processing this timeout causes multiple
 * SIGALRM signals to be compressed into one.
 */
void
realitexpire(void *arg)
{
	struct timespec cts;
	struct process *pr = arg;
	struct itimerspec *tp = &pr->ps_timer[ITIMER_REAL];
	int need_signal = 0;

	mtx_enter(&pr->ps_mtx);

	/*
	 * Do nothing if the timer was cancelled or rescheduled while we
	 * were entering the mutex.
	 */
	if (!timespecisset(&tp->it_value) || timeout_pending(&pr->ps_realit_to))
		goto out;

	/* The timer expired.  We need to send the signal. */
	need_signal = 1;

	/* One-shot timers are not reloaded. */
	if (!timespecisset(&tp->it_interval)) {
		timespecclear(&tp->it_value);
		goto out;
	}

	/*
	 * Find the nearest future expiration point and restart
	 * the timeout.
	 */
	nanouptime(&cts);
	while (timespeccmp(&tp->it_value, &cts, <=))
		timespecadd(&tp->it_value, &tp->it_interval, &tp->it_value);
	if ((pr->ps_flags & PS_EXITING) == 0)
		timeout_abs_ts(&pr->ps_realit_to, &tp->it_value);

out:
	mtx_leave(&pr->ps_mtx);

	if (need_signal)
		prsignal(pr, SIGALRM);
}

/*
 * Check if the given setitimer(2) input is valid.  Clear it_interval
 * if it_value is unset.  Round it_interval up to the minimum interval
 * if necessary.
 */
int
itimerfix(struct itimerval *itv)
{
	static const struct timeval max = { .tv_sec = UINT_MAX, .tv_usec = 0 };
	struct timeval min_interval = { .tv_sec = 0, .tv_usec = tick };

	if (itv->it_value.tv_sec < 0 || !timerisvalid(&itv->it_value))
		return EINVAL;
	if (timercmp(&itv->it_value, &max, >))
		return EINVAL;
	if (itv->it_interval.tv_sec < 0 || !timerisvalid(&itv->it_interval))
		return EINVAL;
	if (timercmp(&itv->it_interval, &max, >))
		return EINVAL;

	if (!timerisset(&itv->it_value))
		timerclear(&itv->it_interval);
	if (timerisset(&itv->it_interval)) {
		if (timercmp(&itv->it_interval, &min_interval, <))
			itv->it_interval = min_interval;
	}

	return 0;
}

/*
 * Decrement an interval timer by the given duration.
 * If the timer expires and it is periodic, then reload it.  When reloading
 * the timer we subtract any overrun from the next period so that the timer
 * does not drift.
 */
int
itimerdecr(struct itimerspec *itp, const struct timespec *decrement)
{
	timespecsub(&itp->it_value, decrement, &itp->it_value);
	if (itp->it_value.tv_sec >= 0 && timespecisset(&itp->it_value))
		return (1);
	if (!timespecisset(&itp->it_interval)) {
		timespecclear(&itp->it_value);
		return (0);
	}
	while (itp->it_value.tv_sec < 0 || !timespecisset(&itp->it_value))
		timespecadd(&itp->it_value, &itp->it_interval, &itp->it_value);
	return (0);
}
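
/*
 * Editor's illustration (not part of the original source): suppose a
 * periodic timer with it_interval = 100ms is decremented by 250ms in
 * one call because several ticks were coalesced.  After the subtraction
 * it_value is -150ms; the reload loop above adds it_interval twice
 * (-150ms -> -50ms -> +50ms), so the next expiry lands 50ms out and the
 * overrun is absorbed rather than accumulating as drift.
 */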

void
itimer_update(struct clockrequest *cr, void *cf, void *arg)
{
	struct timespec elapsed;
	uint64_t nsecs;
	struct clockframe *frame = cf;
	struct proc *p = curproc;
	struct process *pr;

	if (p == NULL || ISSET(p->p_flag, P_SYSTEM | P_WEXIT))
		return;

	pr = p->p_p;
	if (!ISSET(pr->ps_flags, PS_ITIMER))
		return;

	nsecs = clockrequest_advance(cr, hardclock_period) * hardclock_period;
	NSEC_TO_TIMESPEC(nsecs, &elapsed);

	mtx_enter(&itimer_mtx);
	if (CLKF_USERMODE(frame) &&
	    timespecisset(&pr->ps_timer[ITIMER_VIRTUAL].it_value) &&
	    itimerdecr(&pr->ps_timer[ITIMER_VIRTUAL], &elapsed) == 0) {
		process_reset_itimer_flag(pr);
		atomic_setbits_int(&p->p_flag, P_ALRMPEND);
		need_proftick(p);
	}
	if (timespecisset(&pr->ps_timer[ITIMER_PROF].it_value) &&
	    itimerdecr(&pr->ps_timer[ITIMER_PROF], &elapsed) == 0) {
		process_reset_itimer_flag(pr);
		atomic_setbits_int(&p->p_flag, P_PROFPEND);
		need_proftick(p);
	}
	mtx_leave(&itimer_mtx);
}

void
process_reset_itimer_flag(struct process *ps)
{
	if (timespecisset(&ps->ps_timer[ITIMER_VIRTUAL].it_value) ||
	    timespecisset(&ps->ps_timer[ITIMER_PROF].it_value))
		atomic_setbits_int(&ps->ps_flags, PS_ITIMER);
	else
		atomic_clearbits_int(&ps->ps_flags, PS_ITIMER);
}

struct mutex ratecheck_mtx = MUTEX_INITIALIZER(IPL_HIGH);

/*
 * ratecheck(): simple time-based rate-limit checking.  See ratecheck(9)
 * for usage and rationale.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);

	mtx_enter(&ratecheck_mtx);
	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 ensures the message will be seen at least
	 * once, even if the interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}
	mtx_leave(&ratecheck_mtx);

	return (rv);
}
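
/*
 * Editor's sketch (not part of the original source): typical
 * ratecheck(9) usage, throttling a diagnostic to once every 10 seconds.
 * The function and device names below are hypothetical:
 */
#if 0	/* example only; illustrative caller */
void
mydev_overrun(void)
{
	static struct timeval lasttime;
	static const struct timeval interval = { .tv_sec = 10, .tv_usec = 0 };

	/* Log at most once per interval, but always the first time. */
	if (ratecheck(&lasttime, &interval))
		printf("mydev0: input overrun\n");
}
#endif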

struct mutex ppsratecheck_mtx = MUTEX_INITIALIZER(IPL_HIGH);

/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	mtx_enter(&ppsratecheck_mtx);
	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 ensures the message will be seen at least
	 * once.  If more than one second has passed since the last update
	 * of lasttime, reset the counter.
	 *
	 * We do increment *curpps even in the *curpps < maxpps case, as
	 * some callers may want to use *curpps for stat purposes as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

	/* Be careful about wrap-around. */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;

	mtx_leave(&ppsratecheck_mtx);

	return (rv);
}
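
/*
 * Editor's sketch (not part of the original source): ppsratecheck(9) as
 * commonly used to cap per-second event logging, here at 5 per second.
 * The counter still ticks for statistics either way.  The function and
 * device names below are hypothetical:
 */
#if 0	/* example only; illustrative caller */
void
mydev_drop(void)
{
	static struct timeval lasttime;
	static int curpps;

	/* Log at most 5 drops per second; curpps still counts them all. */
	if (ppsratecheck(&lasttime, &curpps, 5))
		printf("mydev0: dropped packet\n");
}
#endif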

todr_chip_handle_t todr_handle;
int inittodr_done;

#define MINYEAR		((OpenBSD / 100) - 1)	/* minimum plausible year */

/*
 * inittodr:
 *
 *      Initialize time from the time-of-day register.
 */
void
inittodr(time_t base)
{
	time_t deltat;
	struct timeval rtctime;
	struct timespec ts;
	int badbase;

	inittodr_done = 1;

	if (base < (MINYEAR - 1970) * SECYR) {
		printf("WARNING: preposterous time in file system\n");
		/* read the system clock anyway */
		base = (MINYEAR - 1970) * SECYR;
		badbase = 1;
	} else
		badbase = 0;

	rtctime.tv_sec = base;
	rtctime.tv_usec = 0;

	if (todr_handle == NULL ||
	    todr_gettime(todr_handle, &rtctime) != 0 ||
	    rtctime.tv_sec < (MINYEAR - 1970) * SECYR) {
		/*
		 * Believe the time in the file system for lack of
		 * anything better, resetting the TODR.
		 */
		rtctime.tv_sec = base;
		rtctime.tv_usec = 0;
		if (todr_handle != NULL && !badbase)
			printf("WARNING: bad clock chip time\n");
		ts.tv_sec = rtctime.tv_sec;
		ts.tv_nsec = rtctime.tv_usec * 1000;
		tc_setclock(&ts);
		goto bad;
	} else {
		ts.tv_sec = rtctime.tv_sec;
		ts.tv_nsec = rtctime.tv_usec * 1000;
		tc_setclock(&ts);
	}

	if (!badbase) {
		/*
		 * See if we gained/lost two or more days; if
		 * so, assume something is amiss.
		 */
		deltat = rtctime.tv_sec - base;
		if (deltat < 0)
			deltat = -deltat;
		if (deltat < 2 * SECDAY)
			return;		/* all is well */
#ifndef SMALL_KERNEL
		printf("WARNING: clock %s %lld days\n",
		    rtctime.tv_sec < base ? "lost" : "gained",
		    (long long)(deltat / SECDAY));
#endif
	}
 bad:
	printf("WARNING: CHECK AND RESET THE DATE!\n");
}

/*
 * resettodr:
 *
 *      Reset the time-of-day register with the current time.
 */
void
resettodr(void)
{
	struct timeval rtctime;

	/*
	 * Skip writing the RTC if inittodr(9) never ran.  We don't
	 * want to overwrite a reasonable value with a nonsense value.
	 */
	if (!inittodr_done)
		return;

	microtime(&rtctime);

	if (todr_handle != NULL &&
	    todr_settime(todr_handle, &rtctime) != 0)
		printf("WARNING: can't update clock chip time\n");
}

void
todr_attach(struct todr_chip_handle *todr)
{
	if (todr_handle == NULL ||
	    todr->todr_quality > todr_handle->todr_quality)
		todr_handle = todr;
}

#define RESETTODR_PERIOD	1800

void periodic_resettodr(void *);
void perform_resettodr(void *);

struct timeout resettodr_to = TIMEOUT_INITIALIZER(periodic_resettodr, NULL);
struct task resettodr_task = TASK_INITIALIZER(perform_resettodr, NULL);

void
periodic_resettodr(void *arg __unused)
{
	task_add(systq, &resettodr_task);
}

void
perform_resettodr(void *arg __unused)
{
	resettodr();
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
start_periodic_resettodr(void)
{
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
stop_periodic_resettodr(void)
{
	timeout_del(&resettodr_to);
	task_del(systq, &resettodr_task);
}