/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/kern_time.c,v 1.68.2.1 2002/10/01 08:00:41 bde Exp $
 * $DragonFly: src/sys/kern/kern_time.c,v 1.40 2008/04/02 14:16:16 sephe Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sysent.h>
#include <sys/sysunion.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <sys/msgport2.h>
#include <sys/thread2.h>

struct timezone tz;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

static int	nanosleep1(struct timespec *rqt, struct timespec *rmt);
static int	settime(struct timeval *);
static void	timevalfix(struct timeval *);

static int	sleep_hard_us = 100;
SYSCTL_INT(_kern, OID_AUTO, sleep_hard_us, CTLFLAG_RW, &sleep_hard_us, 0, "");

static int
settime(struct timeval *tv)
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen.  The worst a miscreant can do in
	 * this circumstance is "freeze" time; the clock cannot be
	 * stepped back into the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second.  This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel > 1) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				kprintf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec) {
				crit_exit();
				return (EPERM);
			}
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				kprintf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	set_timeofday(&ts);
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));

	resettodr();
	return (0);
}
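
/*
 * Illustrative sketch (not part of this file): what the securelevel
 * policy above looks like from userland.  This assumes a system
 * running at securelevel > 1 and a privileged caller; the program is
 * hypothetical and only demonstrates the clamping behavior documented
 * in settime().
 *
 *	#include <sys/time.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct timeval tv;
 *
 *		gettimeofday(&tv, NULL);
 *		tv.tv_sec += 3600;		// ask for a one-hour jump
 *		if (settimeofday(&tv, NULL) == 0) {
 *			// At securelevel > 1 settime() clamps the step
 *			// to +1 second and logs the clamp, so the clock
 *			// moved ahead by at most one second, not an hour.
 *			gettimeofday(&tv, NULL);
 *			printf("now: %ld\n", (long)tv.tv_sec);
 *		}
 *		return (0);
 *	}
 */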
/* ARGSUSED */
int
sys_clock_gettime(struct clock_gettime_args *uap)
{
	struct timespec ats;

	switch(uap->clock_id) {
	case CLOCK_REALTIME:
		nanotime(&ats);
		return (copyout(&ats, uap->tp, sizeof(ats)));
	case CLOCK_MONOTONIC:
		nanouptime(&ats);
		return (copyout(&ats, uap->tp, sizeof(ats)));
	default:
		return (EINVAL);
	}
}

/* ARGSUSED */
int
sys_clock_settime(struct clock_settime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timespec ats;
	int error;

	if ((error = priv_check(td, PRIV_ROOT)) != 0)
		return (error);
	switch(uap->clock_id) {
	case CLOCK_REALTIME:
		if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
			return (error);
		if (ats.tv_nsec < 0 || ats.tv_nsec >= 1000000000)
			return (EINVAL);
		/* XXX Don't convert nsec->usec and back */
		TIMESPEC_TO_TIMEVAL(&atv, &ats);
		error = settime(&atv);
		return (error);
	default:
		return (EINVAL);
	}
}

int
sys_clock_getres(struct clock_getres_args *uap)
{
	struct timespec ts;

	switch(uap->clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		/*
		 * Round up the result of the division cheaply
		 * by adding 1.  Rounding up is especially important
		 * if rounding down would give 0.  Perfect rounding
		 * is unimportant.
		 */
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / sys_cputimer->freq + 1;
		return (copyout(&ts, uap->tp, sizeof(ts)));
	default:
		return (EINVAL);
	}
}

/*
 * nanosleep1()
 *
 * This is a general helper function for nanosleep() (aka sleep() aka
 * usleep()).
 *
 * If there is less than one tick's worth of time left and
 * we haven't done a yield, or the remaining microseconds is
 * ridiculously low, do a yield.  This avoids having
 * to deal with systimer overheads when the system is under
 * heavy loads.  If we have done a yield already then use
 * a systimer and an uninterruptible thread wait.
 *
 * If there is more than a tick's worth of time left,
 * calculate the baseline ticks and use an interruptible
 * tsleep, then handle the fine-grained delay on the next
 * loop.  This usually results in two sleeps occurring, a long one
 * and a short one.
 */
static void
ns1_systimer(systimer_t info)
{
	lwkt_schedule(info->data);
}

static int
nanosleep1(struct timespec *rqt, struct timespec *rmt)
{
	static int nanowait;
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;
	int tried_yield;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
	nanouptime(&ts);
	timespecadd(&ts, rqt);		/* ts = target timestamp compare */
	TIMESPEC_TO_TIMEVAL(&tv, rqt);	/* tv = sleep interval */
	tried_yield = 0;

	for (;;) {
		int ticks;
		struct systimer info;

		ticks = tv.tv_usec / tick;	/* approximate */

		if (tv.tv_sec == 0 && ticks == 0) {
			thread_t td = curthread;
			if (tried_yield || tv.tv_usec < sleep_hard_us) {
				tried_yield = 0;
				uio_yield();
			} else {
				crit_enter_quick(td);
				systimer_init_oneshot(&info, ns1_systimer,
						      td, tv.tv_usec);
				lwkt_deschedule_self(td);
				crit_exit_quick(td);
				lwkt_switch();
				systimer_del(&info);	/* make sure it's gone */
			}
			error = iscaught(td->td_lwp);
		} else if (tv.tv_sec == 0) {
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		} else {
			ticks = tvtohz_low(&tv);	/* also handles overflow */
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		}
		nanouptime(&ts2);
		if (error && error != EWOULDBLOCK) {
			if (error == ERESTART)
				error = EINTR;
			if (rmt != NULL) {
				timespecsub(&ts, &ts2);
				if (ts.tv_sec < 0)
					timespecclear(&ts);
				*rmt = ts;
			}
			return (error);
		}
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
	}
}
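
/*
 * Illustrative sketch (not part of this file): the userland contract
 * nanosleep1() implements.  An interrupted sleep returns EINTR along
 * with the unslept residual, so a caller can simply resume with it:
 *
 *	#include <time.h>
 *	#include <errno.h>
 *
 *	struct timespec req = { 0, 500000 };	// 500us: sub-tick path
 *	struct timespec rem;
 *
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;	// retry with the remaining time
 *
 * Requests of a tick or more take the tsleep() path above; the
 * sub-tick remainder is then finished by the yield/systimer path on
 * the next loop iteration.
 */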
/* ARGSUSED */
int
sys_nanosleep(struct nanosleep_args *uap)
{
	int error;
	struct timespec rqt;
	struct timespec rmt;

	error = copyin(uap->rqtp, &rqt, sizeof(rqt));
	if (error)
		return (error);

	error = nanosleep1(&rqt, &rmt);

	/*
	 * copyout the residual if nanosleep was interrupted.
	 */
	if (error && uap->rmtp)
		error = copyout(&rmt, uap->rmtp, sizeof(rmt));
	return (error);
}

/* ARGSUSED */
int
sys_gettimeofday(struct gettimeofday_args *uap)
{
	struct timeval atv;
	int error = 0;

	if (uap->tp) {
		microtime(&atv);
		if ((error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
		    sizeof(atv))))
			return (error);
	}
	if (uap->tzp)
		error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
		    sizeof(tz));
	return (error);
}

/* ARGSUSED */
int
sys_settimeofday(struct settimeofday_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timezone atz;
	int error;

	if ((error = priv_check(td, PRIV_ROOT)))
		return (error);
	/* Verify all parameters before changing time. */
	if (uap->tv) {
		if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
		    sizeof(atv))))
			return (error);
		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
			return (EINVAL);
	}
	if (uap->tzp &&
	    (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz))))
		return (error);
	if (uap->tv && (error = settime(&atv)))
		return (error);
	if (uap->tzp)
		tz = atz;
	return (0);
}

static void
kern_adjtime_common(void)
{
	if ((ntp_delta >= 0 && ntp_delta < ntp_default_tick_delta) ||
	    (ntp_delta < 0 && ntp_delta > -ntp_default_tick_delta))
		ntp_tick_delta = ntp_delta;
	else if (ntp_delta > ntp_big_delta)
		ntp_tick_delta = 10 * ntp_default_tick_delta;
	else if (ntp_delta < -ntp_big_delta)
		ntp_tick_delta = -10 * ntp_default_tick_delta;
	else if (ntp_delta > 0)
		ntp_tick_delta = ntp_default_tick_delta;
	else
		ntp_tick_delta = -ntp_default_tick_delta;
}

void
kern_adjtime(int64_t delta, int64_t *odelta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	*odelta = ntp_delta;
	ntp_delta = delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

static void
kern_get_ntp_delta(int64_t *delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	*delta = ntp_delta;
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

void
kern_reladjtime(int64_t delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	ntp_delta += delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

static void
kern_adjfreq(int64_t rate)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	ntp_tick_permanent = rate;
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}
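
/*
 * Illustrative sketch (not part of this file): how the routines above
 * are meant to be driven.  The numeric values are assumptions for the
 * sake of the example; ntp_default_tick_delta and ntp_big_delta are
 * defined elsewhere.
 *
 *	int64_t odelta;
 *
 *	kern_adjtime(100000000LL, &odelta);	// slew 100ms into the clock
 *
 * odelta then holds whatever previous correction was still pending.
 * kern_adjtime_common() picks the per-tick slew: deltas smaller than
 * the default per-tick delta are applied whole in one tick, moderate
 * deltas slew at the default per-tick rate, and deltas beyond
 * ntp_big_delta slew at ten times the default rate.
 */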
/* ARGSUSED */
int
sys_adjtime(struct adjtime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	int64_t ndelta, odelta;
	int error;

	if ((error = priv_check(td, PRIV_ROOT)))
		return (error);
	if ((error =
	    copyin((caddr_t)uap->delta, (caddr_t)&atv, sizeof(struct timeval))))
		return (error);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
	ndelta = (int64_t)atv.tv_sec * 1000000000 + atv.tv_usec * 1000;
	kern_adjtime(ndelta, &odelta);

	if (uap->olddelta) {
		atv.tv_sec = odelta / 1000000000;
		atv.tv_usec = odelta % 1000000000 / 1000;
		(void) copyout((caddr_t)&atv, (caddr_t)uap->olddelta,
		    sizeof(struct timeval));
	}
	return (0);
}

static int
sysctl_adjtime(SYSCTL_HANDLER_ARGS)
{
	int64_t delta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_reladjtime(delta);
	}

	if (req->oldptr)
		kern_get_ntp_delta(&delta);
	error = SYSCTL_OUT(req, &delta, sizeof(delta));
	return (error);
}

/*
 * delta is in nanoseconds.
 */
static int
sysctl_delta(SYSCTL_HANDLER_ARGS)
{
	int64_t delta, old_delta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_adjtime(delta, &old_delta);
	}

	if (req->oldptr != NULL)
		kern_get_ntp_delta(&old_delta);
	error = SYSCTL_OUT(req, &old_delta, sizeof(old_delta));
	return (error);
}

/*
 * frequency is in nanoseconds per second shifted left 32.
 * kern_adjfreq() needs it in nanoseconds per tick shifted left 32.
 */
static int
sysctl_adjfreq(SYSCTL_HANDLER_ARGS)
{
	int64_t freqdelta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &freqdelta, sizeof(freqdelta));
		if (error)
			return (error);

		freqdelta /= hz;
		kern_adjfreq(freqdelta);
	}

	if (req->oldptr != NULL)
		freqdelta = ntp_tick_permanent * hz;
	error = SYSCTL_OUT(req, &freqdelta, sizeof(freqdelta));
	if (error)
		return (error);

	return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, ntp, CTLFLAG_RW, 0, "NTP related controls");
SYSCTL_PROC(_kern_ntp, OID_AUTO, permanent,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjfreq, "Q", "permanent correction per second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, delta,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_delta, "Q", "one-time delta");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, big_delta, CTLFLAG_RD,
    &ntp_big_delta, sizeof(ntp_big_delta), "Q",
    "threshold for fast adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, tick_delta, CTLFLAG_RD,
    &ntp_tick_delta, sizeof(ntp_tick_delta), "LU",
    "per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, default_tick_delta, CTLFLAG_RD,
    &ntp_default_tick_delta, sizeof(ntp_default_tick_delta), "LU",
    "default per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, next_leap_second, CTLFLAG_RW,
    &ntp_leap_second, sizeof(ntp_leap_second), "LU",
    "next leap second");
SYSCTL_INT(_kern_ntp, OID_AUTO, insert_leap_second, CTLFLAG_RW,
    &ntp_leap_insert, 0, "insert or remove leap second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, adjust,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjtime, "Q", "relative adjust for delta");
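
/*
 * Illustrative sketch (not part of this file): setting the permanent
 * frequency correction from userland.  Per the comment above
 * sysctl_adjfreq(), the value written is nanoseconds per second
 * shifted left 32; the handler divides by hz to get the per-tick
 * value that kern_adjfreq() stores.  The +1 ppm figure below is just
 * an example.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	// Request a +1 ppm permanent slew: 1000 ns/s, as ns/s << 32.
 *	int64_t rate = 1000LL << 32;
 *	sysctlbyname("kern.ntp.permanent", NULL, NULL,
 *	    &rate, sizeof(rate));
 */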
/*
 * Get the value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer .it_value
 * from the real time timer's .it_interval.  Rather, we compute the next
 * time in absolute time the timer should go off.
 */
/* ARGSUSED */
int
sys_getitimer(struct getitimer_args *uap)
{
	struct proc *p = curproc;
	struct timeval ctv;
	struct itimerval aitv;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	crit_enter();
	if (uap->which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timevalisset(&aitv.it_value)) {
			getmicrouptime(&ctv);
			if (timevalcmp(&aitv.it_value, &ctv, <))
				timevalclear(&aitv.it_value);
			else
				timevalsub(&aitv.it_value, &ctv);
		}
	} else {
		aitv = p->p_timer[uap->which];
	}
	crit_exit();
	return (copyout((caddr_t)&aitv, (caddr_t)uap->itv,
	    sizeof(struct itimerval)));
}

/* ARGSUSED */
int
sys_setitimer(struct setitimer_args *uap)
{
	struct itimerval aitv;
	struct timeval ctv;
	struct itimerval *itvp;
	struct proc *p = curproc;
	int error;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	itvp = uap->itv;
	if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	/*
	 * Fetch the old value via sys_getitimer() by temporarily
	 * pointing uap->itv at the caller's oitv buffer.
	 */
	if ((uap->itv = uap->oitv) &&
	    (error = sys_getitimer((struct getitimer_args *)uap)))
		return (error);
	if (itvp == NULL)
		return (0);
	if (itimerfix(&aitv.it_value))
		return (EINVAL);
	if (!timevalisset(&aitv.it_value))
		timevalclear(&aitv.it_interval);
	else if (itimerfix(&aitv.it_interval))
		return (EINVAL);
	crit_enter();
	if (uap->which == ITIMER_REAL) {
		if (timevalisset(&p->p_realtimer.it_value))
			callout_stop(&p->p_ithandle);
		if (timevalisset(&aitv.it_value))
			callout_reset(&p->p_ithandle,
			    tvtohz_high(&aitv.it_value), realitexpire, p);
		getmicrouptime(&ctv);
		timevaladd(&aitv.it_value, &ctv);
		p->p_realtimer = aitv;
	} else {
		p->p_timer[uap->which] = aitv;
	}
	crit_exit();
	return (0);
}
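
/*
 * Illustrative sketch (not part of this file): the userland setup that
 * exercises the ITIMER_REAL path above.  it_value arms the first
 * expiry; it_interval makes realitexpire() re-arm the callout against
 * the absolute .it_value so that periodic SIGALRMs do not drift.
 *
 *	#include <sys/time.h>
 *	#include <signal.h>
 *
 *	static void
 *	on_alarm(int sig)
 *	{
 *		// handle the periodic tick
 *	}
 *
 *	struct itimerval itv;
 *
 *	signal(SIGALRM, on_alarm);
 *	itv.it_value.tv_sec = 1;		// first expiry in 1 second
 *	itv.it_value.tv_usec = 0;
 *	itv.it_interval.tv_sec = 0;		// then every 250ms
 *	itv.it_interval.tv_usec = 250000;
 *	setitimer(ITIMER_REAL, &itv, NULL);
 */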
/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz_high() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
void
realitexpire(void *arg)
{
	struct proc *p;
	struct timeval ctv, ntv;

	p = (struct proc *)arg;
	ksignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		timevalclear(&p->p_realtimer.it_value);
		return;
	}
	for (;;) {
		crit_enter();
		timevaladd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval);
		getmicrouptime(&ctv);
		if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
			ntv = p->p_realtimer.it_value;
			timevalsub(&ntv, &ctv);
			callout_reset(&p->p_ithandle, tvtohz_low(&ntv),
			    realitexpire, p);
			crit_exit();
			return;
		}
		crit_exit();
	}
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up).
 */
int
itimerfix(struct timeval *tv)
{
	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	return (0);
}
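
/*
 * Illustrative sketch (not part of this file): the carry logic above,
 * with made-up numbers.  Start with 300us left on a 10ms periodic
 * timer and decrement by 500us:
 *
 *	struct itimerval it = {
 *		{ 0, 10000 },	// it_interval = 10ms
 *		{ 0, 300 }	// it_value = 300us remaining
 *	};
 *
 *	itimerdecr(&it, 500);	// returns 0: the timer expired
 *
 * The 200us overshoot (500 - 300) is carried into the reload, so
 * it_value becomes 10000 - 200 = 9800us and the long-run period
 * stays exactly 10ms.
 */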
/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{
	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(struct timeval *t1, const struct timeval *t2)
{
	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{
	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);	/* NB: 10ms precision */
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once, even if interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}

/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other bsd systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;		/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps < maxpps);
	}
}
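
/*
 * Illustrative sketch (not part of this file): the usual in-kernel
 * idiom for the two rate limiters above.  The static state lives with
 * the caller; ppsratecheck() only reads the tick counter.
 *
 *	static struct timeval lasttime;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttime, &curpps, 10))
 *		kprintf("at most 10 of these messages per second\n");
 *
 * ratecheck() is the timeval-interval analogue: pass the minimum
 * interval between events and it returns 1 when at least that much
 * time has elapsed since the last accepted event.
 */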