/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/kern_time.c,v 1.68.2.1 2002/10/01 08:00:41 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sysent.h>
#include <sys/sysunion.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>
#include <sys/kern_syscall.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>
#include <sys/mplock2.h>

struct timezone tz;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

static int	settime(struct timeval *);
static void	timevalfix(struct timeval *);

/*
 * Nanosleep tries very hard to sleep for a precisely requested time
 * interval, down to 1uS.  The administrator can impose a minimum delay
 * and a delay below which we hard-loop instead of initiating a timer
 * interrupt and sleeping.
 *
 * For machines under high loads it might be beneficial to increase min_us
 * to e.g. 1000uS (1ms) so spinning processes sleep meaningfully.
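 *
 * Both knobs are exported below as the kern.nanosleep_min_us and
 * kern.nanosleep_hard_us sysctls.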
 */
static int nanosleep_min_us = 10;
static int nanosleep_hard_us = 100;
static int gettimeofday_quick = 0;
SYSCTL_INT(_kern, OID_AUTO, nanosleep_min_us, CTLFLAG_RW,
	   &nanosleep_min_us, 0, "");
SYSCTL_INT(_kern, OID_AUTO, nanosleep_hard_us, CTLFLAG_RW,
	   &nanosleep_hard_us, 0, "");
SYSCTL_INT(_kern, OID_AUTO, gettimeofday_quick, CTLFLAG_RW,
	   &gettimeofday_quick, 0, "");

static int
settime(struct timeval *tv)
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen.  The worst a miscreant can do in
	 * this circumstance is "freeze" time.  He can't go
	 * back to the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second.  This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel > 1) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				kprintf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec) {
				crit_exit();
				return (EPERM);
			}
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				kprintf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	set_timeofday(&ts);
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));

	resettodr();
	return (0);
}

/*
 * Return the process's accumulated user + system cpu time.
 */
static void
get_process_cputime(struct proc *p, struct timespec *ats)
{
	struct rusage ru;

	lwkt_gettoken(&p->p_token);
	calcru_proc(p, &ru);
	lwkt_reltoken(&p->p_token);
	timevaladd(&ru.ru_utime, &ru.ru_stime);
	TIMEVAL_TO_TIMESPEC(&ru.ru_utime, ats);
}

/*
 * Return the process's accumulated user cpu time.
 */
static void
get_process_usertime(struct proc *p, struct timespec *ats)
{
	struct rusage ru;

	lwkt_gettoken(&p->p_token);
	calcru_proc(p, &ru);
	lwkt_reltoken(&p->p_token);
	TIMEVAL_TO_TIMESPEC(&ru.ru_utime, ats);
}

/*
 * Return the current thread's accumulated user + system cpu time.
 */
static void
get_curthread_cputime(struct timespec *ats)
{
	struct thread *td = curthread;
	struct timeval sys, user;

	calcru(td->td_lwp, &user, &sys);
	timevaladd(&user, &sys);
	TIMEVAL_TO_TIMESPEC(&user, ats);
}

/*
 * MPSAFE
 */
int
kern_clock_gettime(clockid_t clock_id, struct timespec *ats)
{
	struct proc *p;

	p = curproc;
	switch(clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_REALTIME_PRECISE:
		nanotime(ats);
		break;
	case CLOCK_REALTIME_FAST:
		getnanotime(ats);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_PRECISE:
		nanouptime(ats);
		break;
	case CLOCK_MONOTONIC_FAST:
	case CLOCK_UPTIME_FAST:
		getnanouptime(ats);
		break;
	case CLOCK_VIRTUAL:
		get_process_usertime(p, ats);
		break;
	case CLOCK_PROF:
	case CLOCK_PROCESS_CPUTIME_ID:
		get_process_cputime(p, ats);
		break;
	case CLOCK_SECOND:
		ats->tv_sec = time_second;
		ats->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		get_curthread_cputime(ats);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * MPSAFE
 */
int
sys_clock_gettime(struct clock_gettime_args *uap)
{
	struct timespec ats;
	int error;

	error = kern_clock_gettime(uap->clock_id, &ats);
	if (error == 0)
		error = copyout(&ats, uap->tp, sizeof(ats));

	return (error);
}

int
kern_clock_settime(clockid_t clock_id, struct timespec *ats)
{
	struct thread *td = curthread;
	struct timeval atv;
	int error;

	if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
		return (error);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);
	if (ats->tv_nsec < 0 || ats->tv_nsec >= 1000000000)
		return (EINVAL);

	TIMESPEC_TO_TIMEVAL(&atv, ats);
	error = settime(&atv);
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_clock_settime(struct clock_settime_args *uap)
{
	struct timespec ats;
	int error;

	if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
		return (error);

	get_mplock();
	error = kern_clock_settime(uap->clock_id, &ats);
	rel_mplock();
	return (error);
}

/*
 * MPSAFE
 */
int
kern_clock_getres(clockid_t clock_id, struct timespec *ts)
{
	ts->tv_sec = 0;
	switch(clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_REALTIME_FAST:
	case CLOCK_REALTIME_PRECISE:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_FAST:
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_FAST:
	case CLOCK_UPTIME_PRECISE:
		/*
		 * Round up the result of the division cheaply
		 * by adding 1.  Rounding up is especially important
		 * if rounding down would give 0.  Perfect rounding
		 * is unimportant.
		 */
		ts->tv_nsec = 1000000000 / sys_cputimer->freq + 1;
		break;
	case CLOCK_VIRTUAL:
	case CLOCK_PROF:
		/* Accurately round up here because we can do so cheaply. */
		ts->tv_nsec = (1000000000 + hz - 1) / hz;
		break;
	case CLOCK_SECOND:
		ts->tv_sec = 1;
		ts->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
	case CLOCK_PROCESS_CPUTIME_ID:
		ts->tv_nsec = 1000;
		break;
	default:
		return (EINVAL);
	}

	return (0);
}

/*
 * MPSAFE
 */
int
sys_clock_getres(struct clock_getres_args *uap)
{
	int error;
	struct timespec ts;

	error = kern_clock_getres(uap->clock_id, &ts);
	if (error == 0)
		error = copyout(&ts, uap->tp, sizeof(ts));

	return (error);
}

/*
 * nanosleep1()
 *
 *	This is a general helper function for nanosleep() (aka sleep() aka
 *	usleep()).
 *
 *	If there is less than one tick's worth of time left and
 *	we haven't done a yield, or the remaining microseconds is
 *	ridiculously low, do a yield.  This avoids having
 *	to deal with systimer overheads when the system is under
 *	heavy loads.  If we have done a yield already then use
 *	a systimer and an uninterruptible thread wait.
 *
 *	If there is more than a tick's worth of time left,
 *	calculate the baseline ticks and use an interruptible
 *	tsleep, then handle the fine-grained delay on the next
 *	loop.  This usually results in two sleeps occurring, a long one
 *	and a short one.
 *
 * MPSAFE
 */

/*
 * Oneshot systimer callback: wake the thread that armed the timer.
 */
static void
ns1_systimer(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
	lwkt_schedule(info->data);
}

int
nanosleep1(struct timespec *rqt, struct timespec *rmt)
{
	static int nanowait;
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	/* XXX: imho this should return EINVAL at least for tv_sec < 0 */
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
	nanouptime(&ts);
	timespecadd(&ts, rqt);		/* ts = target timestamp compare */
	TIMESPEC_TO_TIMEVAL(&tv, rqt);	/* tv = sleep interval */

	for (;;) {
		int ticks;
		struct systimer info;

		ticks = tv.tv_usec / ustick;	/* approximate */

		if (tv.tv_sec == 0 && ticks == 0) {
			thread_t td = curthread;
			if (tv.tv_usec > 0 && tv.tv_usec < nanosleep_min_us)
				tv.tv_usec = nanosleep_min_us;
			if (tv.tv_usec < nanosleep_hard_us) {
				lwkt_user_yield();
				cpu_pause();
			} else {
				crit_enter_quick(td);
				systimer_init_oneshot(&info, ns1_systimer,
						      td, tv.tv_usec);
				lwkt_deschedule_self(td);
				crit_exit_quick(td);
				lwkt_switch();
				systimer_del(&info); /* make sure it's gone */
			}
			error = iscaught(td->td_lwp);
		} else if (tv.tv_sec == 0) {
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		} else {
			ticks = tvtohz_low(&tv); /* also handles overflow */
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		}
		nanouptime(&ts2);
		if (error && error != EWOULDBLOCK) {
			if (error == ERESTART)
				error = EINTR;
			if (rmt != NULL) {
				timespecsub(&ts, &ts2);
				if (ts.tv_sec < 0)
					timespecclear(&ts);
				*rmt = ts;
			}
			return (error);
		}
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
	}
}

/*
 * MPSAFE
 */
int
sys_nanosleep(struct nanosleep_args *uap)
{
	int error;
	struct timespec rqt;
	struct timespec rmt;

	error = copyin(uap->rqtp, &rqt, sizeof(rqt));
	if (error)
		return (error);

	error = nanosleep1(&rqt, &rmt);

	/*
	 * copyout the residual if nanosleep was interrupted.
	 */
	if (error && uap->rmtp) {
		int error2;

		error2 = copyout(&rmt, uap->rmtp, sizeof(rmt));
		if (error2)
			error = error2;
	}
	return (error);
}

/*
 * The gettimeofday() system call is supposed to return a fine-grained
 * realtime stamp.  However, acquiring a fine-grained stamp can create a
 * bottleneck when multiple cpu cores are trying to access e.g. the
 * HPET hardware timer all at the same time, so we have a sysctl that
 * allows its behavior to be changed to a more coarse-grained timestamp
 * which does not have to access a hardware timer.
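 *
 * The knob is the kern.gettimeofday_quick sysctl declared above; when it
 * is non-zero, sys_gettimeofday() uses getmicrotime() instead of
 * microtime().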
 */
int
sys_gettimeofday(struct gettimeofday_args *uap)
{
	struct timeval atv;
	int error = 0;

	if (uap->tp) {
		if (gettimeofday_quick)
			getmicrotime(&atv);
		else
			microtime(&atv);
		if ((error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
		    sizeof (atv))))
			return (error);
	}
	if (uap->tzp)
		error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
		    sizeof (tz));
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_settimeofday(struct settimeofday_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timezone atz;
	int error;

	if ((error = priv_check(td, PRIV_SETTIMEOFDAY)))
		return (error);
	/*
	 * Verify all parameters before changing time.
	 *
	 * XXX: We do not allow the time to be set to 0.0, which also by
	 *	happy coincidence works around a pkgsrc bulk build bug.
	 */
	if (uap->tv) {
		if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
		    sizeof(atv))))
			return (error);
		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
			return (EINVAL);
		if (atv.tv_sec == 0 && atv.tv_usec == 0)
			return (EINVAL);
	}
	if (uap->tzp &&
	    (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz))))
		return (error);

	get_mplock();
	if (uap->tv && (error = settime(&atv))) {
		rel_mplock();
		return (error);
	}
	rel_mplock();
	if (uap->tzp)
		tz = atz;
	return (0);
}

static void
kern_adjtime_common(void)
{
	if ((ntp_delta >= 0 && ntp_delta < ntp_default_tick_delta) ||
	    (ntp_delta < 0 && ntp_delta > -ntp_default_tick_delta))
		ntp_tick_delta = ntp_delta;
	else if (ntp_delta > ntp_big_delta)
		ntp_tick_delta = 10 * ntp_default_tick_delta;
	else if (ntp_delta < -ntp_big_delta)
		ntp_tick_delta = -10 * ntp_default_tick_delta;
	else if (ntp_delta > 0)
		ntp_tick_delta = ntp_default_tick_delta;
	else
		ntp_tick_delta = -ntp_default_tick_delta;
}

void
kern_adjtime(int64_t delta, int64_t *odelta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	*odelta = ntp_delta;
	ntp_delta = delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

static void
kern_get_ntp_delta(int64_t *delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	*delta = ntp_delta;
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

void
kern_reladjtime(int64_t delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	ntp_delta += delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

static void
kern_adjfreq(int64_t rate)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	ntp_tick_permanent = rate;
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

/*
 * MPALMOSTSAFE
 */
int
sys_adjtime(struct adjtime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	int64_t ndelta, odelta;
	int error;

	if ((error = priv_check(td, PRIV_ADJTIME)))
		return (error);
	error = copyin(uap->delta, &atv, sizeof(struct timeval));
	if (error)
		return (error);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
	ndelta = (int64_t)atv.tv_sec * 1000000000 + atv.tv_usec * 1000;
	get_mplock();
	kern_adjtime(ndelta, &odelta);
	rel_mplock();

	if (uap->olddelta) {
		atv.tv_sec = odelta / 1000000000;
		atv.tv_usec = odelta % 1000000000 / 1000;
		copyout(&atv, uap->olddelta, sizeof(struct timeval));
	}
	return (0);
}

static int
sysctl_adjtime(SYSCTL_HANDLER_ARGS)
{
	int64_t delta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_reladjtime(delta);
	}

	if (req->oldptr)
		kern_get_ntp_delta(&delta);
	error = SYSCTL_OUT(req, &delta, sizeof(delta));
	return (error);
}

/*
 * delta is in nanoseconds.
 */
static int
sysctl_delta(SYSCTL_HANDLER_ARGS)
{
	int64_t delta, old_delta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_adjtime(delta, &old_delta);
	}

	if (req->oldptr != NULL)
		kern_get_ntp_delta(&old_delta);
	error = SYSCTL_OUT(req, &old_delta, sizeof(old_delta));
	return (error);
}

/*
 * frequency is in nanoseconds per second shifted left 32.
 * kern_adjfreq() needs it in nanoseconds per tick shifted left 32.
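 *
 * The conversion below is a plain division/multiplication by hz.  For
 * example (illustrative value only), with hz = 100 a written value of
 * v (ns/sec << 32) is stored as v / 100 (ns/tick << 32), and the stored
 * per-tick value is multiplied back by hz when the sysctl is read.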
 */
static int
sysctl_adjfreq(SYSCTL_HANDLER_ARGS)
{
	int64_t freqdelta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &freqdelta, sizeof(freqdelta));
		if (error)
			return (error);

		freqdelta /= hz;
		kern_adjfreq(freqdelta);
	}

	if (req->oldptr != NULL)
		freqdelta = ntp_tick_permanent * hz;
	error = SYSCTL_OUT(req, &freqdelta, sizeof(freqdelta));
	if (error)
		return (error);

	return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, ntp, CTLFLAG_RW, 0, "NTP related controls");
SYSCTL_PROC(_kern_ntp, OID_AUTO, permanent,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjfreq, "Q", "permanent correction per second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, delta,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_delta, "Q", "one-time delta");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, big_delta, CTLFLAG_RD,
    &ntp_big_delta, sizeof(ntp_big_delta), "Q",
    "threshold for fast adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, tick_delta, CTLFLAG_RD,
    &ntp_tick_delta, sizeof(ntp_tick_delta), "LU",
    "per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, default_tick_delta, CTLFLAG_RD,
    &ntp_default_tick_delta, sizeof(ntp_default_tick_delta), "LU",
    "default per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, next_leap_second, CTLFLAG_RW,
    &ntp_leap_second, sizeof(ntp_leap_second), "LU",
    "next leap second");
SYSCTL_INT(_kern_ntp, OID_AUTO, insert_leap_second, CTLFLAG_RW,
    &ntp_leap_insert, 0, "insert or remove leap second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, adjust,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjtime, "Q", "relative adjust for delta");

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.  It
 * does not suffice, therefore, to reload the real timer .it_value from the
 * real time timer's .it_interval.  Rather, we compute the next time in
 * absolute time the timer should go off.
 *
 * MPALMOSTSAFE
 */
int
sys_getitimer(struct getitimer_args *uap)
{
	struct proc *p = curproc;
	struct timeval ctv;
	struct itimerval aitv;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	lwkt_gettoken(&p->p_token);
	if (uap->which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timevalisset(&aitv.it_value)) {
			getmicrouptime(&ctv);
			if (timevalcmp(&aitv.it_value, &ctv, <))
				timevalclear(&aitv.it_value);
			else
				timevalsub(&aitv.it_value, &ctv);
		}
	} else {
		aitv = p->p_timer[uap->which];
	}
	lwkt_reltoken(&p->p_token);
	return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
}

/*
 * MPALMOSTSAFE
 */
int
sys_setitimer(struct setitimer_args *uap)
{
	struct itimerval aitv;
	struct timeval ctv;
	struct itimerval *itvp;
	struct proc *p = curproc;
	int error;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	itvp = uap->itv;
	if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	if ((uap->itv = uap->oitv) &&
	    (error = sys_getitimer((struct getitimer_args *)uap)))
		return (error);
	if (itvp == NULL)
		return (0);
	if (itimerfix(&aitv.it_value))
		return (EINVAL);
	if (!timevalisset(&aitv.it_value))
		timevalclear(&aitv.it_interval);
	else if (itimerfix(&aitv.it_interval))
		return (EINVAL);
	lwkt_gettoken(&p->p_token);
	if (uap->which == ITIMER_REAL) {
		if (timevalisset(&p->p_realtimer.it_value))
			callout_stop_sync(&p->p_ithandle);
		if (timevalisset(&aitv.it_value))
			callout_reset(&p->p_ithandle,
			    tvtohz_high(&aitv.it_value), realitexpire, p);
		getmicrouptime(&ctv);
		timevaladd(&aitv.it_value, &ctv);
		p->p_realtimer = aitv;
	} else {
		p->p_timer[uap->which] = aitv;
		switch(uap->which) {
		case ITIMER_VIRTUAL:
			p->p_flags &= ~P_SIGVTALRM;
			break;
		case ITIMER_PROF:
			p->p_flags &= ~P_SIGPROF;
			break;
		}
	}
	lwkt_reltoken(&p->p_token);
	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz_high() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
void
realitexpire(void *arg)
{
	struct proc *p;
	struct timeval ctv, ntv;

	p = (struct proc *)arg;
	PHOLD(p);
	lwkt_gettoken(&p->p_token);
	ksignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		timevalclear(&p->p_realtimer.it_value);
		goto done;
	}
	for (;;) {
		timevaladd(&p->p_realtimer.it_value,
			   &p->p_realtimer.it_interval);
		getmicrouptime(&ctv);
		if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
			ntv = p->p_realtimer.it_value;
			timevalsub(&ntv, &ctv);
			callout_reset(&p->p_ithandle, tvtohz_low(&ntv),
				      realitexpire, p);
			goto done;
		}
	}
done:
	lwkt_reltoken(&p->p_token);
	PRELE(p);
}

/*
 * Used to validate itimer timeouts and utimes*() timespecs.
 */
int
itimerfix(struct timeval *tv)
{
	if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < ustick)
		tv->tv_usec = ustick;
	return (0);
}

/*
 * Used to validate timeouts and utimes*() timespecs.
 */
int
itimespecfix(struct timespec *ts)
{
	if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000ULL)
		return (EINVAL);
	if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < nstick)
		ts->tv_nsec = nstick;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	return (0);
}

/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{
	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(struct timeval *t1, const struct timeval *t2)
{
	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{
	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);		/* NB: 10ms precision */
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * check for 0,0 is so that the message will be seen at least once,
	 * even if interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}

/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other bsd systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
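 *
 * A typical caller pattern (illustrative sketch only, not taken from this
 * file) keeps a static timestamp and counter per message source and only
 * acts when ppsratecheck() returns non-zero:
 *
 *	static struct timeval lasttime;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttime, &curpps, 10))
 *		kprintf("...");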
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;		/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps < maxpps);
	}
}