/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/kern_time.c,v 1.68.2.1 2002/10/01 08:00:41 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysmsg.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sysent.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>
#include <sys/kern_syscall.h>
#include <sys/upmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/thread2.h>

extern struct spinlock ntp_spin;

#define CPUCLOCK_BIT			0x80000000
#define CPUCLOCK_ID_MASK		~CPUCLOCK_BIT
#define CPUCLOCK2LWPID(clock_id)	(((clockid_t)(clock_id) >> 32) & CPUCLOCK_ID_MASK)
#define CPUCLOCK2PID(clock_id)		((clock_id) & CPUCLOCK_ID_MASK)
#define MAKE_CPUCLOCK(pid, lwp_id)	((clockid_t)(lwp_id) << 32 | (pid) | CPUCLOCK_BIT)
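
/*
 * Worked example (illustrative values): for pid 1234 and lwpid 5,
 *
 *	clockid_t id = MAKE_CPUCLOCK(1234, 5);
 *
 * packs the lwpid into the upper 32 bits and sets CPUCLOCK_BIT in the
 * lower half, so that
 *
 *	CPUCLOCK2PID(id)	-> 1234	(low 31 bits)
 *	CPUCLOCK2LWPID(id)	-> 5	(bits 32-62)
 *
 * kern_clock_gettime() below recognizes such ids by testing
 * CPUCLOCK_BIT in its default case.
 */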

struct timezone tz;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

static int	settime(struct timeval *);
static void	timevalfix(struct timeval *);
static void	realitexpire(void *arg);

static int sysctl_gettimeofday_quick(SYSCTL_HANDLER_ARGS);


/*
 * Nanosleep tries very hard to sleep for a precisely requested time
 * interval, down to 1uS.  The administrator can impose a minimum delay
 * and a delay below which we hard-loop instead of initiating a timer
 * interrupt and sleeping.
 *
 * For machines under high load it may be beneficial to increase min_us
 * to e.g. 1000uS (1ms) so that spinning processes sleep meaningfully.
 */
static int     nanosleep_min_us = 10;
static int     nanosleep_hard_us = 100;
static int     gettimeofday_quick = 0;
SYSCTL_INT(_kern, OID_AUTO, nanosleep_min_us, CTLFLAG_RW,
	   &nanosleep_min_us, 0, "");
SYSCTL_INT(_kern, OID_AUTO, nanosleep_hard_us, CTLFLAG_RW,
	   &nanosleep_hard_us, 0, "");
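
/*
 * Tuning sketch (illustrative): on a heavily loaded machine the
 * minimum delay can be raised at runtime, e.g.
 *
 *	sysctl kern.nanosleep_min_us=1000
 *
 * so that very short sleeps yield the cpu for a meaningful interval
 * instead of spinning.
 */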

SYSCTL_PROC(_kern, OID_AUTO, gettimeofday_quick, CTLTYPE_INT | CTLFLAG_RW,
	    0, 0, sysctl_gettimeofday_quick, "I", "Quick mode gettimeofday");

static struct lock masterclock_lock = LOCK_INITIALIZER("mstrclk", 0, 0);

static int
settime(struct timeval *tv)
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen.  The worst a miscreant can do in
	 * this circumstance is "freeze" time; he cannot go back to
	 * the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second.  This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel > 1) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				kprintf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec) {
				crit_exit();
				return (EPERM);
			}
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				kprintf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	set_timeofday(&ts);
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));

	resettodr();
	return (0);
}

static void
get_process_cputime(struct proc *p, struct timespec *ats)
{
	struct rusage ru;

	lwkt_gettoken(&p->p_token);
	calcru_proc(p, &ru);
	lwkt_reltoken(&p->p_token);
	timevaladd(&ru.ru_utime, &ru.ru_stime);
	TIMEVAL_TO_TIMESPEC(&ru.ru_utime, ats);
}

static void
get_process_usertime(struct proc *p, struct timespec *ats)
{
	struct rusage ru;

	lwkt_gettoken(&p->p_token);
	calcru_proc(p, &ru);
	lwkt_reltoken(&p->p_token);
	TIMEVAL_TO_TIMESPEC(&ru.ru_utime, ats);
}

static void
get_thread_cputime(struct thread *td, struct timespec *ats)
{
	struct timeval sys, user;

	calcru(td->td_lwp, &user, &sys);
	timevaladd(&user, &sys);
	TIMEVAL_TO_TIMESPEC(&user, ats);
}

/*
 * MPSAFE
 */
int
kern_clock_gettime(clockid_t clock_id, struct timespec *ats)
{
	struct proc *p;
	struct lwp *lp;
	lwpid_t lwp_id;

	p = curproc;
	switch(clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_REALTIME_PRECISE:
		nanotime(ats);
		break;
	case CLOCK_REALTIME_FAST:
		getnanotime(ats);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_PRECISE:
		nanouptime(ats);
		break;
	case CLOCK_MONOTONIC_FAST:
	case CLOCK_UPTIME_FAST:
		getnanouptime(ats);
		break;
	case CLOCK_VIRTUAL:
		get_process_usertime(p, ats);
		break;
	case CLOCK_PROF:
	case CLOCK_PROCESS_CPUTIME_ID:
		get_process_cputime(p, ats);
		break;
	case CLOCK_SECOND:
		ats->tv_sec = time_second;
		ats->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		get_thread_cputime(curthread, ats);
		break;
	default:
		if ((clock_id & CPUCLOCK_BIT) == 0)
			return (EINVAL);
		if ((p = pfind(CPUCLOCK2PID(clock_id))) == NULL)
			return (EINVAL);
		lwp_id = CPUCLOCK2LWPID(clock_id);
		if (lwp_id == 0) {
			get_process_cputime(p, ats);
		} else {
			lwkt_gettoken(&p->p_token);
			lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, lwp_id);
			if (lp == NULL) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return (EINVAL);
			}
			get_thread_cputime(lp->lwp_thread, ats);
			lwkt_reltoken(&p->p_token);
		}
		PRELE(p);
	}
	return (0);
}

/*
 * MPSAFE
 */
int
sys_clock_gettime(struct sysmsg *sysmsg, const struct clock_gettime_args *uap)
{
	struct timespec ats;
	int error;

	error = kern_clock_gettime(uap->clock_id, &ats);
	if (error == 0)
		error = copyout(&ats, uap->tp, sizeof(ats));

	return (error);
}

int
kern_clock_settime(clockid_t clock_id, struct timespec *ats)
{
	struct thread *td = curthread;
	struct timeval atv;
	int error;

	if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
		return (error);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);
	if (ats->tv_nsec < 0 || ats->tv_nsec >= 1000000000)
		return (EINVAL);

	lockmgr(&masterclock_lock, LK_EXCLUSIVE);
	TIMESPEC_TO_TIMEVAL(&atv, ats);
	error = settime(&atv);
	lockmgr(&masterclock_lock, LK_RELEASE);

	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_clock_settime(struct sysmsg *sysmsg, const struct clock_settime_args *uap)
{
	struct timespec ats;
	int error;

	if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
		return (error);

	error = kern_clock_settime(uap->clock_id, &ats);

	return (error);
}

/*
 * MPSAFE
 */
int
kern_clock_getres(clockid_t clock_id, struct timespec *ts)
{
	ts->tv_sec = 0;

	switch(clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_REALTIME_FAST:
	case CLOCK_REALTIME_PRECISE:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_FAST:
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_FAST:
	case CLOCK_UPTIME_PRECISE:
		/*
		 * Minimum reportable resolution is 1ns.  Rounding is
		 * otherwise unimportant.
		 */
		ts->tv_nsec = 999999999 / sys_cputimer->freq + 1;
		break;
	case CLOCK_VIRTUAL:
	case CLOCK_PROF:
		/* Accurately round up here because we can do so cheaply. */
		ts->tv_nsec = howmany(1000000000, hz);
		break;
	case CLOCK_SECOND:
		ts->tv_sec = 1;
		ts->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
	case CLOCK_PROCESS_CPUTIME_ID:
		ts->tv_nsec = 1000;
		break;
	default:
		if ((clock_id & CPUCLOCK_BIT) != 0)
			ts->tv_nsec = 1000;
		else
			return (EINVAL);
	}

	return (0);
}
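
/*
 * Worked example (illustrative values): with a 10 MHz sys_cputimer
 * (sys_cputimer->freq == 10000000), the realtime/monotonic cases above
 * report 999999999 / 10000000 + 1 == 100ns, i.e. one timer period
 * rounded up to at least 1ns.  With hz == 100, CLOCK_VIRTUAL and
 * CLOCK_PROF report howmany(1000000000, 100) == 10000000ns, one full
 * tick.
 */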

/*
 * MPSAFE
 */
int
sys_clock_getres(struct sysmsg *sysmsg, const struct clock_getres_args *uap)
{
	int error;
	struct timespec ts;

	error = kern_clock_getres(uap->clock_id, &ts);
	if (error == 0)
		error = copyout(&ts, uap->tp, sizeof(ts));

	return (error);
}

static int
kern_getcpuclockid(pid_t pid, lwpid_t lwp_id, clockid_t *clock_id)
{
	struct proc *p;
	int error = 0;

	if (pid == 0) {
		p = curproc;
		pid = p->p_pid;
		PHOLD(p);
	} else {
		p = pfind(pid);
		if (p == NULL)
			return (ESRCH);
	}
	/* lwp_id can be 0 when called by clock_getcpuclockid() */
	if (lwp_id < 0) {
		error = EINVAL;
		goto out;
	}
	lwkt_gettoken(&p->p_token);
	if (lwp_id > 0 &&
	    lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, lwp_id) == NULL) {
		lwkt_reltoken(&p->p_token);
		error = ESRCH;
		goto out;
	}
	*clock_id = MAKE_CPUCLOCK(pid, lwp_id);
	lwkt_reltoken(&p->p_token);
out:
	PRELE(p);
	return (error);
}

int
sys_getcpuclockid(struct sysmsg *sysmsg, const struct getcpuclockid_args *uap)
{
	clockid_t clk_id;
	int error;

	error = kern_getcpuclockid(uap->pid, uap->lwp_id, &clk_id);
	if (error == 0)
		error = copyout(&clk_id, uap->clock_id, sizeof(clockid_t));

	return (error);
}
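
/*
 * Userland usage sketch (illustrative): the cpu clock id produced here
 * can be handed straight back to clock_gettime(), e.g.
 *
 *	clockid_t cid;
 *	struct timespec ts;
 *
 *	if (clock_getcpuclockid(pid, &cid) == 0)
 *		clock_gettime(cid, &ts);	(cpu time used by pid)
 */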

/*
 * nanosleep1()
 *
 *	This is a general helper function for nanosleep() (aka sleep() aka
 *	usleep()).
 *
 *	If there is less than one tick's worth of time left and
 *	we haven't done a yield, or the remaining microseconds is
 *	ridiculously low, do a yield.  This avoids having
 *	to deal with systimer overheads when the system is under
 *	heavy loads.  If we have done a yield already then use
 *	a systimer and an uninterruptible thread wait.
 *
 *	If there is more than a tick's worth of time left,
 *	calculate the baseline ticks and use an interruptible
 *	tsleep, then handle the fine-grained delay on the next
 *	loop.  This usually results in two sleeps occurring, a long one
 *	and a short one.
 *
 * MPSAFE
 */
static void
ns1_systimer(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
	lwkt_schedule(info->data);
}

int
nanosleep1(struct timespec *rqt, struct timespec *rmt)
{
	static int nanowait;
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;

	if (rqt->tv_sec < 0 || rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	if (rqt->tv_sec == 0 && rqt->tv_nsec == 0)
		return (0);

	nanouptime(&ts);
	timespecadd(&ts, rqt, &ts);	/* ts = target timestamp compare */
	TIMESPEC_TO_TIMEVAL(&tv, rqt);	/* tv = sleep interval */

	for (;;) {
		int ticks;
		struct systimer info;

		ticks = tv.tv_usec / ustick;	/* approximate */

		if (tv.tv_sec == 0 && ticks == 0) {
			thread_t td = curthread;
			if (tv.tv_usec > 0 && tv.tv_usec < nanosleep_min_us)
				tv.tv_usec = nanosleep_min_us;
			if (tv.tv_usec < nanosleep_hard_us) {
				lwkt_user_yield();
				cpu_pause();
			} else {
				crit_enter_quick(td);
				systimer_init_oneshot(&info, ns1_systimer,
						td, tv.tv_usec);
				lwkt_deschedule_self(td);
				crit_exit_quick(td);
				lwkt_switch();
				systimer_del(&info); /* make sure it's gone */
			}
			error = iscaught(td->td_lwp);
		} else if (tv.tv_sec == 0) {
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		} else {
			ticks = tvtohz_low(&tv); /* also handles overflow */
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		}
		nanouptime(&ts2);
		if (error && error != EWOULDBLOCK) {
			if (error == ERESTART)
				error = EINTR;
			if (rmt != NULL) {
				timespecsub(&ts, &ts2, &ts);
				if (ts.tv_sec < 0)
					timespecclear(&ts);
				*rmt = ts;
			}
			return (error);
		}
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		timespecsub(&ts, &ts2, &ts3);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
	}
}
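
/*
 * Worked example (illustrative, assuming hz == 100 so ustick == 10000):
 * a request for 25000uS first does a ~2 tick interruptible tsleep()
 * (the tick count is truncated, so it wakes short of the target),
 * recomputes the residual against the target timestamp, and finishes
 * the last few thousand microseconds with a one-shot systimer or a
 * yield, depending on nanosleep_hard_us.  This is the "long sleep plus
 * short sleep" pattern described in the comment above.
 */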

/*
 * MPSAFE
 */
int
sys_nanosleep(struct sysmsg *sysmsg, const struct nanosleep_args *uap)
{
	int error;
	struct timespec rqt;
	struct timespec rmt;

	error = copyin(uap->rqtp, &rqt, sizeof(rqt));
	if (error)
		return (error);

	bzero(&rmt, sizeof(rmt));
	error = nanosleep1(&rqt, &rmt);

	/*
	 * copyout the residual if nanosleep was interrupted.
	 */
	if (error == EINTR && uap->rmtp) {
		int error2;

		error2 = copyout(&rmt, uap->rmtp, sizeof(rmt));
		if (error2)
			error = error2;
	}
	return (error);
}

/*
 * The gettimeofday() system call is supposed to return a fine-grained
 * realtime stamp.  However, acquiring a fine-grained stamp can create a
 * bottleneck when multiple cpu cores are trying to access e.g. the
 * HPET hardware timer all at the same time, so we have a sysctl that
 * allows its behavior to be changed to a more coarse-grained timestamp
 * which does not have to access a hardware timer.
 */
int
sys_gettimeofday(struct sysmsg *sysmsg, const struct gettimeofday_args *uap)
{
	struct timeval atv;
	int error = 0;

	if (uap->tp) {
		if (gettimeofday_quick)
			getmicrotime(&atv);
		else
			microtime(&atv);
		if ((error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
				     sizeof (atv))))
			return (error);
	}
	if (uap->tzp)
		error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
				sizeof (tz));
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_settimeofday(struct sysmsg *sysmsg, const struct settimeofday_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timezone atz;
	int error;

	if ((error = priv_check(td, PRIV_SETTIMEOFDAY)))
		return (error);
	/*
	 * Verify all parameters before changing time.
	 *
	 * XXX: We do not allow the time to be set to 0.0, which also by
	 *      happy coincidence works around a pkgsrc bulk build bug.
	 */
	if (uap->tv) {
		if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
				    sizeof(atv))))
			return (error);
		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
			return (EINVAL);
		if (atv.tv_sec == 0 && atv.tv_usec == 0)
			return (EINVAL);
	}
	if (uap->tzp &&
	    (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz))))
		return (error);

	lockmgr(&masterclock_lock, LK_EXCLUSIVE);
	if (uap->tv && (error = settime(&atv))) {
		lockmgr(&masterclock_lock, LK_RELEASE);
		return (error);
	}
	lockmgr(&masterclock_lock, LK_RELEASE);

	if (uap->tzp)
		tz = atz;
	return (0);
}

/*
 * WARNING! Run with ntp_spin held
 */
static void
kern_adjtime_common(void)
{
	if ((ntp_delta >= 0 && ntp_delta < ntp_default_tick_delta) ||
	    (ntp_delta < 0 && ntp_delta > -ntp_default_tick_delta))
		ntp_tick_delta = ntp_delta;
	else if (ntp_delta > ntp_big_delta)
		ntp_tick_delta = 10 * ntp_default_tick_delta;
	else if (ntp_delta < -ntp_big_delta)
		ntp_tick_delta = -10 * ntp_default_tick_delta;
	else if (ntp_delta > 0)
		ntp_tick_delta = ntp_default_tick_delta;
	else
		ntp_tick_delta = -ntp_default_tick_delta;
}
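
/*
 * Summary of the policy in kern_adjtime_common(): a remaining
 * correction smaller than one default per-tick delta is applied in a
 * single tick; a correction beyond ntp_big_delta is slewed at ten
 * times the default per-tick rate; everything in between is slewed at
 * the default rate, with the sign following ntp_delta.
 */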

void
kern_adjtime(int64_t delta, int64_t *odelta)
{
	spin_lock(&ntp_spin);
	*odelta = ntp_delta;
	ntp_delta = delta;
	kern_adjtime_common();
	spin_unlock(&ntp_spin);
}

static void
kern_get_ntp_delta(int64_t *delta)
{
	*delta = ntp_delta;
}

void
kern_reladjtime(int64_t delta)
{
	spin_lock(&ntp_spin);
	ntp_delta += delta;
	kern_adjtime_common();
	spin_unlock(&ntp_spin);
}

static void
kern_adjfreq(int64_t rate)
{
	spin_lock(&ntp_spin);
	ntp_tick_permanent = rate;
	spin_unlock(&ntp_spin);
}

/*
 * MPALMOSTSAFE
 */
int
sys_adjtime(struct sysmsg *sysmsg, const struct adjtime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	int64_t ndelta, odelta;
	int error;

	if ((error = priv_check(td, PRIV_ADJTIME)))
		return (error);
	error = copyin(uap->delta, &atv, sizeof(struct timeval));
	if (error)
		return (error);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
	ndelta = (int64_t)atv.tv_sec * 1000000000 + atv.tv_usec * 1000;
	kern_adjtime(ndelta, &odelta);

	if (uap->olddelta) {
		atv.tv_sec = odelta / 1000000000;
		atv.tv_usec = odelta % 1000000000 / 1000;
		copyout(&atv, uap->olddelta, sizeof(struct timeval));
	}
	return (0);
}

static int
sysctl_adjtime(SYSCTL_HANDLER_ARGS)
{
	int64_t delta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_reladjtime(delta);
	}

	if (req->oldptr)
		kern_get_ntp_delta(&delta);
	error = SYSCTL_OUT(req, &delta, sizeof(delta));
	return (error);
}

/*
 * delta is in nanoseconds.
 */
static int
sysctl_delta(SYSCTL_HANDLER_ARGS)
{
	int64_t delta, old_delta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_adjtime(delta, &old_delta);
	}

	if (req->oldptr != NULL)
		kern_get_ntp_delta(&old_delta);
	error = SYSCTL_OUT(req, &old_delta, sizeof(old_delta));
	return (error);
}

/*
 * frequency is in nanoseconds per second shifted left 32.
 * kern_adjfreq() needs it in nanoseconds per tick shifted left 32.
 */
static int
sysctl_adjfreq(SYSCTL_HANDLER_ARGS)
{
	int64_t freqdelta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &freqdelta, sizeof(freqdelta));
		if (error)
			return (error);

		freqdelta /= hz;
		kern_adjfreq(freqdelta);
	}

	if (req->oldptr != NULL)
		freqdelta = ntp_tick_permanent * hz;
	error = SYSCTL_OUT(req, &freqdelta, sizeof(freqdelta));
	if (error)
		return (error);

	return (0);
}
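
/*
 * Worked example (illustrative, assuming hz == 100): a requested
 * permanent correction of +1000ns per second arrives here as
 * 1000 << 32; dividing by hz stores (1000 << 32) / 100 == 10 << 32,
 * i.e. 10ns per tick still shifted left 32 for sub-nanosecond
 * precision.  Reading the value back multiplies by hz to restore
 * per-second units.
 */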

SYSCTL_NODE(_kern, OID_AUTO, ntp, CTLFLAG_RW, 0, "NTP related controls");
SYSCTL_PROC(_kern_ntp, OID_AUTO, permanent,
	    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
	    sysctl_adjfreq, "Q", "permanent correction per second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, delta,
	    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
	    sysctl_delta, "Q", "one-time delta");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, big_delta, CTLFLAG_RD,
	      &ntp_big_delta, sizeof(ntp_big_delta), "Q",
	      "threshold for fast adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, tick_delta, CTLFLAG_RD,
	      &ntp_tick_delta, sizeof(ntp_tick_delta), "LU",
	      "per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, default_tick_delta, CTLFLAG_RD,
	      &ntp_default_tick_delta, sizeof(ntp_default_tick_delta), "LU",
	      "default per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, next_leap_second, CTLFLAG_RW,
	      &ntp_leap_second, sizeof(ntp_leap_second), "LU",
	      "next leap second");
SYSCTL_INT(_kern_ntp, OID_AUTO, insert_leap_second, CTLFLAG_RW,
	   &ntp_leap_insert, 0, "insert or remove leap second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, adjust,
	    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
	    sysctl_adjtime, "Q", "relative adjust for delta");

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer's .it_value
 * from the real time timer's .it_interval.  Rather, we compute the next
 * time in absolute time the timer should go off.
 *
 * MPALMOSTSAFE
 */
int
sys_getitimer(struct sysmsg *sysmsg, const struct getitimer_args *uap)
{
	struct proc *p = curproc;
	struct timeval ctv;
	struct itimerval aitv;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	lwkt_gettoken(&p->p_token);
	if (uap->which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timevalisset(&aitv.it_value)) {
			getmicrouptime(&ctv);
			if (timevalcmp(&aitv.it_value, &ctv, <))
				timevalclear(&aitv.it_value);
			else
				timevalsub(&aitv.it_value, &ctv);
		}
	} else {
		aitv = p->p_timer[uap->which];
	}
	lwkt_reltoken(&p->p_token);
	return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
}

/*
 * MPALMOSTSAFE
 */
int
sys_setitimer(struct sysmsg *sysmsg, const struct setitimer_args *uap)
{
	struct itimerval aitv;
	struct timeval ctv;
	struct itimerval *itvp;
	struct proc *p = curproc;
	struct getitimer_args gitargs;
	int error;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	itvp = uap->itv;
	if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv,
				    sizeof(struct itimerval))))
		return (error);

	if (uap->oitv) {
		gitargs.which = uap->which;
		gitargs.itv = uap->oitv;
		error = sys_getitimer(sysmsg, &gitargs);
		if (error)
			return error;
	}
	if (itvp == NULL)
		return (0);
	if (itimerfix(&aitv.it_value))
		return (EINVAL);
	if (!timevalisset(&aitv.it_value))
		timevalclear(&aitv.it_interval);
	else if (itimerfix(&aitv.it_interval))
		return (EINVAL);
	lwkt_gettoken(&p->p_token);
	if (uap->which == ITIMER_REAL) {
		if (timevalisset(&p->p_realtimer.it_value))
			callout_cancel(&p->p_ithandle);
		if (timevalisset(&aitv.it_value))
			callout_reset(&p->p_ithandle,
			    tvtohz_high(&aitv.it_value), realitexpire, p);
		getmicrouptime(&ctv);
		timevaladd(&aitv.it_value, &ctv);
		p->p_realtimer = aitv;
	} else {
		p->p_timer[uap->which] = aitv;
		switch(uap->which) {
		case ITIMER_VIRTUAL:
			p->p_flags &= ~P_SIGVTALRM;
			break;
		case ITIMER_PROF:
			p->p_flags &= ~P_SIGPROF;
			break;
		}
	}
	lwkt_reltoken(&p->p_token);
	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz_high() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
static
void
realitexpire(void *arg)
{
	struct proc *p;
	struct timeval ctv, ntv;

	p = (struct proc *)arg;
	PHOLD(p);
	lwkt_gettoken(&p->p_token);
	ksignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		timevalclear(&p->p_realtimer.it_value);
		goto done;
	}
	for (;;) {
		timevaladd(&p->p_realtimer.it_value,
			   &p->p_realtimer.it_interval);
		getmicrouptime(&ctv);
		if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
			ntv = p->p_realtimer.it_value;
			timevalsub(&ntv, &ctv);
			callout_reset(&p->p_ithandle, tvtohz_low(&ntv),
				      realitexpire, p);
			goto done;
		}
	}
done:
	lwkt_reltoken(&p->p_token);
	PRELE(p);
}

/*
 * Used to validate itimer timeouts and utimes*() timespecs.
 */
int
itimerfix(struct timeval *tv)
{
	if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < ustick)
		tv->tv_usec = ustick;
	return (0);
}

/*
 * Used to validate timeouts and utimes*() timespecs.
 */
int
itimespecfix(struct timespec *ts)
{
	if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000ULL)
		return (EINVAL);
	if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < nstick)
		ts->tv_nsec = nstick;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;		/* sec is already 0 */
	return (0);
}
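
/*
 * Worked example (illustrative values): with it_value = 300us,
 * it_interval = 1000us and usec = 500, the timer has overshot by
 * 200us.  The code above carries usec - 300 == 200 into the reload,
 * so the new it_value becomes 1000 - 200 == 800us and the long-run
 * period stays 1000us instead of drifting late.
 */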

/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning;
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{

	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(struct timeval *t1, const struct timeval *t2)
{

	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{

	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);		/* NB: 10ms precision */
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * check for 0,0 is so that the message will be seen at least once,
	 * even if interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}

/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other bsd systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;		/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps < maxpps);
	}
}
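
/*
 * Usage sketch (illustrative): a driver limiting an error message to
 * ten occurrences per second might keep static state and do
 *
 *	static struct timeval lasttime;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttime, &curpps, 10))
 *		kprintf("device: receive overrun\n");
 *
 * ratecheck() works the same way but takes a minimum interval between
 * events instead of an events-per-second budget.
 */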

static int
sysctl_gettimeofday_quick(SYSCTL_HANDLER_ARGS)
{
	int error;
	int gtod;

	gtod = gettimeofday_quick;
	error = sysctl_handle_int(oidp, &gtod, 0, req);
	if (error || req->newptr == NULL)
		return error;
	gettimeofday_quick = gtod;
	if (kpmap)
		kpmap->fast_gtod = gtod;
	return 0;
}