/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 * $DragonFly: src/sys/kern/kern_clock.c,v 1.36 2005/04/20 17:57:16 joerg Exp $
 */

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef DEVICE_POLLING
extern void init_device_poll(void);
extern void hardclock_device_poll(void);
#endif /* DEVICE_POLLING */

static void initclocks (void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cp_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct cp_time cp_time;

SYSCTL_OPAQUE(_kern, OID_AUTO, cp_time, CTLFLAG_RD, &cp_time, sizeof(cp_time),
    "LU", "CPU time statistics");

/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this with
 * microuptime().  microtime() is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * basetime is used to calculate the compensated real time of day.  Chunky
 * changes to the time, aka settimeofday(), are made by modifying basetime.
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 */
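/*
 * In other words, the invariants maintained here are, conceptually (a
 * sketch rather than literal code):
 *
 *	uptime   = gd_time_seconds + (sysclock - gd_cpuclock_base) / freq
 *	realtime = uptime + basetime
 *	boottime = realtime - uptime	(at the instant the time was set)
 *
 * so settimeofday()-style steps only modify basetime (and boottime),
 * never the per-cpu uptime fields.
 */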
struct timespec boottime;	/* boot time (realtime) for reference only */
static struct timespec basetime; /* base time adjusts uptime -> realtime */
time_t time_second;		/* read-only 'passive' uptime in seconds */

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timeval, "System boottime");
SYSCTL_STRUCT(_kern, OID_AUTO, basetime, CTLFLAG_RD,
    &basetime, timeval, "System basetime");

static void hardclock(systimer_t info, struct intrframe *frame);
static void statclock(systimer_t info, struct intrframe *frame);
static void schedclock(systimer_t info, struct intrframe *frame);

int	ticks;			/* system master ticks at hz */
int	clocks_running;		/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */

/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	cpu_initclocks();
#ifdef DEVICE_POLLING
	init_device_poll();
#endif
	/*psratio = profhz / stathz;*/
	initclocks_pcpu();
	clocks_running = 1;
}

/*
 * Called on a per-cpu basis
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = cputimer_count();
	} else {
		/* XXX */
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	/*
	 * Use a non-queued periodic systimer to prevent multiple ticks from
	 * building up if the sysclock jumps forward (8254 gets reset).  The
	 * sysclock will never jump backwards.  Our time sync is based on
	 * the actual sysclock, not the ticks count.
	 */
	systimer_init_periodic_nq(&gd->gd_hardclock, hardclock, NULL, hz);
	systimer_init_periodic_nq(&gd->gd_statclock, statclock, NULL, stathz);
	/* XXX correct the frequency for scheduler / estcpu tests */
	systimer_init_periodic_nq(&gd->gd_schedclock, schedclock,
	    NULL, ESTCPUFREQ);
	crit_exit();
}

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 */
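/*
 * For example (illustrative numbers only): if the current uptime is
 * 100.25 seconds and the caller asks for a real time of 1000.10 seconds,
 * the code below computes basetime = 1000.10 - 100.25 = 899.85, i.e.
 * tv_sec = 899, tv_nsec = 850000000 after borrowing one second to
 * renormalize the negative nanoseconds (-150000000 + 1000000000).
 */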
void
set_timeofday(struct timespec *ts)
{
	struct timespec ts2;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	nanouptime(&ts2);
	basetime.tv_sec = ts->tv_sec - ts2.tv_sec;
	basetime.tv_nsec = ts->tv_nsec - ts2.tv_nsec;
	if (basetime.tv_nsec < 0) {
		basetime.tv_nsec += 1000000000;
		--basetime.tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	boottime.tv_sec = basetime.tv_sec;
	ntp_delta = 0;
	crit_exit();
}

/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct pstats *pstats;
	struct globaldata *gd = mycpu;

	/*
	 * Realtime updates are per-cpu.  Note that timer corrections as
	 * returned by microtime() and friends make an additional adjustment
	 * using a system-wide 'basetime', but the running time is always
	 * taken from the per-cpu globaldata area.  Since the same clock
	 * is distributed (XXX SMP) to all cpus, the per-cpu timebases
	 * stay in synch.
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but it is possible for
	 * it to temporarily get behind in the seconds if something in the
	 * system locks interrupts for a long period of time.  Since periodic
	 * timers count events, though, everything should resynch again
	 * immediately.
	 */
	cputicks = info->time - gd->gd_cpuclock_base;
	if (cputicks >= cputimer_freq) {
		++gd->gd_time_seconds;
		gd->gd_cpuclock_base += cputimer_freq;
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
		struct timespec nts;
		int leap;

		++ticks;

#ifdef DEVICE_POLLING
		hardclock_device_poll();	/* mpsafe, short and quick */
#endif /* DEVICE_POLLING */

#if 0
		if (tco->tc_poll_pps)
			tco->tc_poll_pps(tco);
#endif
		/*
		 * Apply adjtime corrections.  At the moment we only do this
		 * if we can get the MP lock to interlock with adjtime's
		 * modification of these variables.  Note that basetime
		 * adjustments are not MP safe either XXX.
		 */
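		/*
		 * The fields used below are 32.32 fixed point; e.g. a
		 * permanent correction of +1.5 nsec per tick would be
		 * stored as 0x180000000 (1.5 << 32).  Accumulating that
		 * for two ticks gives 3 << 32, at which point 3 whole
		 * nanoseconds are folded into basetime and removed from
		 * the accumulator.  Likewise a one-time adjtime() delta
		 * of, say, +3000 nsec with ntp_tick_delta = 1000 is
		 * stepped in over 3 ticks.  (Illustrative numbers only.)
		 */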
		if (ntp_delta != 0) {
			basetime.tv_nsec += ntp_tick_delta;
			ntp_delta -= ntp_tick_delta;
			if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
			    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
				ntp_tick_delta = ntp_delta;
			}
		}

		if (ntp_tick_permanent != 0) {
			ntp_tick_acc += ntp_tick_permanent;
			if (ntp_tick_acc >= (1LL << 32)) {
				basetime.tv_nsec += ntp_tick_acc >> 32;
				ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
			} else if (ntp_tick_acc <= -(1LL << 32)) {
				/* Negate ntp_tick_acc to avoid shifting the sign bit. */
				basetime.tv_nsec -= (-ntp_tick_acc) >> 32;
				ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
			}
		}

		if (basetime.tv_nsec >= 1000000000) {
			basetime.tv_sec++;
			basetime.tv_nsec -= 1000000000;
		} else if (basetime.tv_nsec < 0) {
			basetime.tv_sec--;
			basetime.tv_nsec += 1000000000;
		}

		if (ntp_leap_second) {
			struct timespec tsp;
			nanotime(&tsp);

			if (ntp_leap_second == tsp.tv_sec) {
				if (ntp_leap_insert)
					basetime.tv_sec++;
				else
					basetime.tv_sec--;
				ntp_leap_second--;
			}
		}

		/*
		 * Apply per-tick compensation.  nsec_adj adjusts for both
		 * offset and frequency, and could be negative.
		 */
		if (nsec_adj != 0 && try_mplock()) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				basetime.tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				basetime.tv_nsec -= (-nsec_acc) >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (basetime.tv_nsec >= 1000000000) {
				basetime.tv_nsec -= 1000000000;
				++basetime.tv_sec;
			} else if (basetime.tv_nsec < 0) {
				basetime.tv_nsec += 1000000000;
				--basetime.tv_sec;
			}
			rel_mplock();
		}

		/*
		 * If the realtime-adjusted seconds hand rolls over then tell
		 * ntp_update_second() what we did in the last second so it
		 * can calculate what to do in the next second.  It may also
		 * add or subtract a leap second.
		 */
		getnanotime(&nts);
		if (time_second != nts.tv_sec) {
			leap = ntp_update_second(time_second, &nsec_adj);
			basetime.tv_sec += leap;
			time_second = nts.tv_sec + leap;
			nsec_adj /= hz;
		}
	}

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * ITimer handling is per-tick, per-cpu.  I don't think psignal()
	 * is mpsafe on curproc, so XXX get the mplock.
	 */
	if ((p = curproc) != NULL && try_mplock()) {
		pstats = p->p_stats;
		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
		rel_mplock();
	}
	setdelayed();
}

/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
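/*
 * With the typical rates above, for instance, the statclock period is
 * 8ms while the hardclock period is 10ms, so the two only line up every
 * 40ms.  The offset keeps the sampler from systematically firing at the
 * same phase as hardclock-driven activity, which would bias the
 * statistics.  (Example rates only; stathz and hz are configurable.)
 */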
static void
statclock(systimer_t info, struct intrframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	thread_t td;
	struct proc *p;
	int bump;
	struct timeval tv;
	struct timeval *stv;

	/*
	 * How big was our timeslice relative to the last time?
	 */
	microuptime(&tv);	/* mpsafe */
	stv = &mycpu->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
			(tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;

	td = curthread;
	p = td->td_proc;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flag & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cp_time.cp_nice += bump;
		else
			cp_time.cp_user += bump;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && frame) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 * can occur if ipi processing is done from an splx().
		 */
		if (frame && CLKF_INTR(frame)) {
			td->td_iticks += bump;
			cp_time.cp_intr += bump;
		} else {
			td->td_sticks += bump;
			if (td == &mycpu->gd_idlethread)
				cp_time.cp_idle += bump;
			else
				cp_time.cp_sys += bump;
		}
	}
}

/*
 * The scheduler clock typically runs at a 20Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 */
static void
schedclock(systimer_t info, struct intrframe *frame)
{
	struct proc *p;
	struct pstats *pstats;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	schedulerclock(NULL);	/* mpsafe */
	if ((p = curproc) != NULL) {
		/* Update resource usage integrals and maximums. */
		if ((pstats = p->p_stats) != NULL &&
		    (ru = &pstats->p_ru) != NULL &&
		    (vm = p->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			rss = pgtok(vmspace_resident_count(vm));
			if (ru->ru_maxrss < rss)
				ru->ru_maxrss = rss;
		}
	}
}
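/*
 * A worked example for the two tick conversion routines that follow,
 * assuming hz = 100 (so tick = 10000 usec; illustrative values only):
 * a timeval of 25000 usec converts via tvtohz_high() to
 * 0*hz + (25000 + 9999)/10000 + 1 = 4 ticks (rounded up, plus one tick
 * of slop against a near-term clock interrupt), while tvtohz_low()
 * yields 25000/10000 = 2 ticks (rounded down).
 */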
/*
 * Compute the number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function rounds the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz_high: negative time difference %ld sec %ld usec\n",
		       sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			((u_long)usec + (tick - 1)) / tick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

/*
 * Compute the number of ticks for the specified amount of time, erring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / tick);
	else
		ticks = INT_MAX;
	return (ticks);
}


/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = 1;
			setstatclockrate(stathz);
			splx(s);
		}
#endif
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = tick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo", "");
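/*
 * Userland can fetch the structure exported above with, for example
 * (sketch of a userland consumer, not part of the kernel):
 *
 *	struct clockinfo ci;
 *	size_t len = sizeof(ci);
 *	if (sysctlbyname("kern.clockrate", &ci, &len, NULL, 0) == 0)
 *		printf("hz=%d tick=%d\n", ci.hz, ci.tick);
 */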
/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * cputimer_freq.  If this occurs the cputimer_freq64_nsec multiplication
 * can easily overflow, so we deal with that case.  For uniformity we
 * handle the usec case the same way.
 */
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tvp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tvp->tv_usec = (cputimer_freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tsp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;
}

void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = cputimer_count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tvp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tvp->tv_usec = (cputimer_freq64_usec * delta) >> 32;
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = cputimer_count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tsp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;
}
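/*
 * Typical in-kernel interval measurement with the uptime flavors, e.g.
 * (a sketch; the work being timed is hypothetical):
 *
 *	struct timespec t1, t2;
 *	nanouptime(&t1);		precise, reads the cputimer
 *	... work being timed ...
 *	nanouptime(&t2);
 *	timespecsub(&t2, &t1);		t2 now holds the elapsed time
 *
 * getnanouptime() would be cheaper but may lag by up to 1/hz since it
 * uses the last hardclock sample instead of reading the timer.
 */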
/*
 * realtime routines
 */

void
getmicrotime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tvp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tvp->tv_usec = (cputimer_freq64_usec * delta) >> 32;

	tvp->tv_sec += basetime.tv_sec;
	tvp->tv_usec += basetime.tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tsp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;

	tsp->tv_sec += basetime.tv_sec;
	tsp->tv_nsec += basetime.tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = cputimer_count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tvp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tvp->tv_usec = (cputimer_freq64_usec * delta) >> 32;

	tvp->tv_sec += basetime.tv_sec;
	tvp->tv_usec += basetime.tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = cputimer_count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tsp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;

	tsp->tv_sec += basetime.tv_sec;
	tsp->tv_nsec += basetime.tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

/*
 * Note: this is not exactly synchronized with real time.  To do that we
 * would have to do what microtime does and check for a nanoseconds
 * overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	return(gd->gd_time_seconds + basetime.tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}
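/*
 * The expected flow, roughly (a sketch; the driver involved is
 * hypothetical): a serial or parallel port driver with PPS capture
 * support calls pps_init() once to fill in ppscap, then calls
 * pps_event() from its interrupt handler with the captured sysclock
 * count and PPS_CAPTUREASSERT or PPS_CAPTURECLEAR.  Userland (e.g.
 * ntpd) talks to the driver through the PPS_IOC_* ioctls handled by
 * pps_ioctl() above, typically PPS_IOC_FETCH to read the latest
 * timestamps and sequence numbers.
 */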
void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
	int fhard;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		ts.tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	ts.tv_nsec = (cputimer_freq64_nsec * delta) >> 32;
	ts.tv_sec += basetime.tv_sec;
	ts.tv_nsec += basetime.tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= cputimer_freq) {
			delta = (1000000000 * (tcount / cputimer_freq) +
				 cputimer_freq64_nsec *
				 (tcount % cputimer_freq)) >> 32;
		} else {
			delta = (cputimer_freq64_nsec * tcount) >> 32;
		}
		hardpps(tsp, delta);
	}
#endif
}