/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 * $DragonFly: src/sys/kern/kern_clock.c,v 1.62 2008/09/09 04:06:13 dillon Exp $
 */

#include "opt_ntp.h"
#include "opt_polling.h"
#include "opt_ifpoll.h"
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef DEVICE_POLLING
extern void init_device_poll_pcpu(int);
#endif

#ifdef IFPOLL_ENABLE
extern void ifpoll_init_pcpu(int);
#endif

#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks (void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif

#ifdef SMP
static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;
	size_t size = sizeof(struct kinfo_cputime);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &cputime_percpu[cpu], size)))
			break;
	}

	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");
#else
SYSCTL_STRUCT(_kern, OID_AUTO, cputime, CTLFLAG_RD, &cpu_time, kinfo_cputime,
    "CPU time statistics");
#endif

/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this with
 * microuptime().  microtime() is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 */
struct timespec boottime;	/* boot time (realtime) for reference only */
time_t time_second;		/* read-only 'passive' uptime in seconds */

/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpus and
 * interrupt races on UP systems.
 */
#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static volatile int basetime_index;

static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;
	int index;

	/*
	 * Because basetime data and index may be updated by another cpu,
	 * a load fence is required to ensure that the data we read has
	 * not been speculatively read relative to a possibly updated index.
	 */
	index = basetime_index;
	cpu_lfence();
	bt = &basetime[index];
	error = SYSCTL_OUT(req, bt, sizeof(*bt));
	return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");

static void hardclock(systimer_t info, struct intrframe *frame);
static void statclock(systimer_t info, struct intrframe *frame);
static void schedclock(systimer_t info, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

int	ticks;			/* system master ticks at hz */
int	clocks_running;		/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t	ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */

/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	/*psratio = profhz / stathz;*/
	initclocks_pcpu();
	clocks_running = 1;
}

/*
 * Called on a per-cpu basis
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = sys_cputimer->count();
	} else {
		/* XXX */
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

#ifdef DEVICE_POLLING
	init_device_poll_pcpu(gd->gd_cpuid);
#endif

#ifdef IFPOLL_ENABLE
	ifpoll_init_pcpu(gd->gd_cpuid);
#endif

	/*
	 * Use a non-queued periodic systimer to prevent multiple ticks from
	 * building up if the sysclock jumps forward (8254 gets reset).  The
	 * sysclock will never jump backwards.  Our time sync is based on
	 * the actual sysclock, not the ticks count.
	 */
	systimer_init_periodic_nq(&gd->gd_hardclock, hardclock, NULL, hz);
	systimer_init_periodic_nq(&gd->gd_statclock, statclock, NULL, stathz);
	/* XXX correct the frequency for scheduler / estcpu tests */
	systimer_init_periodic_nq(&gd->gd_schedclock, schedclock,
				  NULL, ESTCPUFREQ);
	crit_exit();
}

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
		nbt->tv_nsec += 1000000000;
		--nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
	 */
	cpu_sfence();
	basetime_index = ni;

	crit_exit();
}
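
/*
 * Editorial sketch (not part of the original source): the basetime FIFO
 * above depends on ordered publication.  The single writer (cpu #0, in a
 * critical section) fills the next slot before advancing the index, and
 * readers fence between loading the index and dereferencing the slot, so
 * a torn or stale timespec is never observed.  The pattern, distilled:
 */
#if 0	/* illustrative only */
	/* writer side (cpu #0, in a critical section) */
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	basetime[ni] = new_basetime;	/* fill the slot first */
	cpu_sfence();			/* make the slot data visible... */
	basetime_index = ni;		/* ...before publishing the index */

	/* reader side (any cpu) */
	index = basetime_index;		/* load the published index */
	cpu_lfence();			/* no speculative slot reads */
	bt = &basetime[index];		/* slot contents now stable */
#endif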

/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct globaldata *gd = mycpu;

	/*
	 * Realtime updates are per-cpu.  Note that timer corrections as
	 * returned by microtime() and friends make an additional adjustment
	 * using a system-wide 'basetime', but the running time is always
	 * taken from the per-cpu globaldata area.  Since the same clock
	 * is distributing (XXX SMP) to all cpus, the per-cpu timebases
	 * stay in synch.
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but that it is possible for
	 * it to temporarily get behind in the seconds if something in the
	 * system locks interrupts for a long period of time.  Since periodic
	 * timers count events, though, everything should resynch again
	 * immediately.
	 */
	cputicks = info->time - gd->gd_cpuclock_base;
	if (cputicks >= sys_cputimer->freq) {
		++gd->gd_time_seconds;
		gd->gd_cpuclock_base += sys_cputimer->freq;
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
		struct timespec *nbt;
		struct timespec nts;
		int leap;
		int ni;

		++ticks;

#if 0
		if (tco->tc_poll_pps)
			tco->tc_poll_pps(tco);
#endif

		/*
		 * Calculate the new basetime index.  We are in a critical
		 * section on cpu #0 and can safely play with basetime_index.
		 * Start with the current basetime and then make adjustments.
		 */
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		nbt = &basetime[ni];
		*nbt = basetime[basetime_index];

		/*
		 * Apply adjtime corrections.  (adjtime() API)
		 *
		 * adjtime() only runs on cpu #0 so our critical section is
		 * sufficient to access these variables.
		 */
		if (ntp_delta != 0) {
			nbt->tv_nsec += ntp_tick_delta;
			ntp_delta -= ntp_tick_delta;
			if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
			    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
				ntp_tick_delta = ntp_delta;
			}
		}

		/*
		 * Apply permanent frequency corrections.  (sysctl API)
		 */
		if (ntp_tick_permanent != 0) {
			ntp_tick_acc += ntp_tick_permanent;
			if (ntp_tick_acc >= (1LL << 32)) {
				nbt->tv_nsec += ntp_tick_acc >> 32;
				ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
			} else if (ntp_tick_acc <= -(1LL << 32)) {
				/*
				 * Negate ntp_tick_acc to avoid shifting
				 * the sign bit.
				 */
				nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
				ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
			}
		}

		if (nbt->tv_nsec >= 1000000000) {
			nbt->tv_sec++;
			nbt->tv_nsec -= 1000000000;
		} else if (nbt->tv_nsec < 0) {
			nbt->tv_sec--;
			nbt->tv_nsec += 1000000000;
		}
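
		/*
		 * Editorial worked example (not from the original source):
		 * ntp_tick_permanent is 32.32 fixed-point nanoseconds per
		 * tick.  To speed the clock up by 1 ppm at hz = 100 (ticks
		 * of 10000000 nsec), set ntp_tick_permanent to 10 << 32;
		 * the accumulator above then spills exactly 10 nsec into
		 * nbt->tv_nsec on every hardclock.
		 */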

		/*
		 * Another per-tick compensation.  (for ntp_adjtime() API)
		 */
		if (nsec_adj != 0) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				nbt->tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				nbt->tv_nsec -= -nsec_acc >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (nbt->tv_nsec >= 1000000000) {
				nbt->tv_nsec -= 1000000000;
				++nbt->tv_sec;
			} else if (nbt->tv_nsec < 0) {
				nbt->tv_nsec += 1000000000;
				--nbt->tv_sec;
			}
		}

		/************************************************************
		 *			LEAP SECOND CORRECTION		    *
		 ************************************************************
		 *
		 * Taking into account all the corrections made above, figure
		 * out the new real time.  If the seconds field has changed
		 * then apply any pending leap-second corrections.
		 */
		getnanotime_nbt(nbt, &nts);

		if (time_second != nts.tv_sec) {
			/*
			 * Apply leap second (sysctl API).  Adjust nts for
			 * changes so we do not have to call getnanotime_nbt
			 * again.
			 */
			if (ntp_leap_second) {
				if (ntp_leap_second == nts.tv_sec) {
					if (ntp_leap_insert) {
						nbt->tv_sec++;
						nts.tv_sec++;
					} else {
						nbt->tv_sec--;
						nts.tv_sec--;
					}
					ntp_leap_second--;
				}
			}

			/*
			 * Apply leap second (ntp_adjtime() API), calculate
			 * a new nsec_adj field.  ntp_update_second() returns
			 * nsec_adj as a per-second value but we need it as
			 * a per-tick value.
			 */
			leap = ntp_update_second(time_second, &nsec_adj);
			nsec_adj /= hz;
			nbt->tv_sec += leap;
			nts.tv_sec += leap;

			/*
			 * Update the time_second 'approximate time' global.
			 */
			time_second = nts.tv_sec;
		}

		/*
		 * Finally, our new basetime is ready to go live!
		 */
		cpu_sfence();
		basetime_index = ni;

		/*
		 * Figure out how badly the system is starved for memory
		 */
		vm_fault_ratecheck();
	}

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * The LWKT scheduler will generally allow the current process to
	 * return to user mode even if there are other runnable LWKT threads
	 * running in kernel mode on behalf of a user process.  This will
	 * ensure that those other threads have an opportunity to run in
	 * fairly short order (but not instantly).
	 */
	need_lwkt_resched();

	/*
	 * ITimer handling is per-tick, per-cpu.  I don't think ksignal()
	 * is mpsafe on curproc, so XXX get the mplock.
	 */
	if ((p = curproc) != NULL && try_mplock()) {
		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], tick) == 0)
			ksignal(p, SIGVTALRM);
		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_PROF], tick) == 0)
			ksignal(p, SIGPROF);
		rel_mplock();
	}
	setdelayed();
}

/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, struct intrframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	thread_t td;
	struct proc *p;
	int bump;
	struct timeval tv;
	struct timeval *stv;

	/*
	 * How big was our timeslice relative to the last time?
	 */
	microuptime(&tv);	/* mpsafe */
	stv = &mycpu->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
			(tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;

	td = curthread;
	p = td->td_proc;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flag & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cpu_time.cp_nice += bump;
		else
			cpu_time.cp_user += bump;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && frame) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 * can occur if ipi processing is done from a crit_exit().
		 */
		if (frame && CLKF_INTR(frame))
			td->td_iticks += bump;
		else
			td->td_sticks += bump;

		if (frame && CLKF_INTR(frame)) {
#ifdef DEBUG_PCTRACK
			do_pctrack(frame, PCTRACK_INT);
#endif
			cpu_time.cp_intr += bump;
		} else {
			if (td == &mycpu->gd_idlethread) {
				cpu_time.cp_idle += bump;
			} else {
#ifdef DEBUG_PCTRACK
				if (frame)
					do_pctrack(frame, PCTRACK_SYS);
#endif
				cpu_time.cp_sys += bump;
			}
		}
	}
}
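
/*
 * Editorial note (not from the original source): rather than charging a
 * fixed 1/stathz slice per statclock, the 'bump' computed above charges
 * the actual microseconds elapsed since the previous statclock on this
 * cpu, clamped to [0, 1000000].  For example, if 7900 usec elapsed, the
 * interrupted thread's td_*ticks field and the matching cpu_time bucket
 * are each credited 7900 usec.
 */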

#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */

static void
do_pctrack(struct intrframe *frame, int which)
{
	struct kinfo_pctrack *pctrack;

	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
		(void *)CLKF_PC(frame);
	++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_pcheader head;
	int error;
	int cpu;
	int ntrack;

	head.pc_ntrack = PCTRACK_SIZE;
	head.pc_arysize = PCTRACK_ARYSIZE;

	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
		return (error);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
					   sizeof(struct kinfo_pctrack));
			if (error)
				break;
		}
		if (error)
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif

/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, struct intrframe *frame)
{
	struct lwp *lp;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	if ((lp = lwkt_preempted_proc()) != NULL) {
		/*
		 * Account for cpu time used and hit the scheduler.  Note
		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
		 * HERE.
		 */
		++lp->lwp_cpticks;
		lp->lwp_proc->p_usched->schedulerclock(lp, info->periodic,
						       info->time);
	}
	if ((lp = curthread->td_lwp) != NULL) {
		/*
		 * Update resource usage integrals and maximums.
		 */
		if ((ru = &lp->lwp_proc->p_ru) &&
		    (vm = lp->lwp_proc->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			rss = pgtok(vmspace_resident_count(vm));
			if (ru->ru_maxrss < rss)
				ru->ru_maxrss = rss;
		}
	}
}

/*
 * Compute number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function will round the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		kprintf("tvtohz_high: negative time difference "
			"%ld sec %ld usec\n", sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)usec + (tick - 1)) / tick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}
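
/*
 * Editorial worked example (not from the original source): with hz = 100
 * and tick = 10000 usec, a 15000 usec timeval yields
 * (15000 + 9999) / 10000 = 2 ticks from the rounded-up division, plus 1
 * for the near-term-interrupt guard, so tvtohz_high() returns 3.  A
 * typical caller converts a relative timeout for tsleep():
 */
#if 0	/* illustrative only */
	error = tsleep(ident, 0, "examp", tvtohz_high(&tv));
#endif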

/*
 * Compute number of ticks for the specified amount of time, erroring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / tick);
	else
		ticks = INT_MAX;
	return (ticks);
}
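
/*
 * Editorial worked example (not from the original source): the same
 * 15000 usec timeval used above converts to 15000 / 10000 = 1 tick
 * here, versus 3 ticks from tvtohz_high(), since this variant rounds
 * down and adds no guard tick.
 */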

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			crit_enter();
			psdiv = psratio;
			setstatclockrate(profhz);
			crit_exit();
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			crit_enter();
			psdiv = 1;
			setstatclockrate(stathz);
			crit_exit();
		}
#endif
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = tick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo", "");

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we deal with the case.  For
 * uniformity we deal with it in the usec case too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
 */
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}
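
/*
 * Editorial worked example (not from the original source): freq64_usec
 * is the 32.32 fixed-point value (1000000 << 32) / freq, so the
 * multiply-and-shift above computes delta * 1000000 / freq without a
 * 64 bit division.  For the i8254 at freq = 1193182, a delta of 596591
 * counts (half a second) gives (freq64_usec * 596591) >> 32 = 499999
 * usec, just under the exact 500000 due to fixed-point truncation.
 * Because delta has been reduced modulo freq first, the product cannot
 * overflow 64 bits.
 */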

void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

/*
 * realtime routines
 */
void
getmicrotime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

/*
 * note: this is not exactly synchronized with real time.  To do that we
 * would have to do what microtime does and check for a nanoseconds
 * overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}
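
/*
 * Editorial sketch (not from the original source; names are illustrative):
 * a capture-capable driver typically declares a pps_state, advertises its
 * capture capabilities, calls pps_init(), and then feeds raw sys_cputimer
 * counts to pps_event() from its capture interrupt:
 */
#if 0	/* illustrative only */
static struct pps_state example_pps;

static void
example_attach(void)
{
	example_pps.ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR;
	pps_init(&example_pps);
}

static void
example_capture_intr(void)
{
	pps_event(&example_pps, sys_cputimer->count(), PPS_CAPTUREASSERT);
}
#endif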

void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
	int fhard;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		ts.tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	ts.tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
	bt = &basetime[basetime_index];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= sys_cputimer->freq) {
			delta = (1000000000 * (tcount / sys_cputimer->freq) +
				 sys_cputimer->freq64_nsec *
				 (tcount % sys_cputimer->freq)) >> 32;
		} else {
			delta = (sys_cputimer->freq64_nsec * tcount) >> 32;
		}
		hardpps(tsp, delta);
	}
#endif
}

/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
int64_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		return (rdtsc() + tsc_frequency * ns / (int64_t)1000000000);
	}
#endif
	return(-1);
}

/*
 * Compare the tsc against the passed target
 *
 * Returns +1 if the target has been reached
 * Returns 0 if the target has not yet been reached
 * Returns -1 if the TSC is not supported.
 *
 * Typical use: while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		if ((int64_t)(target - rdtsc()) <= 0)
			return(1);
		return(0);
	}
#endif
	/*
	 * Moved outside the #if so the function returns a value even
	 * when rdtsc support is compiled out.
	 */
	return(-1);
}
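
/*
 * Editorial usage sketch (not from the original source): a bounded
 * busy-wait built from the two TSC helpers above.  example_tsc_spin()
 * is a hypothetical name; callers must handle the no-TSC case.
 */
#if 0	/* illustrative only */
static void
example_tsc_spin(int ns)
{
	int64_t target = tsc_get_target(ns);

	if (target < 0)
		return;			/* no TSC, caller must fall back */
	while (tsc_test_target(target) == 0)
		cpu_pause();		/* be polite to the other hw thread */
}
#endif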