/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 */

#include "opt_ntp.h"
#include "opt_ifpoll.h"
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef IFPOLL_ENABLE
extern void	ifpoll_init_pcpu(int);
#endif

#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks (void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif

static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;
	size_t size = sizeof(struct kinfo_cputime);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &cputime_percpu[cpu], size)))
			break;
	}

	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");

static int
sysctl_cp_time(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[5] = {0};
	int cpu, error = 0;
	size_t size = sizeof(cpu_states);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] += cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[cpu].cp_idle;
	}

	error = SYSCTL_OUT(req, cpu_states, size);

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_time, "LU", "CPU time statistics");

/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this with
 * microuptime().  microtime() is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 */
struct timespec boottime;	/* boot time (realtime) for reference only */
time_t time_second;		/* read-only 'passive' uptime in seconds */

/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpus and
 * interrupt races on UP systems.
 */
#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static volatile int basetime_index;

static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;
	int index;

	/*
	 * Because basetime data and index may be updated by another cpu,
	 * a load fence is required to ensure that the data we read has
	 * not been speculatively read relative to a possibly updated index.
	 */
	index = basetime_index;
	cpu_lfence();
	bt = &basetime[index];
	error = SYSCTL_OUT(req, bt, sizeof(*bt));
	return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");

static void hardclock(systimer_t info, int, struct intrframe *frame);
static void statclock(systimer_t info, int, struct intrframe *frame);
static void schedclock(systimer_t info, int, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

int	ticks;			/* system master ticks at hz */
int	clocks_running;		/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */
int	sched_ticks;		/* global schedule clock ticks */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */
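
/*
 * Illustrative sketch (not compiled in) of the nsec << 32 fixed-point
 * format used by ntp_tick_permanent, ntp_tick_acc, and nsec_adj above:
 * the high 32 bits hold whole nanoseconds and the low 32 bits hold the
 * fraction, letting sub-nanosecond per-tick rates accumulate exactly.
 */
#if 0
	int64_t adj = (3LL << 32) / 2;	/* +1.5 nsec per tick */
	int64_t acc = 0;		/* fractional accumulator */
	int applied = 0;		/* whole nsec folded out */
	int i;

	for (i = 0; i < 4; ++i) {
		acc += adj;
		if (acc >= (1LL << 32)) {
			applied += (int)(acc >> 32);
			acc &= 0xFFFFFFFFLL;
		}
	}
	/* after 4 ticks, applied == 6 nsec (4 * 1.5) with no rounding loss */
#endif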

/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	/*psratio = profhz / stathz;*/
	initclocks_pcpu();
	clocks_running = 1;
}

/*
 * Called on a per-cpu basis
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = sys_cputimer->count();
	} else {
		/* XXX */
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

#ifdef IFPOLL_ENABLE
	ifpoll_init_pcpu(gd->gd_cpuid);
#endif

	/*
	 * Use a non-queued periodic systimer to prevent multiple ticks from
	 * building up if the sysclock jumps forward (8254 gets reset).  The
	 * sysclock will never jump backwards.  Our time sync is based on
	 * the actual sysclock, not the ticks count.
	 */
	systimer_init_periodic_nq(&gd->gd_hardclock, hardclock, NULL, hz);
	systimer_init_periodic_nq(&gd->gd_statclock, statclock, NULL, stathz);
	/* XXX correct the frequency for scheduler / estcpu tests */
	systimer_init_periodic_nq(&gd->gd_schedclock, schedclock,
				  NULL, ESTCPUFREQ);
	crit_exit();
}

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base;
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
		nbt->tv_nsec += 1000000000;
		--nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds, which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
	 */
	cpu_sfence();
	basetime_index = ni;

	crit_exit();
}
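
/*
 * Illustrative sketch (not compiled in) of the reader side of the
 * basetime FIFO.  The writer (cpu #0, as in set_timeofday() above and
 * hardclock() below) fills the next slot, issues cpu_sfence(), and only
 * then publishes the new index; a reader loads the index, issues
 * cpu_lfence(), and only then dereferences the slot.  The 16-deep ring
 * gives readers ample time before a slot is reused.
 */
#if 0
	struct timespec snap;
	int index;

	index = basetime_index;		/* index published by cpu #0 */
	cpu_lfence();			/* order index load before data load */
	snap = basetime[index];		/* slot is stable until the writer
					 * cycles through 15 more updates */
#endif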

/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct globaldata *gd = mycpu;

	/*
	 * Realtime updates are per-cpu.  Note that timer corrections as
	 * returned by microtime() and friends make an additional adjustment
	 * using a system-wide 'basetime', but the running time is always
	 * taken from the per-cpu globaldata area.  Since the same clock
	 * is distributing (XXX SMP) to all cpus, the per-cpu timebases
	 * stay in sync.
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but that it is possible for
	 * it to temporarily get behind in the seconds if something in the
	 * system locks interrupts for a long period of time.  Since periodic
	 * timers count events, though, everything should resync again
	 * immediately.
	 */
	cputicks = info->time - gd->gd_cpuclock_base;
	if (cputicks >= sys_cputimer->freq) {
		++gd->gd_time_seconds;
		gd->gd_cpuclock_base += sys_cputimer->freq;
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
		struct timespec *nbt;
		struct timespec nts;
		int leap;
		int ni;

		++ticks;

#if 0
		if (tco->tc_poll_pps)
			tco->tc_poll_pps(tco);
#endif

		/*
		 * Calculate the new basetime index.  We are in a critical
		 * section on cpu #0 and can safely play with basetime_index.
		 * Start with the current basetime and then make adjustments.
		 */
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		nbt = &basetime[ni];
		*nbt = basetime[basetime_index];

		/*
		 * Apply adjtime corrections.  (adjtime() API)
		 *
		 * adjtime() only runs on cpu #0 so our critical section is
		 * sufficient to access these variables.
		 */
		if (ntp_delta != 0) {
			nbt->tv_nsec += ntp_tick_delta;
			ntp_delta -= ntp_tick_delta;
			if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
			    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
				ntp_tick_delta = ntp_delta;
			}
		}

		/*
		 * Apply permanent frequency corrections.  (sysctl API)
		 */
		if (ntp_tick_permanent != 0) {
			ntp_tick_acc += ntp_tick_permanent;
			if (ntp_tick_acc >= (1LL << 32)) {
				nbt->tv_nsec += ntp_tick_acc >> 32;
				ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
			} else if (ntp_tick_acc <= -(1LL << 32)) {
				/*
				 * Negate ntp_tick_acc to avoid shifting
				 * the sign bit.
				 */
				nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
				ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
			}
		}

		if (nbt->tv_nsec >= 1000000000) {
			nbt->tv_sec++;
			nbt->tv_nsec -= 1000000000;
		} else if (nbt->tv_nsec < 0) {
			nbt->tv_sec--;
			nbt->tv_nsec += 1000000000;
		}

		/*
		 * Another per-tick compensation.  (for ntp_adjtime() API)
		 */
		if (nsec_adj != 0) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				nbt->tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				nbt->tv_nsec -= -nsec_acc >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (nbt->tv_nsec >= 1000000000) {
				nbt->tv_nsec -= 1000000000;
				++nbt->tv_sec;
			} else if (nbt->tv_nsec < 0) {
				nbt->tv_nsec += 1000000000;
				--nbt->tv_sec;
			}
		}

		/************************************************************
		 *			LEAP SECOND CORRECTION		    *
		 ************************************************************
		 *
		 * Taking into account all the corrections made above, figure
		 * out the new real time.  If the seconds field has changed
		 * then apply any pending leap-second corrections.
		 */
		getnanotime_nbt(nbt, &nts);

		if (time_second != nts.tv_sec) {
			/*
			 * Apply leap second (sysctl API).  Adjust nts for
			 * changes so we do not have to call getnanotime_nbt
			 * again.
			 */
			if (ntp_leap_second) {
				if (ntp_leap_second == nts.tv_sec) {
					if (ntp_leap_insert) {
						nbt->tv_sec++;
						nts.tv_sec++;
					} else {
						nbt->tv_sec--;
						nts.tv_sec--;
					}
					ntp_leap_second--;
				}
			}

			/*
			 * Apply leap second (ntp_adjtime() API), calculate
			 * a new nsec_adj field.  ntp_update_second() returns
			 * nsec_adj as a per-second value but we need it as
			 * a per-tick value.
			 */
			leap = ntp_update_second(time_second, &nsec_adj);
			nsec_adj /= hz;
			nbt->tv_sec += leap;
			nts.tv_sec += leap;

			/*
			 * Update the time_second 'approximate time' global.
			 */
			time_second = nts.tv_sec;
		}

		/*
		 * Finally, our new basetime is ready to go live!
		 */
		cpu_sfence();
		basetime_index = ni;
	}

	/*
	 * lwkt thread scheduler fair queueing
	 */
	lwkt_schedulerclock(curthread);

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * ITimer handling is per-tick, per-cpu.
	 *
	 * We must acquire the per-process token in order for ksignal()
	 * to be non-blocking.  For the moment this requires an AST fault;
	 * the ksignal() cannot be safely issued from this hard interrupt.
	 *
	 * XXX Even the trytoken here isn't right, and itimer operation in
	 *     a multi threaded environment is going to be weird at the
	 *     very least.
	 */
	if ((p = curproc) != NULL && lwkt_trytoken(&p->p_token)) {
		crit_enter_hard();
		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) {
			p->p_flags |= P_SIGVTALRM;
			need_user_resched();
		}
		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) {
			p->p_flags |= P_SIGPROF;
			need_user_resched();
		}
		crit_exit_hard();
		lwkt_reltoken(&p->p_token);
	}
	setdelayed();
}
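
/*
 * Illustrative sketch (not compiled in) of how the adjtime() stepping
 * in hardclock() above converges.  With a remaining one-time correction
 * of +2500 nsec and a rate of +1000 nsec per tick, successive ticks
 * apply +1000, +1000, then the clamp reduces the rate to the +500
 * remainder, which the final tick applies exactly.
 */
#if 0
	int64_t delta = 2500;	/* remaining correction (ntp_delta) */
	int32_t rate = 1000;	/* per-tick rate (ntp_tick_delta) */
	int64_t applied = 0;

	while (delta != 0) {
		applied += rate;	/* nbt->tv_nsec += ntp_tick_delta */
		delta -= rate;		/* ntp_delta -= ntp_tick_delta */
		if (delta > 0 && delta < rate)
			rate = (int32_t)delta;	/* clamp, as above */
	}
	/* applied == 2500 after three ticks: +1000, +1000, +500 */
#endif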

/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	thread_t td;
	struct proc *p;
	int bump;
	struct timeval tv;
	struct timeval *stv;

	/*
	 * How big was our timeslice relative to the last time?
	 */
	microuptime(&tv);	/* mpsafe */
	stv = &mycpu->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
			(tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;

	td = curthread;
	p = td->td_proc;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flags & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cpu_time.cp_nice += bump;
		else
			cpu_time.cp_user += bump;
	} else {
		int intr_nest = mycpu->gd_intr_nesting_level;

		if (in_ipi) {
			/*
			 * IPI processing code will bump gd_intr_nesting_level
			 * up by one, which breaks the following CLKF_INTR
			 * testing, so we subtract one here.
			 */
			--intr_nest;
		}
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && frame) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif

#define IS_INTR_RUNNING	((frame && CLKF_INTR(intr_nest)) || CLKF_INTR_TD(td))

		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 * can occur if ipi processing is done from a crit_exit().
		 */
		if (IS_INTR_RUNNING)
			td->td_iticks += bump;
		else
			td->td_sticks += bump;

		if (IS_INTR_RUNNING) {
			/*
			 * If we interrupted an interrupt thread, well,
			 * count it as interrupt time.
			 */
#ifdef DEBUG_PCTRACK
			if (frame)
				do_pctrack(frame, PCTRACK_INT);
#endif
			cpu_time.cp_intr += bump;
		} else {
			if (td == &mycpu->gd_idlethread) {
				/*
				 * Even if the current thread is the idle
				 * thread it could be due to token contention
				 * in the LWKT scheduler.  Count such as
				 * system time.
				 */
				if (mycpu->gd_reqflags & RQF_AST_LWKT_RESCHED)
					cpu_time.cp_sys += bump;
				else
					cpu_time.cp_idle += bump;
			} else {
				/*
				 * System thread was running.
				 */
#ifdef DEBUG_PCTRACK
				if (frame)
					do_pctrack(frame, PCTRACK_SYS);
#endif
				cpu_time.cp_sys += bump;
			}
		}

#undef IS_INTR_RUNNING
	}
}

#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */

static void
do_pctrack(struct intrframe *frame, int which)
{
	struct kinfo_pctrack *pctrack;

	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
		(void *)CLKF_PC(frame);
	++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_pcheader head;
	int error;
	int cpu;
	int ntrack;

	head.pc_ntrack = PCTRACK_SIZE;
	head.pc_arysize = PCTRACK_ARYSIZE;

	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
		return (error);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
					   sizeof(struct kinfo_pctrack));
			if (error)
				break;
		}
		if (error)
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif

/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	struct lwp *lp;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	if ((lp = lwkt_preempted_proc()) != NULL) {
		/*
		 * Account for cpu time used and hit the scheduler.  Note
		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
		 * HERE.
		 */
		++lp->lwp_cpticks;
		usched_schedulerclock(lp, info->periodic, info->time);
	} else {
		usched_schedulerclock(NULL, info->periodic, info->time);
	}
	if ((lp = curthread->td_lwp) != NULL) {
		/*
		 * Update resource usage integrals and maximums.
		 */
		if ((ru = &lp->lwp_proc->p_ru) &&
		    (vm = lp->lwp_proc->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			if (lwkt_trytoken(&vm->vm_map.token)) {
				rss = pgtok(vmspace_resident_count(vm));
				if (ru->ru_maxrss < rss)
					ru->ru_maxrss = rss;
				lwkt_reltoken(&vm->vm_map.token);
			}
		}
	}
	/* Increment the global sched_ticks */
	if (mycpu->gd_cpuid == 0)
		++sched_ticks;
}

/*
 * Compute the number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function will round the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		kprintf("tvtohz_high: negative time difference "
			"%ld sec %ld usec\n",
			sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)usec + (ustick - 1)) / ustick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

int
tstohz_high(struct timespec *ts)
{
	int ticks;
	long sec, nsec;

	sec = ts->tv_sec;
	nsec = ts->tv_nsec;
	if (nsec < 0) {
		sec--;
		nsec += 1000000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (nsec > 0) {
			sec++;
			nsec -= 1000000000;
		}
		kprintf("tstohz_high: negative time difference "
			"%ld sec %ld nsec\n",
			sec, nsec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)nsec + (nstick - 1)) / nstick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

/*
 * Compute the number of ticks for the specified amount of time, erring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
	else
		ticks = INT_MAX;
	return (ticks);
}

int
tstohz_low(struct timespec *ts)
{
	int ticks;
	long sec;

	sec = ts->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
	else
		ticks = INT_MAX;
	return (ticks);
}
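
/*
 * Illustrative sketch (not compiled in) of the rounding difference
 * between the _high and _low conversions, assuming hz = 100 so that
 * ustick = 10000 usec.
 */
#if 0
	struct timeval tv = { 0, 15000 };	/* 15 msec timeout */

	/* 0 * 100 + (15000 + 9999) / 10000 + 1 == 3 ticks, never early */
	KKASSERT(tvtohz_high(&tv) == 3);

	/* 0 * 100 + 15000 / 10000 == 1 tick, never late */
	KKASSERT(tvtohz_low(&tv) == 1);
#endif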

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flags & P_PROFIL) == 0) {
		p->p_flags |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			crit_enter();
			psdiv = psratio;
			setstatclockrate(profhz);
			crit_exit();
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 *
 * caller must hold p->p_token
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flags & P_PROFIL) {
		p->p_flags &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			crit_enter();
			psdiv = 1;
			setstatclockrate(stathz);
			crit_exit();
		}
#endif
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = ustick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo", "");

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we deal with the case.  For
 * uniformity we deal with the case in the usec case too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
 */
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}
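
/*
 * Illustrative sketch (not compiled in) of choosing between the
 * variants described above.  getnanouptime() reuses the last hardclock
 * timestamp and may be up to 1/hz stale but is cheap; nanouptime()
 * (below) reads the hardware timer for full precision.  Interval
 * measurement wants the "up" flavors, which are immune to
 * set_timeofday() stepping the realtime clock.  do_some_work() is a
 * hypothetical workload.
 */
#if 0
	struct timespec t1, t2;

	nanouptime(&t1);		/* precise, monotonic start */
	do_some_work();
	nanouptime(&t2);		/* precise, monotonic end */
	timespecsub(&t2, &t1);		/* t2 now holds the elapsed time */
#endif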

void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

/*
 * realtime routines
 */
void
getmicrotime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

/*
 * note: this is not exactly synchronized with real time.  To do that we
 * would have to do what microtime does and check for a nanoseconds
 * overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}
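
/*
 * Illustrative sketch (not compiled in) of how a capture-capable
 * driver typically uses the PPS entry points above and below; sc and
 * sc_pps are hypothetical driver softc fields.
 */
#if 0
	/* at attach time: declare capabilities, then initialize */
	sc->sc_pps.ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR;
	pps_init(&sc->sc_pps);

	/* from the capture interrupt: timestamp the edge */
	pps_event(&sc->sc_pps, sys_cputimer->count(), PPS_CAPTUREASSERT);

	/* from the driver's ioctl routine: forward PPS ioctls */
	return (pps_ioctl(cmd, data, &sc->sc_pps));
#endif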

void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
	int fhard;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		ts.tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	ts.tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
	bt = &basetime[basetime_index];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= sys_cputimer->freq) {
			delta = (1000000000 * (tcount / sys_cputimer->freq) +
				 sys_cputimer->freq64_nsec *
				 (tcount % sys_cputimer->freq)) >> 32;
		} else {
			delta = (sys_cputimer->freq64_nsec * tcount) >> 32;
		}
		hardpps(tsp, delta);
	}
#endif
}

/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
int64_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		return (rdtsc() + tsc_frequency * ns / (int64_t)1000000000);
	}
#endif
	return(-1);
}

/*
 * Compare the tsc against the passed target
 *
 * Returns +1 if the target has been reached
 * Returns 0 if the target has not yet been reached
 * Returns -1 if the TSC is not supported.
 *
 * Typical use:		while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		if ((int64_t)(target - rdtsc()) <= 0)
			return(1);
		return(0);
	}
#endif
	return(-1);
}

/*
 * Delay the specified number of nanoseconds using the tsc.  This function
 * returns immediately if the TSC is not supported.  At least one cpu_pause()
 * will be issued.
 */
void
tsc_delay(int ns)
{
	int64_t clk;

	clk = tsc_get_target(ns);
	cpu_pause();
	while (tsc_test_target(clk) == 0)
		cpu_pause();
}
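
/*
 * Illustrative sketch (not compiled in) of bounding a hardware poll
 * with the TSC helpers above.  REG_READY() and the 10 usec budget are
 * hypothetical.
 */
#if 0
	int64_t target = tsc_get_target(10000);	/* 10 usec from now */

	while (!REG_READY()) {
		if (target < 0 || tsc_test_target(target) != 0)
			break;		/* no TSC, or budget exhausted */
		cpu_pause();
	}
#endif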