/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 */

#include "opt_ntp.h"
#include "opt_ifpoll.h"
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef IFPOLL_ENABLE
extern void ifpoll_init_pcpu(int);
#endif

#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks (void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif

static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;
	size_t size = sizeof(struct kinfo_cputime);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &cputime_percpu[cpu], size)))
			break;
	}

	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");

static int
sysctl_cp_time(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[5] = {0};
	int cpu, error = 0;
	size_t size = sizeof(cpu_states);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] += cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[cpu].cp_idle;
	}

	error = SYSCTL_OUT(req, cpu_states, size);

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_time, "LU", "CPU time statistics");
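
/*
 * Illustrative userland consumer (a sketch, not code from this file):
 * the aggregated counters exported above can be fetched with
 * sysctlbyname(3), using the same CP_* indices used above:
 *
 *	long cp_time[5];
 *	size_t len = sizeof(cp_time);
 *
 *	if (sysctlbyname("kern.cp_time", cp_time, &len, NULL, 0) == 0)
 *		printf("user %ld idle %ld\n",
 *		    cp_time[CP_USER], cp_time[CP_IDLE]);
 *
 * As noted above, programs normally sample twice and compare deltas
 * rather than interpreting the absolute values.
 */
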
/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this with
 * microuptime().  microtime() is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 */
struct timespec boottime;	/* boot time (realtime) for reference only */
time_t time_second;		/* read-only 'passive' uptime in seconds */

/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpu's and
 * interrupt races on UP systems.
 */
#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static volatile int basetime_index;
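
/*
 * Sketch of the lock-free protocol implied by the FIFO (the real code
 * is in sysctl_get_basetime() below and in set_timeofday()/hardclock()):
 *
 *	writer (cpu #0 only)              reader (any cpu)
 *	--------------------------        ------------------------
 *	ni = (basetime_index + 1) &       index = basetime_index;
 *	     BASETIME_ARYMASK;            cpu_lfence();
 *	basetime[ni] = <new contents>;    bt = &basetime[index];
 *	cpu_sfence();                     <use *bt>
 *	basetime_index = ni;
 *
 * The sfence before publishing the index guarantees a reader never
 * sees a partially updated timespec, and the FIFO depth gives readers
 * time to finish with an old slot before it is recycled.
 */
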
static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;
	int index;

	/*
	 * Because basetime data and index may be updated by another cpu,
	 * a load fence is required to ensure that the data we read has
	 * not been speculatively read relative to a possibly updated index.
	 */
	index = basetime_index;
	cpu_lfence();
	bt = &basetime[index];
	error = SYSCTL_OUT(req, bt, sizeof(*bt));
	return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");

static void hardclock(systimer_t info, int, struct intrframe *frame);
static void statclock(systimer_t info, int, struct intrframe *frame);
static void schedclock(systimer_t info, int, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

int	ticks;			/* system master ticks at hz */
int	clocks_running;		/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */
int	sched_ticks;		/* global schedule clock ticks */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */

/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	/*psratio = profhz / stathz;*/
	initclocks_pcpu();
	clocks_running = 1;
}

/*
 * Called on a per-cpu basis
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = sys_cputimer->count();
	} else {
		/* XXX */
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

#ifdef IFPOLL_ENABLE
	ifpoll_init_pcpu(gd->gd_cpuid);
#endif

	/*
	 * Use a non-queued periodic systimer to prevent multiple ticks from
	 * building up if the sysclock jumps forward (8254 gets reset).  The
	 * sysclock will never jump backwards.  Our time sync is based on
	 * the actual sysclock, not the ticks count.
	 */
	systimer_init_periodic_nq(&gd->gd_hardclock, hardclock, NULL, hz);
	systimer_init_periodic_nq(&gd->gd_statclock, statclock, NULL, stathz);
	/* XXX correct the frequency for scheduler / estcpu tests */
	systimer_init_periodic_nq(&gd->gd_schedclock, schedclock,
				  NULL, ESTCPUFREQ);
	crit_exit();
}

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
		nbt->tv_nsec += 1000000000;
		--nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
	 */
	cpu_sfence();
	basetime_index = ni;

	crit_exit();
}
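
/*
 * Worked example for set_timeofday() with illustrative numbers: if
 * nanouptime() currently returns 100.300000000 and the caller supplies
 * ts = 1000000000.000000000, the new basetime slot is set to
 * 999999899.700000000.  The realtime routines below then simply
 * compute uptime + basetime to reproduce the requested time of day.
 */
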
/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct globaldata *gd = mycpu;

	/*
	 * Realtime updates are per-cpu.  Note that timer corrections as
	 * returned by microtime() and friends make an additional adjustment
	 * using a system-wide 'basetime', but the running time is always
	 * taken from the per-cpu globaldata area.  Since the same clock
	 * is distributed (XXX SMP) to all cpus, the per-cpu timebases
	 * stay in synch.
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but it is possible for it
	 * to temporarily get behind in the seconds if something in the
	 * system locks interrupts for a long period of time.  Since
	 * periodic timers count events, though, everything should resynch
	 * again immediately.
	 */
	cputicks = info->time - gd->gd_cpuclock_base;
	if (cputicks >= sys_cputimer->freq) {
		++gd->gd_time_seconds;
		gd->gd_cpuclock_base += sys_cputimer->freq;
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
		struct timespec *nbt;
		struct timespec nts;
		int leap;
		int ni;

		++ticks;

#if 0
		if (tco->tc_poll_pps)
			tco->tc_poll_pps(tco);
#endif

		/*
		 * Calculate the new basetime index.  We are in a critical
		 * section on cpu #0 and can safely play with basetime_index.
		 * Start with the current basetime and then make adjustments.
		 */
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		nbt = &basetime[ni];
		*nbt = basetime[basetime_index];

		/*
		 * Apply adjtime corrections.  (adjtime() API)
		 *
		 * adjtime() only runs on cpu #0 so our critical section is
		 * sufficient to access these variables.
		 */
		if (ntp_delta != 0) {
			nbt->tv_nsec += ntp_tick_delta;
			ntp_delta -= ntp_tick_delta;
			if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
			    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
				ntp_tick_delta = ntp_delta;
			}
		}

		/*
		 * Apply permanent frequency corrections.  (sysctl API)
		 */
		if (ntp_tick_permanent != 0) {
			ntp_tick_acc += ntp_tick_permanent;
			if (ntp_tick_acc >= (1LL << 32)) {
				nbt->tv_nsec += ntp_tick_acc >> 32;
				ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
			} else if (ntp_tick_acc <= -(1LL << 32)) {
				/*
				 * Negate ntp_tick_acc to avoid shifting
				 * the sign bit.
				 */
				nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
				ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
			}
		}
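
		/*
		 * Worked example of the 32.32 fixed point math above,
		 * with illustrative values: at hz = 100 a permanent
		 * correction of +1 usec per second is 10 nsec per tick,
		 * i.e. ntp_tick_permanent = 10LL << 32.  Each tick the
		 * whole-nanosecond part (ntp_tick_acc >> 32) is folded
		 * into nbt->tv_nsec and the sub-nanosecond remainder is
		 * carried in ntp_tick_acc for later ticks.
		 */
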
		if (nbt->tv_nsec >= 1000000000) {
			nbt->tv_sec++;
			nbt->tv_nsec -= 1000000000;
		} else if (nbt->tv_nsec < 0) {
			nbt->tv_sec--;
			nbt->tv_nsec += 1000000000;
		}

		/*
		 * Another per-tick compensation.  (for ntp_adjtime() API)
		 */
		if (nsec_adj != 0) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				nbt->tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				nbt->tv_nsec -= -nsec_acc >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (nbt->tv_nsec >= 1000000000) {
				nbt->tv_nsec -= 1000000000;
				++nbt->tv_sec;
			} else if (nbt->tv_nsec < 0) {
				nbt->tv_nsec += 1000000000;
				--nbt->tv_sec;
			}
		}

		/************************************************************
		 *			LEAP SECOND CORRECTION		    *
		 ************************************************************
		 *
		 * Taking into account all the corrections made above, figure
		 * out the new real time.  If the seconds field has changed
		 * then apply any pending leap-second corrections.
		 */
		getnanotime_nbt(nbt, &nts);

		if (time_second != nts.tv_sec) {
			/*
			 * Apply leap second (sysctl API).  Adjust nts for
			 * changes so we do not have to call getnanotime_nbt
			 * again.
			 */
			if (ntp_leap_second) {
				if (ntp_leap_second == nts.tv_sec) {
					if (ntp_leap_insert) {
						nbt->tv_sec++;
						nts.tv_sec++;
					} else {
						nbt->tv_sec--;
						nts.tv_sec--;
					}
					ntp_leap_second--;
				}
			}

			/*
			 * Apply leap second (ntp_adjtime() API), calculate
			 * a new nsec_adj field.  ntp_update_second() returns
			 * nsec_adj as a per-second value but we need it as
			 * a per-tick value.
			 */
			leap = ntp_update_second(time_second, &nsec_adj);
			nsec_adj /= hz;
			nbt->tv_sec += leap;
			nts.tv_sec += leap;

			/*
			 * Update the time_second 'approximate time' global.
			 */
			time_second = nts.tv_sec;
		}

		/*
		 * Finally, our new basetime is ready to go live!
		 */
		cpu_sfence();
		basetime_index = ni;
	}

	/*
	 * lwkt thread scheduler fair queueing
	 */
	lwkt_schedulerclock(curthread);

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * ITimer handling is per-tick, per-cpu.
	 *
	 * We must acquire the per-process token in order for ksignal()
	 * to be non-blocking.  For the moment this requires an AST fault,
	 * the ksignal() cannot be safely issued from this hard interrupt.
	 *
	 * XXX Even the trytoken here isn't right, and itimer operation in
	 *     a multi threaded environment is going to be weird at the
	 *     very least.
	 */
	if ((p = curproc) != NULL && lwkt_trytoken(&p->p_token)) {
		crit_enter_hard();
		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) {
			p->p_flags |= P_SIGVTALRM;
			need_user_resched();
		}
		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) {
			p->p_flags |= P_SIGPROF;
			need_user_resched();
		}
		crit_exit_hard();
		lwkt_reltoken(&p->p_token);
	}
	setdelayed();
}

/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typically 100Hz).  It is
 * per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	thread_t td;
	struct proc *p;
	int bump;
	struct timeval tv;
	struct timeval *stv;

	/*
	 * How big was our timeslice relative to the last time?
	 */
	microuptime(&tv);	/* mpsafe */
	stv = &mycpu->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
			(tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;

	td = curthread;
	p = td->td_proc;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flags & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cpu_time.cp_nice += bump;
		else
			cpu_time.cp_user += bump;
	} else {
		int intr_nest = mycpu->gd_intr_nesting_level;

		if (in_ipi) {
			/*
			 * IPI processing code will bump gd_intr_nesting_level
			 * up by one, which breaks the CLKF_INTR test below,
			 * so we subtract one here.
			 */
			--intr_nest;
		}
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && frame) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif

#define IS_INTR_RUNNING	((frame && CLKF_INTR(intr_nest)) || CLKF_INTR_TD(td))

		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 * can occur if ipi processing is done from a crit_exit().
		 */
		if (IS_INTR_RUNNING)
			td->td_iticks += bump;
		else
			td->td_sticks += bump;

		if (IS_INTR_RUNNING) {
			/*
			 * If we interrupted an interrupt thread, well,
			 * count it as interrupt time.
			 */
#ifdef DEBUG_PCTRACK
			if (frame)
				do_pctrack(frame, PCTRACK_INT);
#endif
			cpu_time.cp_intr += bump;
		} else {
			if (td == &mycpu->gd_idlethread) {
				/*
				 * Even if the current thread is the idle
				 * thread it could be due to token contention
				 * in the LWKT scheduler.  Count such as
				 * system time.
				 */
				if (mycpu->gd_reqflags & RQF_AST_LWKT_RESCHED)
					cpu_time.cp_sys += bump;
				else
					cpu_time.cp_idle += bump;
			} else {
				/*
				 * System thread was running.
				 */
#ifdef DEBUG_PCTRACK
				if (frame)
					do_pctrack(frame, PCTRACK_SYS);
#endif
				cpu_time.cp_sys += bump;
			}
		}

#undef IS_INTR_RUNNING
	}
}

#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */

static void
do_pctrack(struct intrframe *frame, int which)
{
	struct kinfo_pctrack *pctrack;

	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
		(void *)CLKF_PC(frame);
	++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_pcheader head;
	int error;
	int cpu;
	int ntrack;

	head.pc_ntrack = PCTRACK_SIZE;
	head.pc_arysize = PCTRACK_ARYSIZE;

	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
		return (error);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
					   sizeof(struct kinfo_pctrack));
			if (error)
				break;
		}
		if (error)
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif

/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	struct lwp *lp;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	if ((lp = lwkt_preempted_proc()) != NULL) {
		/*
		 * Account for cpu time used and hit the scheduler.  Note
		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
		 * HERE.
		 */
		++lp->lwp_cpticks;
		usched_schedulerclock(lp, info->periodic, info->time);
	} else {
		usched_schedulerclock(NULL, info->periodic, info->time);
	}
	if ((lp = curthread->td_lwp) != NULL) {
		/*
		 * Update resource usage integrals and maximums.
		 */
		if ((ru = &lp->lwp_proc->p_ru) &&
		    (vm = lp->lwp_proc->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			if (lwkt_trytoken(&vm->vm_map.token)) {
				rss = pgtok(vmspace_resident_count(vm));
				if (ru->ru_maxrss < rss)
					ru->ru_maxrss = rss;
				lwkt_reltoken(&vm->vm_map.token);
			}
		}
	}
	/* Increment the global sched_ticks */
	if (mycpu->gd_cpuid == 0)
		++sched_ticks;
}

/*
 * Compute the number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and is guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function rounds the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		kprintf("tvtohz_high: negative time difference "
			"%ld sec %ld usec\n",
			sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)usec + (ustick - 1)) / ustick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}
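
/*
 * Worked example (illustrative): with hz = 100 (ustick = 10000 usec)
 * a timeval of 1.500000 seconds yields
 *
 *	1 * 100 + (500000 + 9999) / 10000 + 1 = 151 ticks
 *
 * The fractional tick is rounded up and one extra tick is added so a
 * near-term clock interrupt cannot expire the timeout early.
 */
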
int
tstohz_high(struct timespec *ts)
{
	int ticks;
	long sec, nsec;

	sec = ts->tv_sec;
	nsec = ts->tv_nsec;
	if (nsec < 0) {
		sec--;
		nsec += 1000000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (nsec > 0) {
			sec++;
			nsec -= 1000000000;
		}
		kprintf("tstohz_high: negative time difference "
			"%ld sec %ld nsec\n",
			sec, nsec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)nsec + (nstick - 1)) / nstick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}


/*
 * Compute the number of ticks for the specified amount of time, erring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
	else
		ticks = INT_MAX;
	return (ticks);
}

int
tstohz_low(struct timespec *ts)
{
	int ticks;
	long sec;

	sec = ts->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
	else
		ticks = INT_MAX;
	return (ticks);
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flags & P_PROFIL) == 0) {
		p->p_flags |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			crit_enter();
			psdiv = psratio;
			setstatclockrate(profhz);
			crit_exit();
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 *
 * caller must hold p->p_token
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flags & P_PROFIL) {
		p->p_flags &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			crit_enter();
			psdiv = 1;
			setstatclockrate(stathz);
			crit_exit();
		}
#endif
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = ustick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo", "");

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we handle that case.  For
 * uniformity we handle it in the usec case too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
 */
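
/*
 * Illustrative interval measurement using the "up" variants (a sketch,
 * not code from this file, assuming the timespecsub() macro from
 * <sys/time.h>); the uptime clocks are immune to set_timeofday() steps:
 *
 *	struct timespec t1, t2;
 *
 *	nanouptime(&t1);
 *	... work being measured ...
 *	nanouptime(&t2);
 *	timespecsub(&t2, &t1);
 *
 * after which t2 holds the elapsed time.
 */
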
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

/*
 * realtime routines
 */
void
getmicrotime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}


void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

/*
 * note: this is not exactly synchronized with real time.  To do that we
 * would have to do what microtime does and check for a nanoseconds
 * overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
	int fhard;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		ts.tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	ts.tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
	bt = &basetime[basetime_index];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= sys_cputimer->freq) {
			delta = (1000000000 * (tcount / sys_cputimer->freq) +
				 sys_cputimer->freq64_nsec *
				 (tcount % sys_cputimer->freq)) >> 32;
		} else {
			delta = (sys_cputimer->freq64_nsec * tcount) >> 32;
		}
		hardpps(tsp, delta);
	}
#endif
}
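
/*
 * Illustrative driver-side usage (hypothetical softc, not code from
 * this file): a driver embeds a struct pps_state, initializes it at
 * attach time, feeds captured counter values to pps_event() from its
 * interrupt handler, and forwards the time-pps ioctls here:
 *
 *	pps_init(&sc->sc_pps);				(at attach)
 *	pps_event(&sc->sc_pps, sys_cputimer->count(),
 *		  PPS_CAPTUREASSERT);			(pulse interrupt)
 *	error = pps_ioctl(cmd, data, &sc->sc_pps);	(from d_ioctl)
 */
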
/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
int64_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		return (rdtsc() + tsc_frequency * ns / (int64_t)1000000000);
	}
#endif
	return(-1);
}

/*
 * Compare the tsc against the passed target.
 *
 * Returns +1 if the target has been reached.
 * Returns 0 if the target has not yet been reached.
 * Returns -1 if the TSC is not supported.
 *
 * Typical use: while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		if ((int64_t)(target - rdtsc()) <= 0)
			return(1);
		return(0);
	}
#endif
	return(-1);
}

/*
 * Delay the specified number of nanoseconds using the tsc.  This function
 * returns immediately if the TSC is not supported.  At least one cpu_pause()
 * will be issued.
 */
void
tsc_delay(int ns)
{
	int64_t clk;

	clk = tsc_get_target(ns);
	cpu_pause();
	while (tsc_test_target(clk) == 0)
		cpu_pause();
}
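
/*
 * Example (illustrative): a driver needing a short busy-wait on a
 * TSC-capable cpu, e.g. roughly 10 microseconds for hardware to
 * settle, can simply call:
 *
 *	tsc_delay(10000);
 */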