/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 */

#include "opt_ntp.h"
#include "opt_ifpoll.h"
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <sys/upmap.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef IFPOLL_ENABLE
extern void	ifpoll_init_pcpu(int);
#endif

#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks (void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif

static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;
	size_t size = sizeof(struct kinfo_cputime);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &cputime_percpu[cpu], size)))
			break;
	}

	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");

static int
sysctl_cp_time(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[5] = {0};
	int cpu, error = 0;
	size_t size = sizeof(cpu_states);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] += cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[cpu].cp_idle;
	}

	error = SYSCTL_OUT(req, cpu_states, size);

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_time, "LU", "CPU time statistics");
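
/*
 * Illustrative sketch (not part of the original source): how a userland
 * program might consume the kern.cp_time sysctl exported above.  The
 * aggregation into five CP_* buckets matches what utilities like top(1)
 * expect.  The CP_* indices are assumed to come from <sys/resource.h>;
 * error handling is minimal.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		long cp[5];
 *		size_t len = sizeof(cp);
 *
 *		if (sysctlbyname("kern.cp_time", cp, &len, NULL, 0) == 0) {
 *			printf("user %ld nice %ld sys %ld intr %ld idle %ld\n",
 *			    cp[CP_USER], cp[CP_NICE], cp[CP_SYS],
 *			    cp[CP_INTR], cp[CP_IDLE]);
 *		}
 *		return (0);
 *	}
 */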

/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this
 * with microuptime().  microtime() is not drift compensated.  The real
 * uptime with compensation is nanotime() - boottime.  boottime is
 * recalculated whenever the real time is set based on the compensated
 * elapsed time in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 *
 * WARNING! time_second can backstep on time corrections.  Also, unlike
 *	    time_second, time_uptime is not a "real" time_t (seconds
 *	    since the Epoch) but seconds since booting.
 */
struct timespec boottime;	/* boot time (realtime) for reference only */
time_t time_second;		/* read-only 'passive' realtime in seconds */
time_t time_uptime;		/* read-only 'passive' uptime in seconds */

/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpu's and
 * interrupt races on UP systems.
 */
#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static volatile int basetime_index;

static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;
	int index;

	/*
	 * Because basetime data and index may be updated by another cpu,
	 * a load fence is required to ensure that the data we read has
	 * not been speculatively read relative to a possibly updated index.
	 */
	index = basetime_index;
	cpu_lfence();
	bt = &basetime[index];
	error = SYSCTL_OUT(req, bt, sizeof(*bt));
	return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");

static void hardclock(systimer_t info, int, struct intrframe *frame);
static void statclock(systimer_t info, int, struct intrframe *frame);
static void schedclock(systimer_t info, int, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

int	ticks;			/* system master ticks at hz */
int	clocks_running;		/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */
int	sched_ticks;		/* global schedule clock ticks */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */

/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	/*psratio = profhz / stathz;*/
	initclocks_pcpu();
	clocks_running = 1;
	if (kpmap) {
		kpmap->tsc_freq = (uint64_t)tsc_frequency;
		kpmap->tick_freq = hz;
	}
}

/*
 * Called on a per-cpu basis from the idle thread bootstrap on each cpu
 * during SMP initialization.
 *
 * This routine is called concurrently during low-level SMP initialization
 * and may not block in any way.  Meaning, among other things, we can't
 * acquire any tokens.
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = sys_cputimer->count();
	} else {
		/* XXX */
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

	crit_exit();
}

/*
 * This routine is called on just the BSP, just after SMP initialization
 * completes, to finish initializing any clocks that might contend/block
 * (e.g. on a token).  We can't do this in initclocks_pcpu() because
 * that function is called from the idle thread bootstrap for each cpu and
 * not allowed to block at all.
 */
static
void
initclocks_other(void *dummy)
{
	struct globaldata *ogd = mycpu;
	struct globaldata *gd;
	int n;

	for (n = 0; n < ncpus; ++n) {
		lwkt_setcpu_self(globaldata_find(n));
		gd = mycpu;

		/*
		 * Use a non-queued periodic systimer to prevent multiple
		 * ticks from building up if the sysclock jumps forward
		 * (8254 gets reset).  The sysclock will never jump backwards.
		 * Our time sync is based on the actual sysclock, not the
		 * ticks count.
		 */
		systimer_init_periodic_nq(&gd->gd_hardclock, hardclock,
					  NULL, hz);
		systimer_init_periodic_nq(&gd->gd_statclock, statclock,
					  NULL, stathz);
		/* XXX correct the frequency for scheduler / estcpu tests */
		systimer_init_periodic_nq(&gd->gd_schedclock, schedclock,
					  NULL, ESTCPUFREQ);
#ifdef IFPOLL_ENABLE
		ifpoll_init_pcpu(gd->gd_cpuid);
#endif
	}
	lwkt_setcpu_self(ogd);
}
SYSINIT(clocks2, SI_BOOT2_POST_SMP, SI_ORDER_ANY, initclocks_other, NULL);

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
		nbt->tv_nsec += 1000000000;
		--nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.
	 * When setting the absolute time of day the drift is 0 (for an
	 * instant) and we can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
	 */
	cpu_sfence();
	basetime_index = ni;

	crit_exit();
}

/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct globaldata *gd = mycpu;

	if ((gd->gd_reqflags & RQF_IPIQ) == 0 && lwkt_need_ipiq_process(gd)) {
		/* Defer to doreti on passive IPIQ processing */
		need_ipiq();
	}

	/*
	 * Realtime updates are per-cpu.  Note that timer corrections as
	 * returned by microtime() and friends make an additional adjustment
	 * using a system-wide 'basetime', but the running time is always
	 * taken from the per-cpu globaldata area.  Since the same clock
	 * is distributed (XXX SMP) to all cpus, the per-cpu timebases
	 * stay in synch.
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but that it is possible for
	 * it to temporarily get behind in the seconds if something in the
	 * system locks interrupts for a long period of time.  Since periodic
	 * timers count events, though, everything should resynch again
	 * immediately.
	 */
	cputicks = info->time - gd->gd_cpuclock_base;
	if (cputicks >= sys_cputimer->freq) {
		++gd->gd_time_seconds;
		gd->gd_cpuclock_base += sys_cputimer->freq;
		if (gd->gd_cpuid == 0)
			++time_uptime;	/* uncorrected monotonic 1-sec gran */
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
		struct timespec *nbt;
		struct timespec nts;
		int leap;
		int ni;

		++ticks;

#if 0
		if (tco->tc_poll_pps)
			tco->tc_poll_pps(tco);
#endif

		/*
		 * Calculate the new basetime index.  We are in a critical
		 * section on cpu #0 and can safely play with basetime_index.
		 * Start with the current basetime and then make adjustments.
		 */
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		nbt = &basetime[ni];
		*nbt = basetime[basetime_index];

		/*
		 * Apply adjtime corrections.  (adjtime() API)
		 *
		 * adjtime() only runs on cpu #0 so our critical section is
		 * sufficient to access these variables.
		 */
		if (ntp_delta != 0) {
			nbt->tv_nsec += ntp_tick_delta;
			ntp_delta -= ntp_tick_delta;
			if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
			    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
				ntp_tick_delta = ntp_delta;
			}
		}

		/*
		 * Apply permanent frequency corrections.  (sysctl API)
		 */
		if (ntp_tick_permanent != 0) {
			ntp_tick_acc += ntp_tick_permanent;
			if (ntp_tick_acc >= (1LL << 32)) {
				nbt->tv_nsec += ntp_tick_acc >> 32;
				ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
			} else if (ntp_tick_acc <= -(1LL << 32)) {
				/*
				 * Negate ntp_tick_acc to avoid shifting
				 * the sign bit.
				 */
				nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
				ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
			}
		}

		if (nbt->tv_nsec >= 1000000000) {
			nbt->tv_sec++;
			nbt->tv_nsec -= 1000000000;
		} else if (nbt->tv_nsec < 0) {
			nbt->tv_sec--;
			nbt->tv_nsec += 1000000000;
		}

		/*
		 * Another per-tick compensation.  (for ntp_adjtime() API)
		 */
		if (nsec_adj != 0) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				nbt->tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				nbt->tv_nsec -= -nsec_acc >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (nbt->tv_nsec >= 1000000000) {
				nbt->tv_nsec -= 1000000000;
				++nbt->tv_sec;
			} else if (nbt->tv_nsec < 0) {
				nbt->tv_nsec += 1000000000;
				--nbt->tv_sec;
			}
		}

		/************************************************************
		 *		LEAP SECOND CORRECTION			    *
		 ************************************************************
		 *
		 * Taking into account all the corrections made above, figure
		 * out the new real time.  If the seconds field has changed
		 * then apply any pending leap-second corrections.
		 */
		getnanotime_nbt(nbt, &nts);

		if (time_second != nts.tv_sec) {
			/*
			 * Apply leap second (sysctl API).  Adjust nts for
			 * changes so we do not have to call getnanotime_nbt
			 * again.
			 */
			if (ntp_leap_second) {
				if (ntp_leap_second == nts.tv_sec) {
					if (ntp_leap_insert) {
						nbt->tv_sec++;
						nts.tv_sec++;
					} else {
						nbt->tv_sec--;
						nts.tv_sec--;
					}
					ntp_leap_second--;
				}
			}

			/*
			 * Apply leap second (ntp_adjtime() API), calculate
			 * a new nsec_adj field.  ntp_update_second() returns
			 * nsec_adj as a per-second value but we need it as
			 * a per-tick value.
			 */
			leap = ntp_update_second(time_second, &nsec_adj);
			nsec_adj /= hz;
			nbt->tv_sec += leap;
			nts.tv_sec += leap;

			/*
			 * Update the time_second 'approximate time' global.
			 */
			time_second = nts.tv_sec;
		}

		/*
		 * Finally, our new basetime is ready to go live!
		 */
		cpu_sfence();
		basetime_index = ni;

		/*
		 * Update kpmap on each tick.  TS updates are integrated with
		 * fences and upticks allowing userland to read the data
		 * deterministically.
		 */
		if (kpmap) {
			int w;

			w = (kpmap->upticks + 1) & 1;
			getnanouptime(&kpmap->ts_uptime[w]);
			getnanotime(&kpmap->ts_realtime[w]);
			cpu_sfence();
			++kpmap->upticks;
			cpu_sfence();
		}
	}
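
	/*
	 * Illustrative sketch (an assumption, not code from this file) of
	 * the userland read side implied by the kpmap double-buffer above:
	 * sample upticks, read the most recently completed slot, and retry
	 * if the writer moved on during the read.
	 *
	 *	uint32_t u;
	 *	struct timespec ts;
	 *
	 *	do {
	 *		u = kpmap->upticks;
	 *		cpu_lfence();
	 *		ts = kpmap->ts_realtime[u & 1];
	 *		cpu_lfence();
	 *	} while (u != kpmap->upticks);
	 */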

	/*
	 * lwkt thread scheduler fair queueing
	 */
	lwkt_schedulerclock(curthread);

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * ITimer handling is per-tick, per-cpu.
	 *
	 * We must acquire the per-process token in order for ksignal()
	 * to be non-blocking.  For the moment this requires an AST fault;
	 * the ksignal() cannot be safely issued from this hard interrupt.
	 *
	 * XXX Even the trytoken here isn't right, and itimer operation in
	 *     a multi threaded environment is going to be weird at the
	 *     very least.
	 */
	if ((p = curproc) != NULL && lwkt_trytoken(&p->p_token)) {
		crit_enter_hard();
		if (p->p_upmap)
			++p->p_upmap->runticks;

		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) {
			p->p_flags |= P_SIGVTALRM;
			need_user_resched();
		}
		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) {
			p->p_flags |= P_SIGPROF;
			need_user_resched();
		}
		crit_exit_hard();
		lwkt_reltoken(&p->p_token);
	}
	setdelayed();
}

/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	thread_t td;
	struct proc *p;
	int bump;
	sysclock_t cv;
	sysclock_t scv;

	/*
	 * How big was our timeslice relative to the last time?  Calculate
	 * in microseconds.
	 *
	 * NOTE: Use of microuptime() is typically MPSAFE, but usually not
	 *	 during early boot.  Just use the systimer count to be nice
	 *	 to e.g. qemu.  The systimer has a better chance of being
	 *	 MPSAFE at early boot.
	 */
	cv = sys_cputimer->count();
	scv = mycpu->statint.gd_statcv;
	if (scv == 0) {
		bump = 1;
	} else {
		bump = (sys_cputimer->freq64_usec * (cv - scv)) >> 32;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	mycpu->statint.gd_statcv = cv;

#if 0
	stv = &mycpu->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
			(tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;
#endif

	td = curthread;
	p = td->td_proc;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flags & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cpu_time.cp_nice += bump;
		else
			cpu_time.cp_user += bump;
	} else {
		int intr_nest = mycpu->gd_intr_nesting_level;

		if (in_ipi) {
			/*
			 * IPI processing code will bump gd_intr_nesting_level
			 * up by one, which breaks following CLKF_INTR testing,
			 * so we subtract it by one here.
			 */
			--intr_nest;
		}
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && frame) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif

#define IS_INTR_RUNNING	((frame && CLKF_INTR(intr_nest)) || CLKF_INTR_TD(td))

		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 * can occur if ipi processing is done from a crit_exit().
		 */
		if (IS_INTR_RUNNING)
			td->td_iticks += bump;
		else
			td->td_sticks += bump;

		if (IS_INTR_RUNNING) {
			/*
			 * If we interrupted an interrupt thread, well,
			 * count it as interrupt time.
			 */
#ifdef DEBUG_PCTRACK
			if (frame)
				do_pctrack(frame, PCTRACK_INT);
#endif
			cpu_time.cp_intr += bump;
		} else {
			if (td == &mycpu->gd_idlethread) {
				/*
				 * Even if the current thread is the idle
				 * thread it could be due to token contention
				 * in the LWKT scheduler.  Count such as
				 * system time.
				 */
				if (mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK)
					cpu_time.cp_sys += bump;
				else
					cpu_time.cp_idle += bump;
			} else {
				/*
				 * System thread was running.
				 */
#ifdef DEBUG_PCTRACK
				if (frame)
					do_pctrack(frame, PCTRACK_SYS);
#endif
				cpu_time.cp_sys += bump;
			}
		}

#undef IS_INTR_RUNNING
	}
}

#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */

static void
do_pctrack(struct intrframe *frame, int which)
{
	struct kinfo_pctrack *pctrack;

	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
		(void *)CLKF_PC(frame);
	++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_pcheader head;
	int error;
	int cpu;
	int ntrack;

	head.pc_ntrack = PCTRACK_SIZE;
	head.pc_arysize = PCTRACK_ARYSIZE;

	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
		return (error);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
					   sizeof(struct kinfo_pctrack));
			if (error)
				break;
		}
		if (error)
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif

/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	struct lwp *lp;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	if ((lp = lwkt_preempted_proc()) != NULL) {
		/*
		 * Account for cpu time used and hit the scheduler.  Note
		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
		 * HERE.
		 */
		++lp->lwp_cpticks;
		usched_schedulerclock(lp, info->periodic, info->time);
	} else {
		usched_schedulerclock(NULL, info->periodic, info->time);
	}
	if ((lp = curthread->td_lwp) != NULL) {
		/*
		 * Update resource usage integrals and maximums.
		 */
		if ((ru = &lp->lwp_proc->p_ru) &&
		    (vm = lp->lwp_proc->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			if (lwkt_trytoken(&vm->vm_map.token)) {
				rss = pgtok(vmspace_resident_count(vm));
				if (ru->ru_maxrss < rss)
					ru->ru_maxrss = rss;
				lwkt_reltoken(&vm->vm_map.token);
			}
		}
	}
	/* Increment the global sched_ticks */
	if (mycpu->gd_cpuid == 0)
		++sched_ticks;
}

/*
 * Compute number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function will round the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		kprintf("tvtohz_high: negative time difference "
			"%ld sec %ld usec\n",
			sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)usec + (ustick - 1)) / ustick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

int
tstohz_high(struct timespec *ts)
{
	int ticks;
	long sec, nsec;

	sec = ts->tv_sec;
	nsec = ts->tv_nsec;
	if (nsec < 0) {
		sec--;
		nsec += 1000000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (nsec > 0) {
			sec++;
			nsec -= 1000000000;
		}
		kprintf("tstohz_high: negative time difference "
			"%ld sec %ld nsec\n",
			sec, nsec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)nsec + (nstick - 1)) / nstick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}
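
/*
 * Worked example (illustrative, assuming hz=100 so ustick=10000):
 * a timeval of 25000us rounds up through tvtohz_high() to
 * (25000 + 9999) / 10000 + 1 = 4 ticks, guaranteeing the full
 * interval elapses even if a clock interrupt fires immediately,
 * while tvtohz_low() below truncates to 25000 / 10000 = 2 ticks.
 */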

/*
 * Compute number of ticks for the specified amount of time, erring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
	else
		ticks = INT_MAX;
	return (ticks);
}

int
tstohz_low(struct timespec *ts)
{
	int ticks;
	long sec;

	sec = ts->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
	else
		ticks = INT_MAX;
	return (ticks);
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flags & P_PROFIL) == 0) {
		p->p_flags |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			crit_enter();
			psdiv = psratio;
			setstatclockrate(profhz);
			crit_exit();
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 *
 * caller must hold p->p_token
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flags & P_PROFIL) {
		p->p_flags &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			crit_enter();
			psdiv = 1;
			setstatclockrate(stathz);
			crit_exit();
		}
#endif
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = ustick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo", "");

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we handle that case.  For
 * uniformity we deal with it in the usec case too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
 */
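
/*
 * Illustrative sketch (an assumption, not code from this file): measuring
 * an elapsed interval inside the kernel with the "up" variants, which are
 * immune to wall-clock steps from set_timeofday() or NTP corrections.
 *
 *	struct timespec ts1, ts2;
 *
 *	nanouptime(&ts1);
 *	... do work ...
 *	nanouptime(&ts2);
 *	timespecsub(&ts2, &ts1);	// elapsed interval left in ts2
 */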
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

/*
 * realtime routines
 */
void
getmicrotime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}


void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

/*
 * note: this is not exactly synchronized with real time.  To do that we
 * would have to do what microtime does and check for a nanoseconds
 * overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
#ifdef PPS_SYNC
	int fhard;
#else
	int fhard __unused;
#endif

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		ts.tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	ts.tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
	bt = &basetime[basetime_index];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= sys_cputimer->freq) {
			delta = (1000000000 * (tcount / sys_cputimer->freq) +
				 sys_cputimer->freq64_nsec *
				 (tcount % sys_cputimer->freq)) >> 32;
		} else {
			delta = (sys_cputimer->freq64_nsec * tcount) >> 32;
		}
		hardpps(tsp, delta);
	}
#endif
}

/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
int64_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		return (rdtsc() +
			tsc_frequency * ns / (int64_t)1000000000);
	}
#endif
	return(-1);
}

/*
 * Compare the tsc against the passed target
 *
 * Returns +1 if the target has been reached
 * Returns  0 if the target has not yet been reached
 * Returns -1 if the TSC is not supported.
 *
 * Typical use:	while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		if ((int64_t)(target - rdtsc()) <= 0)
			return(1);
		return(0);
	}
#endif
	return(-1);
}

/*
 * Delay the specified number of nanoseconds using the tsc.  This function
 * returns immediately if the TSC is not supported.  At least one cpu_pause()
 * will be issued.
 */
void
tsc_delay(int ns)
{
	int64_t clk;

	clk = tsc_get_target(ns);
	cpu_pause();
	while (tsc_test_target(clk) == 0)
		cpu_pause();
}
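
/*
 * Illustrative sketch (an assumption, not code from this file): bounding
 * a hardware register poll with the tsc helpers above, e.g. waiting up
 * to 10us for a hypothetical ready bit before giving up.  read_status()
 * and READY are placeholders for a driver's own accessors.
 *
 *	int64_t target = tsc_get_target(10000);	// 10000ns = 10us
 *
 *	while ((read_status() & READY) == 0) {
 *		if (tsc_test_target(target) != 0)
 *			break;			// timeout, or no TSC
 *		cpu_pause();
 *	}
 */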