/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 */

#include "opt_ntp.h"
#include "opt_ifpoll.h"
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <sys/upmap.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef IFPOLL_ENABLE
extern void	ifpoll_init_pcpu(int);
#endif

#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks (void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif

static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;
	size_t size = sizeof(struct kinfo_cputime);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &cputime_percpu[cpu], size)))
			break;
	}

	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");

static int
sysctl_cp_time(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[5] = {0};
	int cpu, error = 0;
	size_t size = sizeof(cpu_states);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] += cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[cpu].cp_idle;
	}

	error = SYSCTL_OUT(req, cpu_states, size);

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_time, "LU", "CPU time statistics");
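/*
 * Illustrative userland consumer of the sysctl above (a sketch, not
 * part of this file): the five buckets are CP_USER, CP_NICE, CP_SYS,
 * CP_INTR and CP_IDLE, summed across all cpus.
 *
 *	long cp_time[5];
 *	size_t len = sizeof(cp_time);
 *
 *	if (sysctlbyname("kern.cp_time", cp_time, &len, NULL, 0) == 0)
 *		...compute deltas between successive samples...
 */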
/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this with
 * microuptime().  microtime() is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 */
struct timespec boottime;	/* boot time (realtime) for reference only */
time_t time_second;		/* read-only 'passive' realtime in seconds */
time_t time_uptime;		/* read-only 'passive' uptime in seconds */

/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpus and
 * interrupt races on UP systems.
 */
#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static volatile int basetime_index;

static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;
	int index;

	/*
	 * Because basetime data and index may be updated by another cpu,
	 * a load fence is required to ensure that the data we read has
	 * not been speculatively read relative to a possibly updated index.
	 */
	index = basetime_index;
	cpu_lfence();
	bt = &basetime[index];
	error = SYSCTL_OUT(req, bt, sizeof(*bt));
	return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
	&boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
	sysctl_get_basetime, "S,timespec", "System basetime");
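/*
 * Readers of the basetime FIFO pair an index load with a cpu_lfence()
 * before dereferencing, mirroring the writer's cpu_sfence() issued in
 * hardclock() and set_timeofday() before publishing a new index:
 *
 *	index = basetime_index;
 *	cpu_lfence();
 *	bt = &basetime[index];
 *
 * sysctl_get_basetime() above shows the canonical read-side pattern.
 */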
static void hardclock(systimer_t info, int, struct intrframe *frame);
static void statclock(systimer_t info, int, struct intrframe *frame);
static void schedclock(systimer_t info, int, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

int	ticks;			/* system master ticks at hz */
int	clocks_running;		/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */
int	sched_ticks;		/* global schedule clock ticks */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t	ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */

/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	/*psratio = profhz / stathz;*/
	initclocks_pcpu();
	clocks_running = 1;
	if (kpmap) {
		kpmap->tsc_freq = (uint64_t)tsc_frequency;
		kpmap->tick_freq = hz;
	}
}

/*
 * Called on a per-cpu basis from the idle thread bootstrap on each cpu
 * during SMP initialization.
 *
 * This routine is called concurrently during low-level SMP initialization
 * and may not block in any way.  Meaning, among other things, we can't
 * acquire any tokens.
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = sys_cputimer->count();
	} else {
		/* XXX */
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

	crit_exit();
}

/*
 * This routine is called on just the BSP, just after SMP initialization
 * completes, to finish initializing any clocks that might contend/block
 * (e.g. on a token).  We can't do this in initclocks_pcpu() because
 * that function is called from the idle thread bootstrap for each cpu and
 * not allowed to block at all.
 */
static
void
initclocks_other(void *dummy)
{
	struct globaldata *ogd = mycpu;
	struct globaldata *gd;
	int n;

	for (n = 0; n < ncpus; ++n) {
		lwkt_setcpu_self(globaldata_find(n));
		gd = mycpu;

		/*
		 * Use a non-queued periodic systimer to prevent multiple
		 * ticks from building up if the sysclock jumps forward
		 * (8254 gets reset).  The sysclock will never jump backwards.
		 * Our time sync is based on the actual sysclock, not the
		 * ticks count.
		 */
		systimer_init_periodic_nq(&gd->gd_hardclock, hardclock,
					  NULL, hz);
		systimer_init_periodic_nq(&gd->gd_statclock, statclock,
					  NULL, stathz);
		/* XXX correct the frequency for scheduler / estcpu tests */
		systimer_init_periodic_nq(&gd->gd_schedclock, schedclock,
					  NULL, ESTCPUFREQ);
#ifdef IFPOLL_ENABLE
		ifpoll_init_pcpu(gd->gd_cpuid);
#endif
	}
	lwkt_setcpu_self(ogd);
}
SYSINIT(clocks2, SI_BOOT2_POST_SMP, SI_ORDER_ANY, initclocks_other, NULL)

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
		nbt->tv_nsec += 1000000000;
		--nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
	 */
	cpu_sfence();
	basetime_index = ni;

	crit_exit();
}
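/*
 * In effect set_timeofday() computes basetime = ts - nanouptime(), so
 * that uptime + basetime always reconstitutes the requested wall time.
 * For example, an uptime of 100.25 seconds and a requested time of
 * 1000000000.00 yields a new basetime of 999999899.75.
 */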
/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct globaldata *gd = mycpu;

	/*
	 * Realtime updates are per-cpu.  Note that timer corrections as
	 * returned by microtime() and friends make an additional adjustment
	 * using a system-wide 'basetime', but the running time is always
	 * taken from the per-cpu globaldata area.  Since the same clock
	 * is distributed (XXX SMP) to all cpus, the per-cpu timebases
	 * stay in sync.
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but it is possible for
	 * it to temporarily get behind in the seconds if something in the
	 * system locks interrupts for a long period of time.  Since periodic
	 * timers count events, though, everything should resync again
	 * immediately.
	 */
	cputicks = info->time - gd->gd_cpuclock_base;
	if (cputicks >= sys_cputimer->freq) {
		++gd->gd_time_seconds;
		gd->gd_cpuclock_base += sys_cputimer->freq;
		if (gd->gd_cpuid == 0)
			++time_uptime;	/* uncorrected monotonic 1-sec gran */
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
		struct timespec *nbt;
		struct timespec nts;
		int leap;
		int ni;

		++ticks;

#if 0
		if (tco->tc_poll_pps)
			tco->tc_poll_pps(tco);
#endif

		/*
		 * Calculate the new basetime index.  We are in a critical
		 * section on cpu #0 and can safely play with basetime_index.
		 * Start with the current basetime and then make adjustments.
		 */
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		nbt = &basetime[ni];
		*nbt = basetime[basetime_index];

		/*
		 * Apply adjtime corrections.  (adjtime() API)
		 *
		 * adjtime() only runs on cpu #0 so our critical section is
		 * sufficient to access these variables.
		 */
		if (ntp_delta != 0) {
			nbt->tv_nsec += ntp_tick_delta;
			ntp_delta -= ntp_tick_delta;
			if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
			    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
				ntp_tick_delta = ntp_delta;
			}
		}
		/*
		 * Apply permanent frequency corrections.  (sysctl API)
		 */
		if (ntp_tick_permanent != 0) {
			ntp_tick_acc += ntp_tick_permanent;
			if (ntp_tick_acc >= (1LL << 32)) {
				nbt->tv_nsec += ntp_tick_acc >> 32;
				ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
			} else if (ntp_tick_acc <= -(1LL << 32)) {
				/*
				 * Negate ntp_tick_acc to avoid shifting
				 * the sign bit.
				 */
				nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
				ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
			}
		}

		if (nbt->tv_nsec >= 1000000000) {
			nbt->tv_sec++;
			nbt->tv_nsec -= 1000000000;
		} else if (nbt->tv_nsec < 0) {
			nbt->tv_sec--;
			nbt->tv_nsec += 1000000000;
		}

		/*
		 * Another per-tick compensation.  (for ntp_adjtime() API)
		 */
		if (nsec_adj != 0) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				nbt->tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				nbt->tv_nsec -= -nsec_acc >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (nbt->tv_nsec >= 1000000000) {
				nbt->tv_nsec -= 1000000000;
				++nbt->tv_sec;
			} else if (nbt->tv_nsec < 0) {
				nbt->tv_nsec += 1000000000;
				--nbt->tv_sec;
			}
		}

		/************************************************************
		 *			LEAP SECOND CORRECTION		    *
		 ************************************************************
		 *
		 * Taking into account all the corrections made above, figure
		 * out the new real time.  If the seconds field has changed
		 * then apply any pending leap-second corrections.
		 */
		getnanotime_nbt(nbt, &nts);

		if (time_second != nts.tv_sec) {
			/*
			 * Apply leap second (sysctl API).  Adjust nts for
			 * changes so we do not have to call getnanotime_nbt
			 * again.
			 */
			if (ntp_leap_second) {
				if (ntp_leap_second == nts.tv_sec) {
					if (ntp_leap_insert) {
						nbt->tv_sec++;
						nts.tv_sec++;
					} else {
						nbt->tv_sec--;
						nts.tv_sec--;
					}
					ntp_leap_second--;
				}
			}

			/*
			 * Apply leap second (ntp_adjtime() API), calculate
			 * a new nsec_adj field.  ntp_update_second() returns
			 * nsec_adj as a per-second value but we need it as
			 * a per-tick value.
			 */
			leap = ntp_update_second(time_second, &nsec_adj);
			nsec_adj /= hz;
			nbt->tv_sec += leap;
			nts.tv_sec += leap;

			/*
			 * Update the time_second 'approximate time' global.
			 */
			time_second = nts.tv_sec;
		}

		/*
		 * Finally, our new basetime is ready to go live!
		 */
		cpu_sfence();
		basetime_index = ni;

		/*
		 * Update kpmap on each tick.  TS updates are integrated with
		 * fences and upticks allowing userland to read the data
		 * deterministically.
		 */
		if (kpmap) {
			int w;

			w = (kpmap->upticks + 1) & 1;
			getnanouptime(&kpmap->ts_uptime[w]);
			getnanotime(&kpmap->ts_realtime[w]);
			cpu_sfence();
			++kpmap->upticks;
			cpu_sfence();
		}
	}

	/*
	 * lwkt thread scheduler fair queueing
	 */
	lwkt_schedulerclock(curthread);

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * ITimer handling is per-tick, per-cpu.
	 *
	 * We must acquire the per-process token in order for ksignal()
	 * to be non-blocking.  For the moment this requires an AST fault,
	 * the ksignal() cannot be safely issued from this hard interrupt.
	 *
	 * XXX Even the trytoken here isn't right, and itimer operation in
	 *     a multi threaded environment is going to be weird at the
	 *     very least.
	 */
	if ((p = curproc) != NULL && lwkt_trytoken(&p->p_token)) {
		crit_enter_hard();
		if (p->p_upmap)
			++p->p_upmap->runticks;

		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) {
			p->p_flags |= P_SIGVTALRM;
			need_user_resched();
		}
		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) {
			p->p_flags |= P_SIGPROF;
			need_user_resched();
		}
		crit_exit_hard();
		lwkt_reltoken(&p->p_token);
	}
	setdelayed();
}
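/*
 * Note on the 32.32 fixed point used by the NTP corrections above:
 * ntp_tick_permanent and nsec_adj both carry nanoseconds shifted left
 * 32 bits.  For example, a permanent correction of +0.5 nsec per tick
 * is stored as (1LL << 31); the accumulator then carries one whole
 * nanosecond into nbt->tv_nsec every second tick.
 */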
/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	thread_t td;
	struct proc *p;
	int bump;
	sysclock_t cv;
	sysclock_t scv;

	/*
	 * How big was our timeslice relative to the last time?  Calculate
	 * in microseconds.
	 *
	 * NOTE: Use of microuptime() is typically MPSAFE, but usually not
	 *	 during early boot.  Just use the systimer count to be nice
	 *	 to e.g. qemu.  The systimer has a better chance of being
	 *	 MPSAFE at early boot.
	 */
	cv = sys_cputimer->count();
	scv = mycpu->statint.gd_statcv;
	if (scv == 0) {
		bump = 1;
	} else {
		bump = (sys_cputimer->freq64_usec * (cv - scv)) >> 32;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	mycpu->statint.gd_statcv = cv;

#if 0
	stv = &mycpu->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
		       (tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;
#endif

	td = curthread;
	p = td->td_proc;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flags & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cpu_time.cp_nice += bump;
		else
			cpu_time.cp_user += bump;
	} else {
		int intr_nest = mycpu->gd_intr_nesting_level;

		if (in_ipi) {
			/*
			 * IPI processing code will bump gd_intr_nesting_level
			 * up by one, which breaks the following CLKF_INTR
			 * testing, so we subtract one here.
			 */
			--intr_nest;
		}
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && frame) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif

#define IS_INTR_RUNNING	((frame && CLKF_INTR(intr_nest)) || CLKF_INTR_TD(td))

		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 * can occur if ipi processing is done from a crit_exit().
		 */
		if (IS_INTR_RUNNING)
			td->td_iticks += bump;
		else
			td->td_sticks += bump;

		if (IS_INTR_RUNNING) {
			/*
			 * If we interrupted an interrupt thread, well,
			 * count it as interrupt time.
			 */
#ifdef DEBUG_PCTRACK
			if (frame)
				do_pctrack(frame, PCTRACK_INT);
#endif
			cpu_time.cp_intr += bump;
		} else {
			if (td == &mycpu->gd_idlethread) {
				/*
				 * Even if the current thread is the idle
				 * thread it could be due to token contention
				 * in the LWKT scheduler.  Count such as
				 * system time.
				 */
				if (mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK)
					cpu_time.cp_sys += bump;
				else
					cpu_time.cp_idle += bump;
			} else {
				/*
				 * System thread was running.
				 */
#ifdef DEBUG_PCTRACK
				if (frame)
					do_pctrack(frame, PCTRACK_SYS);
#endif
				cpu_time.cp_sys += bump;
			}
		}

#undef IS_INTR_RUNNING
	}
}

#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */

static void
do_pctrack(struct intrframe *frame, int which)
{
	struct kinfo_pctrack *pctrack;

	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
		(void *)CLKF_PC(frame);
	++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_pcheader head;
	int error;
	int cpu;
	int ntrack;

	head.pc_ntrack = PCTRACK_SIZE;
	head.pc_arysize = PCTRACK_ARYSIZE;

	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
		return (error);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
					   sizeof(struct kinfo_pctrack));
			if (error)
				break;
		}
		if (error)
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif
/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	struct lwp *lp;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	if ((lp = lwkt_preempted_proc()) != NULL) {
		/*
		 * Account for cpu time used and hit the scheduler.  Note
		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
		 * HERE.
		 */
		++lp->lwp_cpticks;
		usched_schedulerclock(lp, info->periodic, info->time);
	} else {
		usched_schedulerclock(NULL, info->periodic, info->time);
	}
	if ((lp = curthread->td_lwp) != NULL) {
		/*
		 * Update resource usage integrals and maximums.
		 */
		if ((ru = &lp->lwp_proc->p_ru) &&
		    (vm = lp->lwp_proc->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			if (lwkt_trytoken(&vm->vm_map.token)) {
				rss = pgtok(vmspace_resident_count(vm));
				if (ru->ru_maxrss < rss)
					ru->ru_maxrss = rss;
				lwkt_reltoken(&vm->vm_map.token);
			}
		}
	}
	/* Increment the global sched_ticks */
	if (mycpu->gd_cpuid == 0)
		++sched_ticks;
}

/*
 * Compute the number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function rounds the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		kprintf("tvtohz_high: negative time difference "
			"%ld sec %ld usec\n",
			sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)usec + (ustick - 1)) / ustick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

int
tstohz_high(struct timespec *ts)
{
	int ticks;
	long sec, nsec;

	sec = ts->tv_sec;
	nsec = ts->tv_nsec;
	if (nsec < 0) {
		sec--;
		nsec += 1000000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (nsec > 0) {
			sec++;
			nsec -= 1000000000;
		}
		kprintf("tstohz_high: negative time difference "
			"%ld sec %ld nsec\n",
			sec, nsec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)nsec + (nstick - 1)) / nstick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}
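/*
 * Worked example for the round-up conversions above, assuming hz = 100
 * (ustick = 10000 usec per tick): a timeval of 15000 usec yields
 * (15000 + 9999) / 10000 + 1 = 3 ticks, guaranteeing that an imminent
 * clock interrupt cannot make the timeout expire early.
 */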
/*
 * Compute the number of ticks for the specified amount of time, erring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
	else
		ticks = INT_MAX;
	return (ticks);
}

int
tstohz_low(struct timespec *ts)
{
	int ticks;
	long sec;

	sec = ts->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
	else
		ticks = INT_MAX;
	return (ticks);
}
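/*
 * Worked example for the round-down conversions above, again assuming
 * hz = 100: the same 15000 usec timeval yields 15000 / 10000 = 1 tick,
 * and anything under one tick converts to 0.
 */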
/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flags & P_PROFIL) == 0) {
		p->p_flags |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			crit_enter();
			psdiv = psratio;
			setstatclockrate(profhz);
			crit_exit();
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 *
 * caller must hold p->p_token
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flags & P_PROFIL) {
		p->p_flags &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			crit_enter();
			psdiv = 1;
			setstatclockrate(stathz);
			crit_exit();
		}
#endif
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = ustick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo", "");

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we deal with the case.  For
 * uniformity we deal with the case in the usec case too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
 */
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}
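/*
 * The conversions above assume freq64_usec and freq64_nsec are the
 * 32.32 fixed point representations of 1000000 / freq and
 * 1000000000 / freq as set up by the cputimer code, so that e.g.
 * (sys_cputimer->freq64_nsec * delta) >> 32 computes
 * delta * 1000000000 / freq without a division.
 */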
/*
 * realtime routines
 */
void
getmicrotime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

/*
 * note: this is not exactly synchronized with real time.  To do that we
 * would have to do what microtime does and check for a nanoseconds
 * overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}
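/*
 * A driver using this facility would typically call pps_init() once at
 * attach time after setting pps->ppscap, then call pps_event() from its
 * capture interrupt with the latched counter value, along the lines of
 * (illustrative only, sc->pps being a hypothetical softc field):
 *
 *	pps_event(&sc->pps, sys_cputimer->count(), PPS_CAPTUREASSERT);
 */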
void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
	int fhard;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		ts.tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	ts.tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
	bt = &basetime[basetime_index];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= sys_cputimer->freq) {
			delta = (1000000000 * (tcount / sys_cputimer->freq) +
				 sys_cputimer->freq64_nsec *
				 (tcount % sys_cputimer->freq)) >> 32;
		} else {
			delta = (sys_cputimer->freq64_nsec * tcount) >> 32;
		}
		hardpps(tsp, delta);
	}
#endif
}

/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
int64_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		return (rdtsc() + tsc_frequency * ns / (int64_t)1000000000);
	}
#endif
	return(-1);
}

/*
 * Compare the tsc against the passed target
 *
 * Returns +1 if the target has been reached
 * Returns  0 if the target has not yet been reached
 * Returns -1 if the TSC is not supported.
 *
 * Typical use:		while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		if ((int64_t)(target - rdtsc()) <= 0)
			return(1);
		return(0);
	}
#endif
	return(-1);
}

/*
 * Delay the specified number of nanoseconds using the tsc.  This function
 * returns immediately if the TSC is not supported.  At least one cpu_pause()
 * will be issued.
 */
void
tsc_delay(int ns)
{
	int64_t clk;

	clk = tsc_get_target(ns);
	cpu_pause();
	while (tsc_test_target(clk) == 0)
		cpu_pause();
}
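/*
 * Example of a bounded busy-wait built on the two primitives above
 * (an illustrative sketch; DEVICE_READY() is a placeholder for the
 * condition actually being polled):
 *
 *	int64_t target = tsc_get_target(1000);
 *
 *	while (tsc_test_target(target) == 0 && !DEVICE_READY())
 *		cpu_pause();
 */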