/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 */

#include "opt_ntp.h"
#include "opt_ifpoll.h"
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/priv.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <sys/upmap.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef IFPOLL_ENABLE
extern void ifpoll_init_pcpu(int);
#endif

#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks (void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif

static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;
	int root_error;
	size_t size = sizeof(struct kinfo_cputime);
	struct kinfo_cputime tmp;

	/*
	 * NOTE: For security reasons, only root can sniff %rip
	 */
	root_error = priv_check_cred(curthread->td_ucred, PRIV_ROOT, 0);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		tmp = cputime_percpu[cpu];
		if (root_error == 0) {
			tmp.cp_sample_pc =
				(int64_t)globaldata_find(cpu)->gd_sample_pc;
			tmp.cp_sample_sp =
				(int64_t)globaldata_find(cpu)->gd_sample_sp;
		}
		if ((error = SYSCTL_OUT(req, &tmp, size)) != 0)
			break;
	}

	if (root_error == 0)
		smp_sniff();

	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");

static int
sysctl_cp_time(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[5] = {0};
	int cpu, error = 0;
	size_t size = sizeof(cpu_states);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] += cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[cpu].cp_idle;
	}

	error = SYSCTL_OUT(req, cpu_states, size);

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_time, "LU", "CPU time statistics");

/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this with
 * microuptime().  microtime() is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 *
 * WARNING! time_second can backstep on time corrections. Also, unlike
 *          time_second, time_uptime is not a "real" time_t (seconds
 *          since the Epoch) but seconds since booting.
 */
struct timespec boottime;	/* boot time (realtime) for reference only */
time_t time_second;		/* read-only 'passive' realtime in seconds */
time_t time_uptime;		/* read-only 'passive' uptime in seconds */

/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpus and
 * interrupt races on UP systems.
 */
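
/*
 * Reader-side sketch of the lock-free FIFO protocol described above
 * (illustrative only): cpu #0 fully initializes the next slot before
 * publishing the new index, so a reader that loads the index first and
 * then fences cannot observe a half-written slot:
 *
 *	ni = basetime_index;	// writer publishes this last
 *	cpu_lfence();		// no speculative slot reads before the load
 *	bt = &basetime[ni];	// stable until the writer laps the ring
 *
 * The 16-entry ring gives readers many ticks' worth of slack before a
 * slot is reused.
 */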
struct hardtime {
	__uint32_t time_second;
	sysclock_t cpuclock_base;
};

#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static struct hardtime hardtime[BASETIME_ARYSIZE];
static volatile int basetime_index;

static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;
	int index;

	/*
	 * Because basetime data and index may be updated by another cpu,
	 * a load fence is required to ensure that the data we read has
	 * not been speculatively read relative to a possibly updated index.
	 */
	index = basetime_index;
	cpu_lfence();
	bt = &basetime[index];
	error = SYSCTL_OUT(req, bt, sizeof(*bt));
	return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");

static void hardclock(systimer_t info, int, struct intrframe *frame);
static void statclock(systimer_t info, int, struct intrframe *frame);
static void schedclock(systimer_t info, int, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

int	ticks;			/* system master ticks at hz */
int	clocks_running;		/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */
int	sched_ticks;		/* global schedule clock ticks */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */

/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	/*psratio = profhz / stathz;*/
	initclocks_pcpu();
	clocks_running = 1;
	if (kpmap) {
		kpmap->tsc_freq = (uint64_t)tsc_frequency;
		kpmap->tick_freq = hz;
	}
}

/*
 * Called on a per-cpu basis from the idle thread bootstrap on each cpu
 * during SMP initialization.
 *
 * This routine is called concurrently during low-level SMP initialization
 * and may not block in any way.  Meaning, among other things, we can't
 * acquire any tokens.
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = sys_cputimer->count();
		hardtime[0].time_second = gd->gd_time_seconds;
		hardtime[0].cpuclock_base = gd->gd_cpuclock_base;
	} else {
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

	crit_exit();
}
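
/*
 * Illustrative arithmetic (assumed values, not authoritative): uptime is
 * reconstructed from the per-cpu base pair as roughly
 *
 *	uptime = gd_time_seconds + (count - gd_cpuclock_base) / freq
 *
 * e.g. with freq = 3579545, gd_time_seconds = 100 and a raw delta of
 * 7159090 counts, the *uptime() family further below reports 102 seconds.
 */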

/*
 * This routine is called on just the BSP, just after SMP initialization
 * completes, to finish initializing any clocks that might contend/block
 * (e.g. like on a token).  We can't do this in initclocks_pcpu() because
 * that function is called from the idle thread bootstrap for each cpu and
 * not allowed to block at all.
 */
static
void
initclocks_other(void *dummy)
{
	struct globaldata *ogd = mycpu;
	struct globaldata *gd;
	int n;

	for (n = 0; n < ncpus; ++n) {
		lwkt_setcpu_self(globaldata_find(n));
		gd = mycpu;

		/*
		 * Use a non-queued periodic systimer to prevent multiple
		 * ticks from building up if the sysclock jumps forward
		 * (8254 gets reset).  The sysclock will never jump backwards.
		 * Our time sync is based on the actual sysclock, not the
		 * ticks count.
		 */
		systimer_init_periodic_nq(&gd->gd_hardclock, hardclock,
					  NULL, hz);
		systimer_init_periodic_nq(&gd->gd_statclock, statclock,
					  NULL, stathz);
		/* XXX correct the frequency for scheduler / estcpu tests */
		systimer_init_periodic_nq(&gd->gd_schedclock, schedclock,
					  NULL, ESTCPUFREQ);
#ifdef IFPOLL_ENABLE
		ifpoll_init_pcpu(gd->gd_cpuid);
#endif
	}
	lwkt_setcpu_self(ogd);
}
SYSINIT(clocks2, SI_BOOT2_POST_SMP, SI_ORDER_ANY, initclocks_other, NULL);

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	cpu_lfence();
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
		nbt->tv_nsec += 1000000000;
		--nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
	 */
	cpu_sfence();
	basetime_index = ni;

	crit_exit();
}
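
/*
 * Worked example of the basetime computation above (invented values):
 * with nanouptime() at 100.250000000s and userland requesting
 * 1700000000.100000000s, the subtraction gives 1699999900s and
 * -150000000ns, which the borrow normalizes to 1699999899.850000000s.
 * Realtime is thereafter reconstructed as uptime + basetime.
 */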

/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct globaldata *gd = mycpu;

	if ((gd->gd_reqflags & RQF_IPIQ) == 0 && lwkt_need_ipiq_process(gd)) {
		/* Defer to doreti on passive IPIQ processing */
		need_ipiq();
	}

	/*
	 * We update the compensation base to calculate fine-grained time
	 * from the sys_cputimer on a per-cpu basis in order to avoid
	 * having to mess around with locks.  sys_cputimer is assumed to
	 * be consistent across all cpus.  CPU N copies the base state from
	 * CPU 0 using the same FIFO trick that we use for basetime (so we
	 * don't catch a CPU 0 update in the middle).
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but it is possible for it to
	 * temporarily get behind in the seconds if something in the
	 * system locks interrupts for a long period of time.  Since periodic
	 * timers count events, though, everything should resynch again
	 * immediately.
	 */
	if (gd->gd_cpuid == 0) {
		int ni;

		cputicks = info->time - gd->gd_cpuclock_base;
		if (cputicks >= sys_cputimer->freq) {
			cputicks /= sys_cputimer->freq;
			if (cputicks != 0 && cputicks != 1)
				kprintf("Warning: hardclock missed > 1 sec\n");
			gd->gd_time_seconds += cputicks;
			gd->gd_cpuclock_base += sys_cputimer->freq * cputicks;
			/* uncorrected monotonic 1-sec gran */
			time_uptime += cputicks;
		}
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		hardtime[ni].time_second = gd->gd_time_seconds;
		hardtime[ni].cpuclock_base = gd->gd_cpuclock_base;
	} else {
		int ni;

		ni = basetime_index;
		cpu_lfence();
		gd->gd_time_seconds = hardtime[ni].time_second;
		gd->gd_cpuclock_base = hardtime[ni].cpuclock_base;
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
		struct timespec *nbt;
		struct timespec nts;
		int leap;
		int ni;

		++ticks;

#if 0
		if (tco->tc_poll_pps)
			tco->tc_poll_pps(tco);
#endif

		/*
		 * Calculate the new basetime index.  We are in a critical
		 * section on cpu #0 and can safely play with basetime_index.
		 * Start with the current basetime and then make adjustments.
		 */
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		nbt = &basetime[ni];
		*nbt = basetime[basetime_index];

		/*
		 * Apply adjtime corrections.  (adjtime() API)
		 *
		 * adjtime() only runs on cpu #0 so our critical section is
		 * sufficient to access these variables.
		 */
		if (ntp_delta != 0) {
			nbt->tv_nsec += ntp_tick_delta;
			ntp_delta -= ntp_tick_delta;
			if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
			    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
				ntp_tick_delta = ntp_delta;
			}
		}
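
		/*
		 * Numeric illustration of the slew above (assumed values):
		 * adjtime() loads ntp_delta with the full correction and
		 * ntp_tick_delta with the per-tick step, so a +1000000ns
		 * delta at a 20000ns step drains over 50 ticks; the final
		 * comparison clamps the step so the last tick applies the
		 * exact residual instead of overshooting.
		 */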

		/*
		 * Apply permanent frequency corrections.  (sysctl API)
		 */
		if (ntp_tick_permanent != 0) {
			ntp_tick_acc += ntp_tick_permanent;
			if (ntp_tick_acc >= (1LL << 32)) {
				nbt->tv_nsec += ntp_tick_acc >> 32;
				ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
			} else if (ntp_tick_acc <= -(1LL << 32)) {
				/*
				 * Negate ntp_tick_acc to avoid shifting
				 * the sign bit.
				 */
				nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
				ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
			}
		}

		if (nbt->tv_nsec >= 1000000000) {
			nbt->tv_sec++;
			nbt->tv_nsec -= 1000000000;
		} else if (nbt->tv_nsec < 0) {
			nbt->tv_sec--;
			nbt->tv_nsec += 1000000000;
		}

		/*
		 * Another per-tick compensation.  (for ntp_adjtime() API)
		 */
		if (nsec_adj != 0) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				nbt->tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				nbt->tv_nsec -= -nsec_acc >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (nbt->tv_nsec >= 1000000000) {
				nbt->tv_nsec -= 1000000000;
				++nbt->tv_sec;
			} else if (nbt->tv_nsec < 0) {
				nbt->tv_nsec += 1000000000;
				--nbt->tv_sec;
			}
		}

		/************************************************************
		 *			LEAP SECOND CORRECTION		    *
		 ************************************************************
		 *
		 * Taking into account all the corrections made above, figure
		 * out the new real time.  If the seconds field has changed
		 * then apply any pending leap-second corrections.
		 */
		getnanotime_nbt(nbt, &nts);

		if (time_second != nts.tv_sec) {
			/*
			 * Apply leap second (sysctl API).  Adjust nts for
			 * changes so we do not have to call getnanotime_nbt
			 * again.
			 */
			if (ntp_leap_second) {
				if (ntp_leap_second == nts.tv_sec) {
					if (ntp_leap_insert) {
						nbt->tv_sec++;
						nts.tv_sec++;
					} else {
						nbt->tv_sec--;
						nts.tv_sec--;
					}
					ntp_leap_second--;
				}
			}

			/*
			 * Apply leap second (ntp_adjtime() API), calculate
			 * a new nsec_adj field.  ntp_update_second() returns
			 * nsec_adj as a per-second value but we need it as
			 * a per-tick value.
			 */
			leap = ntp_update_second(time_second, &nsec_adj);
			nsec_adj /= hz;
			nbt->tv_sec += leap;
			nts.tv_sec += leap;

			/*
			 * Update the time_second 'approximate time' global.
			 */
			time_second = nts.tv_sec;
		}

		/*
		 * Finally, our new basetime is ready to go live!
		 */
		cpu_sfence();
		basetime_index = ni;

		/*
		 * Update kpmap on each tick.  TS updates are integrated with
		 * fences and upticks allowing userland to read the data
		 * deterministically.
		 */
		if (kpmap) {
			int w;

			w = (kpmap->upticks + 1) & 1;
			getnanouptime(&kpmap->ts_uptime[w]);
			getnanotime(&kpmap->ts_realtime[w]);
			cpu_sfence();
			++kpmap->upticks;
			cpu_sfence();
		}
	}

	/*
	 * lwkt thread scheduler fair queueing
	 */
	lwkt_schedulerclock(curthread);

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * ITimer handling is per-tick, per-cpu.
	 *
	 * We must acquire the per-process token in order for ksignal()
	 * to be non-blocking.  For the moment this requires an AST fault,
	 * the ksignal() cannot be safely issued from this hard interrupt.
	 *
	 * XXX Even the trytoken here isn't right, and itimer operation in
	 *     a multi threaded environment is going to be weird at the
	 *     very least.
	 */
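	/*
	 * Note: itimerdecr() returning 0 is taken here to mean the timer
	 * expired this tick; the P_SIGVTALRM/P_SIGPROF flags defer the
	 * actual ksignal() to the AST path, per the comment above.
	 */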
	if ((p = curproc) != NULL && lwkt_trytoken(&p->p_token)) {
		crit_enter_hard();
		if (p->p_upmap)
			++p->p_upmap->runticks;

		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) {
			p->p_flags |= P_SIGVTALRM;
			need_user_resched();
		}
		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) {
			p->p_flags |= P_SIGPROF;
			need_user_resched();
		}
		crit_exit_hard();
		lwkt_reltoken(&p->p_token);
	}
	setdelayed();
}

/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	thread_t td;
	struct proc *p;
	int bump;
	sysclock_t cv;
	sysclock_t scv;

	/*
	 * How big was our timeslice relative to the last time?  Calculate
	 * in microseconds.
	 *
	 * NOTE: Use of microuptime() is typically MPSAFE, but usually not
	 *	 during early boot.  Just use the systimer count to be nice
	 *	 to e.g. qemu.  The systimer has a better chance of being
	 *	 MPSAFE at early boot.
	 */
	cv = sys_cputimer->count();
	scv = mycpu->statint.gd_statcv;
	if (scv == 0) {
		bump = 1;
	} else {
		bump = (sys_cputimer->freq64_usec * (cv - scv)) >> 32;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	mycpu->statint.gd_statcv = cv;

#if 0
	stv = &mycpu->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
		       (tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;
#endif

	td = curthread;
	p = td->td_proc;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flags & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cpu_time.cp_nice += bump;
		else
			cpu_time.cp_user += bump;
	} else {
		int intr_nest = mycpu->gd_intr_nesting_level;

		if (in_ipi) {
			/*
			 * IPI processing code will bump gd_intr_nesting_level
			 * up by one, which breaks following CLKF_INTR testing,
			 * so we subtract it by one here.
			 */
			--intr_nest;
		}
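
		/*
		 * Note on "bump" computed earlier: it is the elapsed wall
		 * time in usec since the previous statclock on this cpu,
		 * scaled with the same 32.32 fixed point the time routines
		 * use and clamped to [0, 1000000] to bound the damage from
		 * a stalled or reset cputimer.
		 */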
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && frame) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif

#define IS_INTR_RUNNING	((frame && CLKF_INTR(intr_nest)) || CLKF_INTR_TD(td))

		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 *     can occur if ipi processing is done from a crit_exit().
		 */
		if (IS_INTR_RUNNING)
			td->td_iticks += bump;
		else
			td->td_sticks += bump;

		if (IS_INTR_RUNNING) {
			/*
			 * If we interrupted an interrupt thread, well,
			 * count it as interrupt time.
			 */
#ifdef DEBUG_PCTRACK
			if (frame)
				do_pctrack(frame, PCTRACK_INT);
#endif
			cpu_time.cp_intr += bump;
		} else {
			if (td == &mycpu->gd_idlethread) {
				/*
				 * Even if the current thread is the idle
				 * thread it could be due to token contention
				 * in the LWKT scheduler.  Count such as
				 * system time.
				 */
				if (mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK)
					cpu_time.cp_sys += bump;
				else
					cpu_time.cp_idle += bump;
			} else {
				/*
				 * System thread was running.
				 */
#ifdef DEBUG_PCTRACK
				if (frame)
					do_pctrack(frame, PCTRACK_SYS);
#endif
				cpu_time.cp_sys += bump;
			}
		}

#undef IS_INTR_RUNNING
	}
}

#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */

static void
do_pctrack(struct intrframe *frame, int which)
{
	struct kinfo_pctrack *pctrack;

	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
		(void *)CLKF_PC(frame);
	++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_pcheader head;
	int error;
	int cpu;
	int ntrack;

	head.pc_ntrack = PCTRACK_SIZE;
	head.pc_arysize = PCTRACK_ARYSIZE;

	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
		return (error);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
					   sizeof(struct kinfo_pctrack));
			if (error)
				break;
		}
		if (error)
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif
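
/*
 * A hypothetical userland consumer of the pctrack sysctl above (sketch
 * only; error handling omitted, buffer size illustrative):
 *
 *	char buf[65536];
 *	size_t len = sizeof(buf);
 *	sysctlbyname("kern.pctrack", buf, &len, NULL, 0);
 *
 * The handler emits a kinfo_pcheader followed by ncpus * PCTRACK_SIZE
 * kinfo_pctrack records in a single stream.
 */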

/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	struct lwp *lp;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	if ((lp = lwkt_preempted_proc()) != NULL) {
		/*
		 * Account for cpu time used and hit the scheduler.  Note
		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
		 * HERE.
		 */
		++lp->lwp_cpticks;
		usched_schedulerclock(lp, info->periodic, info->time);
	} else {
		usched_schedulerclock(NULL, info->periodic, info->time);
	}
	if ((lp = curthread->td_lwp) != NULL) {
		/*
		 * Update resource usage integrals and maximums.
		 */
		if ((ru = &lp->lwp_proc->p_ru) &&
		    (vm = lp->lwp_proc->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			if (lwkt_trytoken(&vm->vm_map.token)) {
				rss = pgtok(vmspace_resident_count(vm));
				if (ru->ru_maxrss < rss)
					ru->ru_maxrss = rss;
				lwkt_reltoken(&vm->vm_map.token);
			}
		}
	}
	/* Increment the global sched_ticks */
	if (mycpu->gd_cpuid == 0)
		++sched_ticks;
}

/*
 * Compute number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function will average the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		kprintf("tvtohz_high: negative time difference "
			"%ld sec %ld usec\n",
			sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)usec + (ustick - 1)) / ustick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

int
tstohz_high(struct timespec *ts)
{
	int ticks;
	long sec, nsec;

	sec = ts->tv_sec;
	nsec = ts->tv_nsec;
	if (nsec < 0) {
		sec--;
		nsec += 1000000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (nsec > 0) {
			sec++;
			nsec -= 1000000000;
		}
		kprintf("tstohz_high: negative time difference "
			"%ld sec %ld nsec\n",
			sec, nsec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)nsec + (nstick - 1)) / nstick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}
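
/*
 * Worked example for tvtohz_high() (hz assumed to be 100, so ustick is
 * 10000 usec): a timeval of 0.025000s computes
 *
 *	(25000 + 9999) / 10000 + 1 = 3 + 1 = 4 ticks
 *
 * i.e. the conversion always rounds up and then pads by one tick so an
 * imminent clock interrupt cannot expire the timeout early.
 */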

/*
 * Compute number of ticks for the specified amount of time, erroring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
	else
		ticks = INT_MAX;
	return (ticks);
}

int
tstohz_low(struct timespec *ts)
{
	int ticks;
	long sec;

	sec = ts->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
	else
		ticks = INT_MAX;
	return (ticks);
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flags & P_PROFIL) == 0) {
		p->p_flags |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			crit_enter();
			psdiv = psratio;
			setstatclockrate(profhz);
			crit_exit();
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 *
 * caller must hold p->p_token
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flags & P_PROFIL) {
		p->p_flags &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			crit_enter();
			psdiv = 1;
			setstatclockrate(stathz);
			crit_exit();
		}
#endif
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = ustick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo", "");

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we deal with the case.  For
 * uniformity we deal with the case in the usec case too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
 */
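
/*
 * The conversions below use 32.32 fixed point.  Assuming the cputimer
 * precomputes freq64_nsec = (1000000000ULL << 32) / freq and
 * freq64_usec = (1000000ULL << 32) / freq, a sub-second delta scales as
 *
 *	nsec = (freq64_nsec * delta) >> 32	~=  delta * 1e9 / freq
 *
 * while the delta >= freq case is divided out first, as described above,
 * to keep the 64 bit intermediate product from overflowing.
 */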
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

/*
 * realtime routines
 */
void
getmicrotime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}
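
/*
 * Usage note (illustrative): the get*() variants above are the cheap
 * forms, accurate only to the last hardclock sample (up to 1/hz stale),
 * while microtime()/nanotime() below reread sys_cputimer->count() for
 * full precision.  Interval measurement generally wants the "up"
 * variants, which are immune to basetime steps from set_timeofday().
 */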

static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}
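
/*
 * Composition sanity check (invented numbers): with gd_time_seconds at
 * 500, a scaled remainder of 0.700000000s, and a basetime of
 * 1699999899.850000000s, nanotime() above reports 1700000400.550000000s,
 * i.e. uptime plus basetime with the nanoseconds normalized.
 */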

/*
 * Get an approximate time_t.  It does not have to be accurate.  This
 * function is called only from KTR and can be called with the system in
 * any state so do not use a critical section or other complex operation
 * here.
 *
 * NOTE: This is not exactly synchronized with real time.  To do that we
 *	 would have to do what microtime does and check for a nanoseconds
 *	 overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
#ifdef PPS_SYNC
	int fhard;
#endif
	int ni;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
#endif
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
#endif
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		ts.tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	ts.tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
	ni = basetime_index;
	cpu_lfence();
	bt = &basetime[ni];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= sys_cputimer->freq) {
			delta = (1000000000 * (tcount / sys_cputimer->freq) +
				 sys_cputimer->freq64_nsec *
				 (tcount % sys_cputimer->freq)) >> 32;
		} else {
			delta = (sys_cputimer->freq64_nsec * tcount) >> 32;
		}
		hardpps(tsp, delta);
	}
#endif
}

/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
int64_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		return (rdtsc() + tsc_frequency * ns / (int64_t)1000000000);
	}
#endif
	return(-1);
}

/*
 * Compare the tsc against the passed target
 *
 * Returns +1 if the target has been reached
 * Returns 0 if the target has not yet been reached
 * Returns -1 if the TSC is not supported.
 *
 * Typical use:		while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		if ((int64_t)(target - rdtsc()) <= 0)
			return(1);
		return(0);
	}
#endif
	return(-1);
}

/*
 * Delay the specified number of nanoseconds using the tsc.  This function
 * returns immediately if the TSC is not supported.  At least one cpu_pause()
 * will be issued.
 */
void
tsc_delay(int ns)
{
	int64_t clk;

	clk = tsc_get_target(ns);
	cpu_pause();
	while (tsc_test_target(clk) == 0)
		cpu_pause();
}