/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 */

#include "opt_ntp.h"
#include "opt_ifpoll.h"
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/priv.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <sys/upmap.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef IFPOLL_ENABLE
extern void	ifpoll_init_pcpu(int);
#endif

#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks (void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif

static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;
	int root_error;
	size_t size = sizeof(struct kinfo_cputime);
	struct kinfo_cputime tmp;

	/*
	 * NOTE: For security reasons, only root can sniff %rip
	 */
	root_error = priv_check_cred(curthread->td_ucred, PRIV_ROOT, 0);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		tmp = cputime_percpu[cpu];
		if (root_error == 0) {
			tmp.cp_sample_pc =
				(int64_t)globaldata_find(cpu)->gd_sample_pc;
			tmp.cp_sample_sp =
				(int64_t)globaldata_find(cpu)->gd_sample_sp;
		}
		if ((error = SYSCTL_OUT(req, &tmp, size)) != 0)
			break;
	}

	if (root_error == 0)
		smp_sniff();

	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");

static int
sysctl_cp_time(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[CPUSTATES] = {0};
	int cpu, error = 0;
	size_t size = sizeof(cpu_states);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] += cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[cpu].cp_idle;
	}

	error = SYSCTL_OUT(req, cpu_states, size);

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_time, "LU", "CPU time statistics");
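
/*
 * Illustrative sketch (not part of this file's build): a userland program
 * could consume the kern.cp_time sysctl exported above roughly as follows.
 * The header containing CPUSTATES/CP_* and the exact type widths are
 * assumptions here; consult the installed headers.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/resource.h>	/* assumed home of CPUSTATES/CP_* */
#include <stdio.h>

int
main(void)
{
	long states[CPUSTATES];
	size_t len = sizeof(states);

	if (sysctlbyname("kern.cp_time", states, &len, NULL, 0) < 0)
		return (1);
	printf("user %ld nice %ld sys %ld intr %ld idle %ld\n",
	    states[CP_USER], states[CP_NICE], states[CP_SYS],
	    states[CP_INTR], states[CP_IDLE]);
	return (0);
}
#endif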

/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this with
 * microuptime().  microtime() is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 *
 * WARNING! time_second can backstep on time corrections. Also, unlike
 *          time_second, time_uptime is not a "real" time_t (seconds
 *          since the Epoch) but seconds since booting.
 */
struct timespec boottime;	/* boot time (realtime) for reference only */
time_t time_second;		/* read-only 'passive' realtime in seconds */
time_t time_uptime;		/* read-only 'passive' uptime in seconds */

/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpu's and
 * interrupt races on UP systems.
 */
struct hardtime {
	__uint32_t time_second;
	sysclock_t cpuclock_base;
};

#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static struct hardtime hardtime[BASETIME_ARYSIZE];
static volatile int basetime_index;

static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;
	int index;

	/*
	 * Because basetime data and index may be updated by another cpu,
	 * a load fence is required to ensure that the data we read has
	 * not been speculatively read relative to a possibly updated index.
	 */
	index = basetime_index;
	cpu_lfence();
	bt = &basetime[index];
	error = SYSCTL_OUT(req, bt, sizeof(*bt));
	return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");

static void hardclock(systimer_t info, int, struct intrframe *frame);
static void statclock(systimer_t info, int, struct intrframe *frame);
static void schedclock(systimer_t info, int, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

int	ticks;			/* system master ticks at hz */
int	clocks_running;		/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */
int	sched_ticks;		/* global schedule clock ticks */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t	ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */
struct spinlock	ntp_spin;
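
/*
 * Illustrative arithmetic (not from the original source): the << 32
 * fields above are 32.32 fixed point.  With hz = 100 (assumed), a
 * sustained +1 ppm frequency correction is 1000 nsec per second, or
 * 10 nsec per tick, so ntp_tick_permanent would be set to
 * (int64_t)10 << 32.  The ntp_tick_acc accumulator carries the
 * fractional nanoseconds across ticks so sub-nsec-per-tick rates
 * are not lost to truncation.
 */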

/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	/*psratio = profhz / stathz;*/
	spin_init(&ntp_spin, "ntp");
	initclocks_pcpu();
	clocks_running = 1;
	if (kpmap) {
		kpmap->tsc_freq = (uint64_t)tsc_frequency;
		kpmap->tick_freq = hz;
	}
}

/*
 * Called on a per-cpu basis from the idle thread bootstrap on each cpu
 * during SMP initialization.
 *
 * This routine is called concurrently during low-level SMP initialization
 * and may not block in any way.  Meaning, among other things, we can't
 * acquire any tokens.
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = sys_cputimer->count();
		hardtime[0].time_second = gd->gd_time_seconds;
		hardtime[0].cpuclock_base = gd->gd_cpuclock_base;
	} else {
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

	crit_exit();
}

/*
 * This routine is called on just the BSP, just after SMP initialization
 * completes, to finish initializing any clocks that might contend/block
 * (e.g. on a token).  We can't do this in initclocks_pcpu() because
 * that function is called from the idle thread bootstrap for each cpu and
 * is not allowed to block at all.
 */
static
void
initclocks_other(void *dummy)
{
	struct globaldata *ogd = mycpu;
	struct globaldata *gd;
	int n;

	for (n = 0; n < ncpus; ++n) {
		lwkt_setcpu_self(globaldata_find(n));
		gd = mycpu;

		/*
		 * Use a non-queued periodic systimer to prevent multiple
		 * ticks from building up if the sysclock jumps forward
		 * (8254 gets reset).  The sysclock will never jump backwards.
		 * Our time sync is based on the actual sysclock, not the
		 * ticks count.
		 *
		 * Install statclock before hardclock to prevent statclock
		 * from misinterpreting gd_flags for tick assignment when
		 * they overlap.
		 */
		systimer_init_periodic_nq(&gd->gd_statclock, statclock,
					  NULL, stathz);
		systimer_init_periodic_nq(&gd->gd_hardclock, hardclock,
					  NULL, hz);
		/* XXX correct the frequency for scheduler / estcpu tests */
		systimer_init_periodic_nq(&gd->gd_schedclock, schedclock,
					  NULL, ESTCPUFREQ);
#ifdef IFPOLL_ENABLE
		ifpoll_init_pcpu(gd->gd_cpuid);
#endif
	}
	lwkt_setcpu_self(ogd);
}
SYSINIT(clocks2, SI_BOOT2_POST_SMP, SI_ORDER_ANY, initclocks_other, NULL);

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	cpu_lfence();
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
		nbt->tv_nsec += 1000000000;
		--nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).
	 * gd_time_seconds is thus our best uptime guess and suitable
	 * for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	spin_lock(&ntp_spin);
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
	 */
	cpu_sfence();
	basetime_index = ni;
	spin_unlock(&ntp_spin);

	crit_exit();
}

/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct globaldata *gd = mycpu;

	if ((gd->gd_reqflags & RQF_IPIQ) == 0 && lwkt_need_ipiq_process(gd)) {
		/* Defer to doreti on passive IPIQ processing */
		need_ipiq();
	}

	/*
	 * We update the compensation base to calculate fine-grained time
	 * from the sys_cputimer on a per-cpu basis in order to avoid
	 * having to mess around with locks.  sys_cputimer is assumed to
	 * be consistent across all cpus.  CPU N copies the base state from
	 * CPU 0 using the same FIFO trick that we use for basetime (so we
	 * don't catch a CPU 0 update in the middle).
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but it is possible for
	 * it to temporarily get behind in the seconds if something in the
	 * system locks interrupts for a long period of time.  Since periodic
	 * timers count events, though, everything should resynch again
	 * immediately.
	 */
	if (gd->gd_cpuid == 0) {
		int ni;

		cputicks = info->time - gd->gd_cpuclock_base;
		if (cputicks >= sys_cputimer->freq) {
			cputicks /= sys_cputimer->freq;
			if (cputicks != 0 && cputicks != 1)
				kprintf("Warning: hardclock missed > 1 sec\n");
			gd->gd_time_seconds += cputicks;
			gd->gd_cpuclock_base += sys_cputimer->freq * cputicks;
			/* uncorrected monotonic 1-sec gran */
			time_uptime += cputicks;
		}
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		hardtime[ni].time_second = gd->gd_time_seconds;
		hardtime[ni].cpuclock_base = gd->gd_cpuclock_base;
	} else {
		int ni;

		ni = basetime_index;
		cpu_lfence();
		gd->gd_time_seconds = hardtime[ni].time_second;
		gd->gd_cpuclock_base = hardtime[ni].cpuclock_base;
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
		struct timespec *nbt;
		struct timespec nts;
		int leap;
		int ni;

		++ticks;

#if 0
		if (tco->tc_poll_pps)
			tco->tc_poll_pps(tco);
#endif

		/*
		 * Calculate the new basetime index.  We are in a critical
		 * section on cpu #0 and can safely play with basetime_index.
		 * Start with the current basetime and then make adjustments.
		 */
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		nbt = &basetime[ni];
		*nbt = basetime[basetime_index];

		/*
		 * ntp adjustments only occur on cpu 0 and are protected by
		 * ntp_spin.  This spinlock virtually never conflicts.
		 */
		spin_lock(&ntp_spin);

		/*
		 * Apply adjtime corrections.  (adjtime() API)
		 *
		 * adjtime() only runs on cpu #0 so our critical section is
		 * sufficient to access these variables.
		 */
		if (ntp_delta != 0) {
			nbt->tv_nsec += ntp_tick_delta;
			ntp_delta -= ntp_tick_delta;
			if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
			    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
				ntp_tick_delta = ntp_delta;
			}
		}
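
		/*
		 * Illustrative numbers (not from the original source): if
		 * ntp_tick_delta is 5000 nsec and an adjtime() call left
		 * ntp_delta at +1000000 nsec, the slew above completes in
		 * 200 ticks, i.e. 2 seconds at an assumed hz of 100, with
		 * any final partial step clamped to the remainder.
		 */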

		/*
		 * Apply permanent frequency corrections.  (sysctl API)
		 */
		if (ntp_tick_permanent != 0) {
			ntp_tick_acc += ntp_tick_permanent;
			if (ntp_tick_acc >= (1LL << 32)) {
				nbt->tv_nsec += ntp_tick_acc >> 32;
				ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
			} else if (ntp_tick_acc <= -(1LL << 32)) {
				/* Negate ntp_tick_acc to avoid shifting the
				 * sign bit. */
				nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
				ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
			}
		}

		if (nbt->tv_nsec >= 1000000000) {
			nbt->tv_sec++;
			nbt->tv_nsec -= 1000000000;
		} else if (nbt->tv_nsec < 0) {
			nbt->tv_sec--;
			nbt->tv_nsec += 1000000000;
		}

		/*
		 * Another per-tick compensation.  (for ntp_adjtime() API)
		 */
		if (nsec_adj != 0) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				nbt->tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				nbt->tv_nsec -= -nsec_acc >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (nbt->tv_nsec >= 1000000000) {
				nbt->tv_nsec -= 1000000000;
				++nbt->tv_sec;
			} else if (nbt->tv_nsec < 0) {
				nbt->tv_nsec += 1000000000;
				--nbt->tv_sec;
			}
		}
		spin_unlock(&ntp_spin);

		/************************************************************
		 *			LEAP SECOND CORRECTION		    *
		 ************************************************************
		 *
		 * Taking into account all the corrections made above, figure
		 * out the new real time.  If the seconds field has changed
		 * then apply any pending leap-second corrections.
		 */
		getnanotime_nbt(nbt, &nts);

		if (time_second != nts.tv_sec) {
			/*
			 * Apply leap second (sysctl API).  Adjust nts for
			 * changes so we do not have to call getnanotime_nbt
			 * again.
			 */
			if (ntp_leap_second) {
				if (ntp_leap_second == nts.tv_sec) {
					if (ntp_leap_insert) {
						nbt->tv_sec++;
						nts.tv_sec++;
					} else {
						nbt->tv_sec--;
						nts.tv_sec--;
					}
					ntp_leap_second--;
				}
			}

			/*
			 * Apply leap second (ntp_adjtime() API), calculate
			 * a new nsec_adj field.  ntp_update_second() returns
			 * nsec_adj as a per-second value but we need it as
			 * a per-tick value.
			 */
			leap = ntp_update_second(time_second, &nsec_adj);
			nsec_adj /= hz;
			nbt->tv_sec += leap;
			nts.tv_sec += leap;

			/*
			 * Update the time_second 'approximate time' global.
			 */
			time_second = nts.tv_sec;
		}

		/*
		 * Finally, our new basetime is ready to go live!
		 */
		cpu_sfence();
		basetime_index = ni;

		/*
		 * Update kpmap on each tick.  TS updates are integrated with
		 * fences and upticks allowing userland to read the data
		 * deterministically.
		 */
		if (kpmap) {
			int w;

			w = (kpmap->upticks + 1) & 1;
			getnanouptime(&kpmap->ts_uptime[w]);
			getnanotime(&kpmap->ts_realtime[w]);
			cpu_sfence();
			++kpmap->upticks;
			cpu_sfence();
		}
	}

	/*
	 * lwkt thread scheduler fair queueing
	 */
	lwkt_schedulerclock(curthread);

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * Rollup accumulated vmstats, copy-back for critical path checks.
	 */
	vmstats_rollup_cpu(gd);
	mycpu->gd_vmstats = vmstats;

	/*
	 * ITimer handling is per-tick, per-cpu.
	 *
	 * We must acquire the per-process token in order for ksignal()
	 * to be non-blocking.  For the moment this requires an AST fault,
	 * the ksignal() cannot be safely issued from this hard interrupt.
	 *
	 * XXX Even the trytoken here isn't right, and itimer operation in
	 *     a multi threaded environment is going to be weird at the
	 *     very least.
	 */
	if ((p = curproc) != NULL && lwkt_trytoken(&p->p_token)) {
		crit_enter_hard();
		if (p->p_upmap)
			++p->p_upmap->runticks;

		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) {
			p->p_flags |= P_SIGVTALRM;
			need_user_resched();
		}
		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) {
			p->p_flags |= P_SIGPROF;
			need_user_resched();
		}
		crit_exit_hard();
		lwkt_reltoken(&p->p_token);
	}
	setdelayed();
}
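
/*
 * Illustrative sketch (not part of this file's build): with the
 * double-buffered scheme above, a userland consumer of the shared
 * kpmap page can obtain a consistent snapshot without locks.  The
 * 'kp' pointer and the exact userland type of upticks are assumptions
 * here; the field names mirror the kernel-side usage above.
 */
#if 0
	struct timespec uptime, realtime;
	uint32_t w;

	do {
		w = kp->upticks;		/* last completed slot */
		cpu_lfence();
		uptime = kp->ts_uptime[w & 1];
		realtime = kp->ts_realtime[w & 1];
		cpu_lfence();
	} while (w != kp->upticks);		/* retry if a tick intervened */
#endif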

/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	globaldata_t gd = mycpu;
	thread_t td;
	struct proc *p;
	int bump;
	sysclock_t cv;
	sysclock_t scv;

	/*
	 * How big was our timeslice relative to the last time?  Calculate
	 * in microseconds.
	 *
	 * NOTE: Use of microuptime() is typically MPSAFE, but usually not
	 *	 during early boot.  Just use the systimer count to be nice
	 *	 to e.g. qemu.  The systimer has a better chance of being
	 *	 MPSAFE at early boot.
	 */
	cv = sys_cputimer->count();
	scv = gd->statint.gd_statcv;
	if (scv == 0) {
		bump = 1;
	} else {
		bump = (sys_cputimer->freq64_usec * (cv - scv)) >> 32;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	gd->statint.gd_statcv = cv;

#if 0
	stv = &gd->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
		       (tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;
#endif

	td = curthread;
	p = td->td_proc;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flags & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cpu_time.cp_nice += bump;
		else
			cpu_time.cp_user += bump;
	} else {
		int intr_nest = gd->gd_intr_nesting_level;

		if (in_ipi) {
			/*
			 * IPI processing code will bump gd_intr_nesting_level
			 * up by one, which breaks following CLKF_INTR testing,
			 * so we subtract it by one here.
			 */
			--intr_nest;
		}
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && frame) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif

#define IS_INTR_RUNNING	((frame && CLKF_INTR(intr_nest)) || CLKF_INTR_TD(td))

		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 *     can occur if ipi processing is done from a crit_exit().
		 */
		if (IS_INTR_RUNNING) {
			/*
			 * If we interrupted an interrupt thread, well,
			 * count it as interrupt time.
			 */
			td->td_iticks += bump;
#ifdef DEBUG_PCTRACK
			if (frame)
				do_pctrack(frame, PCTRACK_INT);
#endif
			cpu_time.cp_intr += bump;
		} else if (gd->gd_flags & GDF_VIRTUSER) {
			/*
			 * The vkernel doesn't do a good job providing trap
			 * frames that we can test.  If the GDF_VIRTUSER
			 * flag is set we probably interrupted user mode.
			 *
			 * We also use this flag on the host when entering
			 * VMM mode.
			 */
			td->td_uticks += bump;

			/*
			 * Charge the time as appropriate
			 */
			if (p && p->p_nice > NZERO)
				cpu_time.cp_nice += bump;
			else
				cpu_time.cp_user += bump;
		} else {
			td->td_sticks += bump;
			if (td == &gd->gd_idlethread) {
				/*
				 * Token contention can cause us to mis-count
				 * a contended cpu as idle, but the check
				 * doesn't work properly for VKERNELs so only
				 * test it on a real kernel.
				 */
#ifdef _KERNEL_VIRTUAL
				cpu_time.cp_idle += bump;
#else
				if (mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK)
					cpu_time.cp_sys += bump;
				else
					cpu_time.cp_idle += bump;
#endif
			} else {
				/*
				 * System thread was running.
				 */
#ifdef DEBUG_PCTRACK
				if (frame)
					do_pctrack(frame, PCTRACK_SYS);
#endif
				cpu_time.cp_sys += bump;
			}
		}

#undef IS_INTR_RUNNING
	}
}

#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */

static void
do_pctrack(struct intrframe *frame, int which)
{
	struct kinfo_pctrack *pctrack;

	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
		(void *)CLKF_PC(frame);
	++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_pcheader head;
	int error;
	int cpu;
	int ntrack;

	head.pc_ntrack = PCTRACK_SIZE;
	head.pc_arysize = PCTRACK_ARYSIZE;

	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
		return (error);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
					   sizeof(struct kinfo_pctrack));
			if (error)
				break;
		}
		if (error)
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif

/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	struct lwp *lp;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	if ((lp = lwkt_preempted_proc()) != NULL) {
		/*
		 * Account for cpu time used and hit the scheduler.  Note
		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
		 * HERE.
		 */
		++lp->lwp_cpticks;
		usched_schedulerclock(lp, info->periodic, info->time);
	} else {
		usched_schedulerclock(NULL, info->periodic, info->time);
	}
	if ((lp = curthread->td_lwp) != NULL) {
		/*
		 * Update resource usage integrals and maximums.
		 */
		if ((ru = &lp->lwp_proc->p_ru) &&
		    (vm = lp->lwp_proc->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			if (lwkt_trytoken(&vm->vm_map.token)) {
				rss = pgtok(vmspace_resident_count(vm));
				if (ru->ru_maxrss < rss)
					ru->ru_maxrss = rss;
				lwkt_reltoken(&vm->vm_map.token);
			}
		}
	}
	/* Increment the global sched_ticks */
	if (mycpu->gd_cpuid == 0)
		++sched_ticks;
}

/*
 * Compute the number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function rounds the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		kprintf("tvtohz_high: negative time difference "
			"%ld sec %ld usec\n",
			sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)usec + (ustick - 1)) / ustick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

int
tstohz_high(struct timespec *ts)
{
	int ticks;
	long sec, nsec;

	sec = ts->tv_sec;
	nsec = ts->tv_nsec;
	if (nsec < 0) {
		sec--;
		nsec += 1000000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (nsec > 0) {
			sec++;
			nsec -= 1000000000;
		}
		kprintf("tstohz_high: negative time difference "
			"%ld sec %ld nsec\n",
			sec, nsec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)nsec + (nstick - 1)) / nstick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}


/*
 * Compute the number of ticks for the specified amount of time, erring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
	else
		ticks = INT_MAX;
	return (ticks);
}

int
tstohz_low(struct timespec *ts)
{
	int ticks;
	long sec;

	sec = ts->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
	else
		ticks = INT_MAX;
	return (ticks);
}
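
/*
 * Worked example (illustrative, assuming hz = 100 so ustick = 10000):
 * for a timeval of 25000 usec,
 *
 *	tvtohz_high: (25000 + 9999) / 10000 + 1 = 4 ticks (never early)
 *	tvtohz_low:  25000 / 10000             = 2 ticks (may be short)
 *
 * The _high form rounds up and pads one tick so a near-term clock
 * interrupt cannot expire the timeout early; the _low form truncates.
 */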

/*
 * Start profiling on a process.
 *
 * Caller must hold p->p_token.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flags & P_PROFIL) == 0) {
		p->p_flags |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			crit_enter();
			psdiv = psratio;
			setstatclockrate(profhz);
			crit_exit();
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 *
 * caller must hold p->p_token
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flags & P_PROFIL) {
		p->p_flags &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			crit_enter();
			psdiv = 1;
			setstatclockrate(stathz);
			crit_exit();
		}
#endif
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = ustick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo", "");

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we deal with the case.  For
 * uniformity we deal with the case in the usec case too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
 */
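
/*
 * Usage sketch (illustrative, not part of this file): timing a short
 * interval with the raw uptime routines, which are the precise,
 * boot-relative choice; the get* forms are cheaper but may lag by up
 * to 1/hz.  timespecsub() is assumed in its two-argument BSD form,
 * matching the timespecadd() usage later in this file.
 */
#if 0
	struct timespec t1, t2;

	nanouptime(&t1);
	/* ... work being measured ... */
	nanouptime(&t2);
	timespecsub(&t2, &t1);	/* t2 now holds the elapsed time */
#endif
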
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

/*
 * realtime routines
 */
void
getmicrotime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}


void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

/*
 * Get an approximate time_t.  It does not have to be accurate.  This
 * function is called only from KTR and can be called with the system in
 * any state so do not use a critical section or other complex operation
 * here.
 *
 * NOTE: This is not exactly synchronized with real time.  To do that we
 *	 would have to do what microtime does and check for a nanoseconds
 *	 overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}
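
/*
 * Illustrative arithmetic (assumptions noted): the count-to-nanosecond
 * conversions in this file, including the delta computation in
 * pps_event() below, rely on sys_cputimer->freq64_nsec being a 32.32
 * fixed-point value, approximately (1000000000 << 32) / freq.  With an
 * assumed timer frequency of 10 MHz, freq64_nsec is about 100 << 32,
 * so a delta of 123 counts converts to ((100 << 32) * 123) >> 32 =
 * 12300 nsec without requiring a 64-bit divide on the fast path.
 */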

void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
#ifdef PPS_SYNC
	int fhard;
#endif
	int ni;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
#endif
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
#endif
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		ts.tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	ts.tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
	ni = basetime_index;
	cpu_lfence();
	bt = &basetime[ni];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= sys_cputimer->freq) {
			delta = (1000000000 * (tcount / sys_cputimer->freq) +
				 sys_cputimer->freq64_nsec *
				 (tcount % sys_cputimer->freq)) >> 32;
		} else {
			delta = (sys_cputimer->freq64_nsec * tcount) >> 32;
		}
		hardpps(tsp, delta);
	}
#endif
}

/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
int64_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		return (rdtsc() + tsc_frequency * ns / (int64_t)1000000000);
	}
#endif
	return(-1);
}

/*
 * Compare the tsc against the passed target.
 *
 * Returns +1 if the target has been reached.
 * Returns 0 if the target has not yet been reached.
 * Returns -1 if the TSC is not supported.
 *
 * Typical use: while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		if ((int64_t)(target - rdtsc()) <= 0)
			return(1);
		return(0);
	}
#endif
	return(-1);
}

/*
 * Delay the specified number of nanoseconds using the tsc.  This function
 * returns immediately if the TSC is not supported.  At least one cpu_pause()
 * will be issued.
 */
void
tsc_delay(int ns)
{
	int64_t clk;

	clk = tsc_get_target(ns);
	cpu_pause();
	while (tsc_test_target(clk) == 0)
		cpu_pause();
}