/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 */

#include "opt_ntp.h"
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/priv.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <sys/upmap.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/kcollect.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks (void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif

static int sniff_enable = 1;
static int sniff_target = -1;
SYSCTL_INT(_kern, OID_AUTO, sniff_enable, CTLFLAG_RW, &sniff_enable, 0, "");
SYSCTL_INT(_kern, OID_AUTO, sniff_target, CTLFLAG_RW, &sniff_target, 0, "");

static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;
	int root_error;
	size_t size = sizeof(struct kinfo_cputime);
	struct kinfo_cputime tmp;

	/*
	 * NOTE: For security reasons, only root can sniff %rip
	 */
	root_error = priv_check_cred(curthread->td_ucred, PRIV_ROOT, 0);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		tmp = cputime_percpu[cpu];
		if (root_error == 0) {
			tmp.cp_sample_pc =
				(int64_t)globaldata_find(cpu)->gd_sample_pc;
			tmp.cp_sample_sp =
				(int64_t)globaldata_find(cpu)->gd_sample_sp;
		}
		if ((error = SYSCTL_OUT(req, &tmp, size)) != 0)
			break;
	}

	if (root_error == 0) {
		if (sniff_enable) {
			int n = sniff_target;
			if (n < 0)
				smp_sniff();
			else if (n < ncpus)
				cpu_sniff(n);
		}
	}

	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");

static int
sysctl_cp_time(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[CPUSTATES] = {0};
	int cpu, error = 0;
	size_t size = sizeof(cpu_states);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] += cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[cpu].cp_idle;
	}

	error = SYSCTL_OUT(req, cpu_states, size);

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_time, "LU", "CPU time statistics");

static int
sysctl_cp_times(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[CPUSTATES] = {0};
	int cpu, error;
	size_t size = sizeof(cpu_states);

	for (error = 0, cpu = 0; error == 0 && cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] = cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] = cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] = cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] = cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] = cputime_percpu[cpu].cp_idle;
		error = SYSCTL_OUT(req, cpu_states, size);
	}

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_times, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_times, "LU", "per-CPU time statistics");

/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this with
 * microuptime().  microtime() is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 *
 * WARNING! time_second can backstep on time corrections.
 *	    Also, unlike time_second, time_uptime is not a "real" time_t
 *	    (seconds since the Epoch) but seconds since booting.
 */
struct timespec boottime;	/* boot time (realtime) for reference only */
time_t time_second;		/* read-only 'passive' realtime in seconds */
time_t time_uptime;		/* read-only 'passive' uptime in seconds */

/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpu's and
 * interrupt races on UP systems.
 */
struct hardtime {
	__uint32_t time_second;
	sysclock_t cpuclock_base;
};

#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static struct hardtime hardtime[BASETIME_ARYSIZE];
static volatile int basetime_index;

static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;
	int index;

	/*
	 * Because basetime data and index may be updated by another cpu,
	 * a load fence is required to ensure that the data we read has
	 * not been speculatively read relative to a possibly updated index.
	 */
	index = basetime_index;
	cpu_lfence();
	bt = &basetime[index];
	error = SYSCTL_OUT(req, bt, sizeof(*bt));
	return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");

static void hardclock(systimer_t info, int, struct intrframe *frame);
static void statclock(systimer_t info, int, struct intrframe *frame);
static void schedclock(systimer_t info, int, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

int	ticks;			/* system master ticks at hz */
int	clocks_running;		/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */
int	sched_ticks;		/* global schedule clock ticks */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t	ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */
struct spinlock ntp_spin;
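
/*
 * Editor's sketch (not part of the original source): the basetime FIFO
 * above is consumed without locks.  The writer (cpu #0) fills the *next*
 * slot, issues cpu_sfence(), and only then publishes the new index; a
 * reader loads the index, issues cpu_lfence(), and then reads the slot.
 * The hypothetical function below illustrates the reader side and is
 * disabled; sysctl_get_basetime() above is the real in-tree example.
 */
#if 0
static void
example_basetime_snapshot(struct timespec *ts)
{
	int ni;

	ni = basetime_index;	/* writer only bumps index after sfence */
	cpu_lfence();		/* do not read the slot before the index */
	*ts = basetime[ni];	/* slot is stable: writer uses next slot */
}
#endif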

/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	/*psratio = profhz / stathz;*/
	spin_init(&ntp_spin, "ntp");
	initclocks_pcpu();
	clocks_running = 1;
	if (kpmap) {
		kpmap->tsc_freq = (uint64_t)tsc_frequency;
		kpmap->tick_freq = hz;
	}
}

/*
 * Called on a per-cpu basis from the idle thread bootstrap on each cpu
 * during SMP initialization.
 *
 * This routine is called concurrently during low-level SMP initialization
 * and may not block in any way.  Meaning, among other things, we can't
 * acquire any tokens.
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = sys_cputimer->count();
		hardtime[0].time_second = gd->gd_time_seconds;
		hardtime[0].cpuclock_base = gd->gd_cpuclock_base;
	} else {
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

	crit_exit();
}

/*
 * Called on a 10-second interval after the system is operational.
 * Return the collection data for USERPCT and install the data for
 * SYSTPCT and IDLEPCT.
 */
static
uint64_t
collect_cputime_callback(int n)
{
	static long cpu_base[CPUSTATES];
	long cpu_states[CPUSTATES];
	long total;
	long acc;
	long lsb;

	bzero(cpu_states, sizeof(cpu_states));
	for (n = 0; n < ncpus; ++n) {
		cpu_states[CP_USER] += cputime_percpu[n].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[n].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[n].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[n].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[n].cp_idle;
	}

	acc = 0;
	for (n = 0; n < CPUSTATES; ++n) {
		total = cpu_states[n] - cpu_base[n];
		cpu_base[n] = cpu_states[n];
		cpu_states[n] = total;
		acc += total;
	}
	if (acc == 0)		/* prevent degenerate divide by 0 */
		acc = 1;
	lsb = acc / (10000 * 2);
	kcollect_setvalue(KCOLLECT_SYSTPCT,
			  (cpu_states[CP_SYS] + lsb) * 10000 / acc);
	kcollect_setvalue(KCOLLECT_IDLEPCT,
			  (cpu_states[CP_IDLE] + lsb) * 10000 / acc);
	kcollect_setvalue(KCOLLECT_INTRPCT,
			  (cpu_states[CP_INTR] + lsb) * 10000 / acc);
	return((cpu_states[CP_USER] + cpu_states[CP_NICE] + lsb) * 10000 / acc);
}

/*
 * This routine is called on just the BSP, just after SMP initialization
 * completes to finish initializing any clocks that might contend/block
 * (e.g. like on a token).  We can't do this in initclocks_pcpu() because
 * that function is called from the idle thread bootstrap for each cpu and
 * not allowed to block at all.
 */
static
void
initclocks_other(void *dummy)
{
	struct globaldata *ogd = mycpu;
	struct globaldata *gd;
	int n;

	for (n = 0; n < ncpus; ++n) {
		lwkt_setcpu_self(globaldata_find(n));
		gd = mycpu;

		/*
		 * Use a non-queued periodic systimer to prevent multiple
		 * ticks from building up if the sysclock jumps forward
		 * (8254 gets reset).  The sysclock will never jump backwards.
		 * Our time sync is based on the actual sysclock, not the
		 * ticks count.
		 *
		 * Install statclock before hardclock to prevent statclock
		 * from misinterpreting gd_flags for tick assignment when
		 * they overlap.
		 */
		systimer_init_periodic_nq(&gd->gd_statclock, statclock,
					  NULL, stathz);
		systimer_init_periodic_nq(&gd->gd_hardclock, hardclock,
					  NULL, hz);
		/* XXX correct the frequency for scheduler / estcpu tests */
		systimer_init_periodic_nq(&gd->gd_schedclock, schedclock,
					  NULL, ESTCPUFREQ);
	}
	lwkt_setcpu_self(ogd);

	/*
	 * Regular data collection
	 */
	kcollect_register(KCOLLECT_USERPCT, "user", collect_cputime_callback,
			  KCOLLECT_SCALE(KCOLLECT_USERPCT_FORMAT, 0));
	kcollect_register(KCOLLECT_SYSTPCT, "syst", NULL,
			  KCOLLECT_SCALE(KCOLLECT_SYSTPCT_FORMAT, 0));
	kcollect_register(KCOLLECT_IDLEPCT, "idle", NULL,
			  KCOLLECT_SCALE(KCOLLECT_IDLEPCT_FORMAT, 0));
}
SYSINIT(clocks2, SI_BOOT2_POST_SMP, SI_ORDER_ANY, initclocks_other, NULL);

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	cpu_lfence();
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
		nbt->tv_nsec += 1000000000;
		--nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	spin_lock(&ntp_spin);
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
	 */
	cpu_sfence();
	basetime_index = ni;
	spin_unlock(&ntp_spin);

	crit_exit();
}

/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct globaldata *gd = mycpu;

	if ((gd->gd_reqflags & RQF_IPIQ) == 0 && lwkt_need_ipiq_process(gd)) {
		/* Defer to doreti on passive IPIQ processing */
		need_ipiq();
	}

	/*
	 * We update the compensation base to calculate fine-grained time
	 * from the sys_cputimer on a per-cpu basis in order to avoid
	 * having to mess around with locks.  sys_cputimer is assumed to
	 * be consistent across all cpus.
	 * CPU N copies the base state from CPU 0 using the same FIFO
	 * trick that we use for basetime (so we don't catch a CPU 0
	 * update in the middle).
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but that it is possible for
	 * it to temporarily get behind in the seconds if something in the
	 * system locks interrupts for a long period of time.  Since
	 * periodic timers count events, though, everything should resynch
	 * again immediately.
	 */
	if (gd->gd_cpuid == 0) {
		int ni;

		cputicks = info->time - gd->gd_cpuclock_base;
		if (cputicks >= sys_cputimer->freq) {
			cputicks /= sys_cputimer->freq;
			if (cputicks != 0 && cputicks != 1)
				kprintf("Warning: hardclock missed > 1 sec\n");
			gd->gd_time_seconds += cputicks;
			gd->gd_cpuclock_base += sys_cputimer->freq * cputicks;
			/* uncorrected monotonic 1-sec gran */
			time_uptime += cputicks;
		}
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		hardtime[ni].time_second = gd->gd_time_seconds;
		hardtime[ni].cpuclock_base = gd->gd_cpuclock_base;
	} else {
		int ni;

		ni = basetime_index;
		cpu_lfence();
		gd->gd_time_seconds = hardtime[ni].time_second;
		gd->gd_cpuclock_base = hardtime[ni].cpuclock_base;
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
		struct timespec *nbt;
		struct timespec nts;
		int leap;
		int ni;

		++ticks;

#if 0
		if (tco->tc_poll_pps)
			tco->tc_poll_pps(tco);
#endif

		/*
		 * Calculate the new basetime index.  We are in a critical
		 * section on cpu #0 and can safely play with basetime_index.
		 * Start with the current basetime and then make adjustments.
		 */
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		nbt = &basetime[ni];
		*nbt = basetime[basetime_index];

		/*
		 * ntp adjustments only occur on cpu 0 and are protected by
		 * ntp_spin.  This spinlock virtually never conflicts.
		 */
		spin_lock(&ntp_spin);

		/*
		 * Apply adjtime corrections.  (adjtime() API)
		 *
		 * adjtime() only runs on cpu #0 so our critical section is
		 * sufficient to access these variables.
		 */
		if (ntp_delta != 0) {
			nbt->tv_nsec += ntp_tick_delta;
			ntp_delta -= ntp_tick_delta;
			if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
			    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
				ntp_tick_delta = ntp_delta;
			}
		}

		/*
		 * Apply permanent frequency corrections.  (sysctl API)
		 */
		if (ntp_tick_permanent != 0) {
			ntp_tick_acc += ntp_tick_permanent;
			if (ntp_tick_acc >= (1LL << 32)) {
				nbt->tv_nsec += ntp_tick_acc >> 32;
				ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
			} else if (ntp_tick_acc <= -(1LL << 32)) {
				/*
				 * Negate ntp_tick_acc to avoid shifting
				 * the sign bit.
				 */
				nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
				ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
			}
		}

		if (nbt->tv_nsec >= 1000000000) {
			nbt->tv_sec++;
			nbt->tv_nsec -= 1000000000;
		} else if (nbt->tv_nsec < 0) {
			nbt->tv_sec--;
			nbt->tv_nsec += 1000000000;
		}

		/*
		 * Another per-tick compensation.  (for ntp_adjtime() API)
		 */
		if (nsec_adj != 0) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				nbt->tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				nbt->tv_nsec -= -nsec_acc >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (nbt->tv_nsec >= 1000000000) {
				nbt->tv_nsec -= 1000000000;
				++nbt->tv_sec;
			} else if (nbt->tv_nsec < 0) {
				nbt->tv_nsec += 1000000000;
				--nbt->tv_sec;
			}
		}
		spin_unlock(&ntp_spin);

		/************************************************************
		 *		LEAP SECOND CORRECTION			    *
		 ************************************************************
		 *
		 * Taking into account all the corrections made above, figure
		 * out the new real time.  If the seconds field has changed
		 * then apply any pending leap-second corrections.
		 */
		getnanotime_nbt(nbt, &nts);

		if (time_second != nts.tv_sec) {
			/*
			 * Apply leap second (sysctl API).  Adjust nts for
			 * changes so we do not have to call getnanotime_nbt
			 * again.
			 */
			if (ntp_leap_second) {
				if (ntp_leap_second == nts.tv_sec) {
					if (ntp_leap_insert) {
						nbt->tv_sec++;
						nts.tv_sec++;
					} else {
						nbt->tv_sec--;
						nts.tv_sec--;
					}
					ntp_leap_second--;
				}
			}

			/*
			 * Apply leap second (ntp_adjtime() API), calculate
			 * a new nsec_adj field.  ntp_update_second() returns
			 * nsec_adj as a per-second value but we need it as
			 * a per-tick value.
			 */
			leap = ntp_update_second(time_second, &nsec_adj);
			nsec_adj /= hz;
			nbt->tv_sec += leap;
			nts.tv_sec += leap;

			/*
			 * Update the time_second 'approximate time' global.
			 */
			time_second = nts.tv_sec;
		}

		/*
		 * Finally, our new basetime is ready to go live!
		 */
		cpu_sfence();
		basetime_index = ni;

		/*
		 * Update kpmap on each tick.  TS updates are integrated with
		 * fences and upticks allowing userland to read the data
		 * deterministically.
		 */
		if (kpmap) {
			int w;

			w = (kpmap->upticks + 1) & 1;
			getnanouptime(&kpmap->ts_uptime[w]);
			getnanotime(&kpmap->ts_realtime[w]);
			cpu_sfence();
			++kpmap->upticks;
			cpu_sfence();
		}
	}

	/*
	 * lwkt thread scheduler fair queueing
	 */
	lwkt_schedulerclock(curthread);

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * Rollup accumulated vmstats, copy-back for critical path checks.
	 */
	vmstats_rollup_cpu(gd);
	mycpu->gd_vmstats = vmstats;

	/*
	 * ITimer handling is per-tick, per-cpu.
	 *
	 * We must acquire the per-process token in order for ksignal()
	 * to be non-blocking.  For the moment this requires an AST fault,
	 * the ksignal() cannot be safely issued from this hard interrupt.
	 *
	 * XXX Even the trytoken here isn't right, and itimer operation in
	 *     a multi-threaded environment is going to be weird at the
	 *     very least.
	 */
	if ((p = curproc) != NULL && lwkt_trytoken(&p->p_token)) {
		crit_enter_hard();
		if (p->p_upmap)
			++p->p_upmap->runticks;

		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) {
			p->p_flags |= P_SIGVTALRM;
			need_user_resched();
		}
		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) {
			p->p_flags |= P_SIGPROF;
			need_user_resched();
		}
		crit_exit_hard();
		lwkt_reltoken(&p->p_token);
	}
	setdelayed();
}

/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	globaldata_t gd = mycpu;
	thread_t td;
	struct proc *p;
	int bump;
	sysclock_t cv;
	sysclock_t scv;

	/*
	 * How big was our timeslice relative to the last time?  Calculate
	 * in microseconds.
	 *
	 * NOTE: Use of microuptime() is typically MPSAFE, but usually not
	 *	 during early boot.  Just use the systimer count to be nice
	 *	 to e.g. qemu.  The systimer has a better chance of being
	 *	 MPSAFE at early boot.
	 */
	cv = sys_cputimer->count();
	scv = gd->statint.gd_statcv;
	if (scv == 0) {
		bump = 1;
	} else {
		bump = (sys_cputimer->freq64_usec * (cv - scv)) >> 32;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	gd->statint.gd_statcv = cv;

#if 0
	stv = &gd->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
		       (tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;
#endif

	td = curthread;
	p = td->td_proc;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flags & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cpu_time.cp_nice += bump;
		else
			cpu_time.cp_user += bump;
	} else {
		int intr_nest = gd->gd_intr_nesting_level;

		if (in_ipi) {
			/*
			 * IPI processing code will bump gd_intr_nesting_level
			 * up by one, which breaks following CLKF_INTR testing,
			 * so we subtract it by one here.
			 */
			--intr_nest;
		}
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && frame) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif

#define IS_INTR_RUNNING	((frame && CLKF_INTR(intr_nest)) || CLKF_INTR_TD(td))

		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 * can occur if ipi processing is done from a crit_exit().
		 */
		if (IS_INTR_RUNNING) {
			/*
			 * If we interrupted an interrupt thread, well,
			 * count it as interrupt time.
			 */
			td->td_iticks += bump;
#ifdef DEBUG_PCTRACK
			if (frame)
				do_pctrack(frame, PCTRACK_INT);
#endif
			cpu_time.cp_intr += bump;
		} else if (gd->gd_flags & GDF_VIRTUSER) {
			/*
			 * The vkernel doesn't do a good job providing trap
			 * frames that we can test.  If the GDF_VIRTUSER
			 * flag is set we probably interrupted user mode.
			 *
			 * We also use this flag on the host when entering
			 * VMM mode.
			 */
			td->td_uticks += bump;

			/*
			 * Charge the time as appropriate
			 */
			if (p && p->p_nice > NZERO)
				cpu_time.cp_nice += bump;
			else
				cpu_time.cp_user += bump;
		} else {
			td->td_sticks += bump;
			if (td == &gd->gd_idlethread) {
				/*
				 * Token contention can cause us to mis-count
				 * a contended cpu as idle, but it doesn't
				 * work properly for VKERNELs so just test on
				 * a real kernel.
				 */
#ifdef _KERNEL_VIRTUAL
				cpu_time.cp_idle += bump;
#else
				if (mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK)
					cpu_time.cp_sys += bump;
				else
					cpu_time.cp_idle += bump;
#endif
			} else {
				/*
				 * System thread was running.
				 */
#ifdef DEBUG_PCTRACK
				if (frame)
					do_pctrack(frame, PCTRACK_SYS);
#endif
				cpu_time.cp_sys += bump;
			}
		}

#undef IS_INTR_RUNNING
	}
}

#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */

static void
do_pctrack(struct intrframe *frame, int which)
{
	struct kinfo_pctrack *pctrack;

	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
		(void *)CLKF_PC(frame);
	++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_pcheader head;
	int error;
	int cpu;
	int ntrack;

	head.pc_ntrack = PCTRACK_SIZE;
	head.pc_arysize = PCTRACK_ARYSIZE;

	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
		return (error);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
					   sizeof(struct kinfo_pctrack));
			if (error)
				break;
		}
		if (error)
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	    sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif

/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	struct lwp *lp;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	if ((lp = lwkt_preempted_proc()) != NULL) {
		/*
		 * Account for cpu time used and hit the scheduler.  Note
		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
		 * HERE.
		 */
		++lp->lwp_cpticks;
		usched_schedulerclock(lp, info->periodic, info->time);
	} else {
		usched_schedulerclock(NULL, info->periodic, info->time);
	}
	if ((lp = curthread->td_lwp) != NULL) {
		/*
		 * Update resource usage integrals and maximums.
		 */
		if ((ru = &lp->lwp_proc->p_ru) &&
		    (vm = lp->lwp_proc->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			if (lwkt_trytoken(&vm->vm_map.token)) {
				rss = pgtok(vmspace_resident_count(vm));
				if (ru->ru_maxrss < rss)
					ru->ru_maxrss = rss;
				lwkt_reltoken(&vm->vm_map.token);
			}
		}
	}
	/* Increment the global sched_ticks */
	if (mycpu->gd_cpuid == 0)
		++sched_ticks;
}

/*
 * Compute number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function will average the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		kprintf("tvtohz_high: negative time difference "
			"%ld sec %ld usec\n",
			sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)usec + (ustick - 1)) / ustick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

int
tstohz_high(struct timespec *ts)
{
	int ticks;
	long sec, nsec;

	sec = ts->tv_sec;
	nsec = ts->tv_nsec;
	if (nsec < 0) {
		sec--;
		nsec += 1000000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (nsec > 0) {
			sec++;
			nsec -= 1000000000;
		}
		kprintf("tstohz_high: negative time difference "
			"%ld sec %ld nsec\n",
			sec, nsec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)nsec + (nstick - 1)) / nstick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

/*
 * Compute number of ticks for the specified amount of time, erroring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
	else
		ticks = INT_MAX;
	return (ticks);
}

int
tstohz_low(struct timespec *ts)
{
	int ticks;
	long sec;

	sec = ts->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
	else
		ticks = INT_MAX;
	return (ticks);
}
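
/*
 * Editor's sketch (not part of the original source): a hypothetical
 * caller converting a 1.5 second timeout.  tvtohz_high() rounds up and
 * pads by one tick so a near-term clock interrupt cannot expire the
 * timeout early; tvtohz_low() truncates and may legitimately return 0
 * for sub-tick intervals.
 */
#if 0
static int
example_timeout_ticks(void)
{
	struct timeval tv;

	tv.tv_sec = 1;
	tv.tv_usec = 500000;	/* 1.5 seconds, normalized */

	/* with hz=100 (ustick=10000) this returns 151, never early */
	return (tvtohz_high(&tv));
}
#endif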

/*
 * Start profiling on a process.
 *
 * Caller must hold p->p_token.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flags & P_PROFIL) == 0) {
		p->p_flags |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			crit_enter();
			psdiv = psratio;
			setstatclockrate(profhz);
			crit_exit();
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 *
 * caller must hold p->p_token
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flags & P_PROFIL) {
		p->p_flags &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			crit_enter();
			psdiv = 1;
			setstatclockrate(stathz);
			crit_exit();
		}
#endif
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = ustick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo", "");

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we deal with the case.  For
 * uniformity we deal with the case in the usec case too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
 */
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}
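
/*
 * Editor's note (sketch, not original code): the conversions above use
 * 32.32 fixed point to avoid a 64-bit division in the fast path.
 * Assuming freq64_nsec is precomputed as (1000000000LL << 32) / freq,
 * the multiply-and-shift below is equivalent to delta * 1e9 / freq.
 */
#if 0
static uint64_t
example_delta_to_nsec(sysclock_t delta)
{
	/* delta is in sys_cputimer counts, result is in nanoseconds */
	return ((sys_cputimer->freq64_nsec * delta) >> 32);
}
#endif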

/*
 * realtime routines
 */
void
getmicrotime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

/*
 * Get an approximate time_t.  It does not have to be accurate.  This
 * function is called only from KTR and can be called with the system in
 * any state so do not use a critical section or other complex operation
 * here.
 *
 * NOTE: This is not exactly synchronized with real time.  To do that we
 *	 would have to do what microtime does and check for a nanoseconds
 *	 overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
#ifdef PPS_SYNC
	int fhard;
#endif
	int ni;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
#endif
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
#endif
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		ts.tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	ts.tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
	ni = basetime_index;
	cpu_lfence();
	bt = &basetime[ni];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= sys_cputimer->freq) {
			delta = (1000000000 * (tcount / sys_cputimer->freq) +
				 sys_cputimer->freq64_nsec *
				 (tcount % sys_cputimer->freq)) >> 32;
		} else {
			delta = (sys_cputimer->freq64_nsec * tcount) >> 32;
		}
		hardpps(tsp, delta);
	}
#endif
}

/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
int64_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		return (rdtsc() + tsc_frequency * ns / (int64_t)1000000000);
	}
#endif
	return(-1);
}

/*
 * Compare the tsc against the passed target
 *
 * Returns +1 if the target has been reached
 * Returns  0 if the target has not yet been reached
 * Returns -1 if the TSC is not supported.
 *
 * Typical use:	while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		if ((int64_t)(target - rdtsc()) <= 0)
			return(1);
		return(0);
	}
#endif
	return(-1);
}

/*
 * Delay the specified number of nanoseconds using the tsc.  This function
 * returns immediately if the TSC is not supported.  At least one cpu_pause()
 * will be issued.
 */
void
tsc_delay(int ns)
{
	int64_t clk;

	clk = tsc_get_target(ns);
	cpu_pause();
	while (tsc_test_target(clk) == 0)
		cpu_pause();
}
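
/*
 * Editor's sketch (not part of the original source): a hypothetical
 * driver polling a ready bit for at most 10us using the tsc helpers
 * above, with a fallback when the TSC is unsupported.
 */
#if 0
static int
example_poll_ready(volatile uint32_t *reg)
{
	int64_t target;

	target = tsc_get_target(10000);		/* 10000ns = 10us */
	if (target < 0)
		return (ENODEV);		/* no TSC on this cpu */
	while (tsc_test_target(target) == 0) {
		if (*reg & 1)
			return (0);		/* device signalled ready */
		cpu_pause();
	}
	return (ETIMEDOUT);			/* bit never came up */
}
#endif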