/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 */

#include "opt_ntp.h"
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/priv.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <sys/upmap.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/kcollect.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks (void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif

static int sniff_enable = 1;
static int sniff_target = -1;
SYSCTL_INT(_kern, OID_AUTO, sniff_enable, CTLFLAG_RW, &sniff_enable, 0, "");
SYSCTL_INT(_kern, OID_AUTO, sniff_target, CTLFLAG_RW, &sniff_target, 0, "");

static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
        int cpu, error = 0;
        int root_error;
        size_t size = sizeof(struct kinfo_cputime);
        struct kinfo_cputime tmp;

        /*
         * NOTE: For security reasons, only root can sniff %rip
         */
        root_error = priv_check_cred(curthread->td_ucred, PRIV_ROOT, 0);

        for (cpu = 0; cpu < ncpus; ++cpu) {
                tmp = cputime_percpu[cpu];
                if (root_error == 0) {
                        tmp.cp_sample_pc =
                                (int64_t)globaldata_find(cpu)->gd_sample_pc;
                        tmp.cp_sample_sp =
                                (int64_t)globaldata_find(cpu)->gd_sample_sp;
                }
                if ((error = SYSCTL_OUT(req, &tmp, size)) != 0)
                        break;
        }

        if (root_error == 0) {
                if (sniff_enable) {
                        int n = sniff_target;
                        if (n < 0)
                                smp_sniff();
                        else if (n < ncpus)
                                cpu_sniff(n);
                }
        }

        return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
        sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");

static int
sysctl_cp_time(SYSCTL_HANDLER_ARGS)
{
        long cpu_states[CPUSTATES] = {0};
        int cpu, error = 0;
        size_t size = sizeof(cpu_states);

        for (cpu = 0; cpu < ncpus; ++cpu) {
                cpu_states[CP_USER] += cputime_percpu[cpu].cp_user;
                cpu_states[CP_NICE] += cputime_percpu[cpu].cp_nice;
                cpu_states[CP_SYS] += cputime_percpu[cpu].cp_sys;
                cpu_states[CP_INTR] += cputime_percpu[cpu].cp_intr;
                cpu_states[CP_IDLE] += cputime_percpu[cpu].cp_idle;
        }

        error = SYSCTL_OUT(req, cpu_states, size);

        return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
        sysctl_cp_time, "LU", "CPU time statistics");

static int
sysctl_cp_times(SYSCTL_HANDLER_ARGS)
{
        long cpu_states[CPUSTATES] = {0};
        int cpu, error;
        size_t size = sizeof(cpu_states);

        for (error = 0, cpu = 0; error == 0 && cpu < ncpus; ++cpu) {
                cpu_states[CP_USER] = cputime_percpu[cpu].cp_user;
                cpu_states[CP_NICE] = cputime_percpu[cpu].cp_nice;
                cpu_states[CP_SYS] = cputime_percpu[cpu].cp_sys;
                cpu_states[CP_INTR] = cputime_percpu[cpu].cp_intr;
                cpu_states[CP_IDLE] = cputime_percpu[cpu].cp_idle;
                error = SYSCTL_OUT(req, cpu_states, size);
        }

        return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_times, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
        sysctl_cp_times, "LU", "per-CPU time statistics");

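/*
 * Example (illustrative only, not compiled here): a userland program can
 * read the aggregated kern.cp_time array via sysctlbyname(3).  The state
 * indices (CP_USER, etc.) are assumed to come from <sys/resource.h>.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/resource.h>
#include <stdio.h>

int
main(void)
{
        long states[CPUSTATES];
        size_t len = sizeof(states);

        if (sysctlbyname("kern.cp_time", states, &len, NULL, 0) < 0) {
                perror("sysctlbyname");
                return (1);
        }
        printf("user %ld nice %ld sys %ld intr %ld idle %ld\n",
               states[CP_USER], states[CP_NICE], states[CP_SYS],
               states[CP_INTR], states[CP_IDLE]);
        return (0);
}
#endif
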
/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this with
 * microuptime().  microtime() is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 *
 * WARNING! time_second can backstep on time corrections.  Also, unlike
 *          time_second, time_uptime is not a "real" time_t (seconds
 *          since the Epoch) but seconds since booting.
 */
struct timespec boottime;       /* boot time (realtime) for reference only */
time_t time_second;             /* read-only 'passive' realtime in seconds */
time_t time_uptime;             /* read-only 'passive' uptime in seconds */

/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpus and
 * interrupt races on UP systems.
 */
struct hardtime {
        __uint32_t time_second;
        sysclock_t cpuclock_base;
};

#define BASETIME_ARYSIZE        16
#define BASETIME_ARYMASK        (BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static struct hardtime hardtime[BASETIME_ARYSIZE];
static volatile int basetime_index;

static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
        struct timespec *bt;
        int error;
        int index;

        /*
         * Because basetime data and index may be updated by another cpu,
         * a load fence is required to ensure that the data we read has
         * not been speculatively read relative to a possibly updated index.
         */
        index = basetime_index;
        cpu_lfence();
        bt = &basetime[index];
        error = SYSCTL_OUT(req, bt, sizeof(*bt));
        return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");

static void hardclock(systimer_t info, int, struct intrframe *frame);
static void statclock(systimer_t info, int, struct intrframe *frame);
static void schedclock(systimer_t info, int, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

int     ticks;                  /* system master ticks at hz */
int     clocks_running;         /* tsleep/timeout clocks operational */
int64_t nsec_adj;               /* ntpd per-tick adjustment in nsec << 32 */
int64_t nsec_acc;               /* accumulator */
int     sched_ticks;            /* global schedule clock ticks */

/* NTPD time correction fields */
int64_t ntp_tick_permanent;     /* per-tick adjustment in nsec << 32 */
int64_t ntp_tick_acc;           /* accumulator for per-tick adjustment */
int64_t ntp_delta;              /* one-time correction in nsec */
int64_t ntp_big_delta = 1000000000;
int32_t ntp_tick_delta;         /* current adjustment rate */
int32_t ntp_default_tick_delta; /* adjustment rate for ntp_delta */
time_t  ntp_leap_second;        /* time of next leap second */
int     ntp_leap_insert;        /* whether to insert or remove a second */
struct spinlock ntp_spin;

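/*
 * The tail-chasing FIFO above boils down to a tiny lockless protocol.
 * A minimal sketch of both sides (illustrative only; the real writer and
 * readers are spread through this file):
 */
#if 0
/* writer (cpu #0 only): fill the next slot, fence, then publish the index */
ni = (basetime_index + 1) & BASETIME_ARYMASK;
basetime[ni] = new_value;               /* 'new_value' is hypothetical */
cpu_sfence();                           /* data visible before index */
basetime_index = ni;

/* reader (any cpu): latch the index, fence, then read that slot */
ni = basetime_index;
cpu_lfence();                           /* don't speculate the slot read */
bt = &basetime[ni];
#endif
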
/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
        /*psratio = profhz / stathz;*/
        spin_init(&ntp_spin, "ntp");
        initclocks_pcpu();
        clocks_running = 1;
        if (kpmap) {
                kpmap->tsc_freq = tsc_frequency;
                kpmap->tick_freq = hz;
        }
}

/*
 * Called on a per-cpu basis from the idle thread bootstrap on each cpu
 * during SMP initialization.
 *
 * This routine is called concurrently during low-level SMP initialization
 * and may not block in any way.  Meaning, among other things, we can't
 * acquire any tokens.
 */
void
initclocks_pcpu(void)
{
        struct globaldata *gd = mycpu;

        crit_enter();
        if (gd->gd_cpuid == 0) {
                gd->gd_time_seconds = 1;
                gd->gd_cpuclock_base = sys_cputimer->count();
                hardtime[0].time_second = gd->gd_time_seconds;
                hardtime[0].cpuclock_base = gd->gd_cpuclock_base;
        } else {
                gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
                gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
        }

        systimer_intr_enable();

        crit_exit();
}

/*
 * Called on a 10-second interval after the system is operational.
 * Return the collection data for USERPCT and install the data for
 * SYSTPCT and IDLEPCT.
 */
static
uint64_t
collect_cputime_callback(int n)
{
        static long cpu_base[CPUSTATES];
        long cpu_states[CPUSTATES];
        long total;
        long acc;
        long lsb;

        bzero(cpu_states, sizeof(cpu_states));
        for (n = 0; n < ncpus; ++n) {
                cpu_states[CP_USER] += cputime_percpu[n].cp_user;
                cpu_states[CP_NICE] += cputime_percpu[n].cp_nice;
                cpu_states[CP_SYS] += cputime_percpu[n].cp_sys;
                cpu_states[CP_INTR] += cputime_percpu[n].cp_intr;
                cpu_states[CP_IDLE] += cputime_percpu[n].cp_idle;
        }

        acc = 0;
        for (n = 0; n < CPUSTATES; ++n) {
                total = cpu_states[n] - cpu_base[n];
                cpu_base[n] = cpu_states[n];
                cpu_states[n] = total;
                acc += total;
        }
        if (acc == 0)           /* prevent degenerate divide by 0 */
                acc = 1;
        lsb = acc / (10000 * 2);
        kcollect_setvalue(KCOLLECT_SYSTPCT,
                          (cpu_states[CP_SYS] + lsb) * 10000 / acc);
        kcollect_setvalue(KCOLLECT_IDLEPCT,
                          (cpu_states[CP_IDLE] + lsb) * 10000 / acc);
        kcollect_setvalue(KCOLLECT_INTRPCT,
                          (cpu_states[CP_INTR] + lsb) * 10000 / acc);
        return((cpu_states[CP_USER] + cpu_states[CP_NICE] + lsb) * 10000 / acc);
}

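/*
 * The callback above reports percentages in units of 1/100% (0..10000),
 * rounding to nearest by pre-adding half of one output unit (lsb).
 * Worked example with illustrative numbers:
 */
#if 0
long acc = 250000;                      /* total ticks this interval */
long sys = 30000;                       /* sys ticks this interval */
long lsb = acc / (10000 * 2);           /* half an output unit == 12 */
long pct = (sys + lsb) * 10000 / acc;   /* == 1200, i.e. 12.00% */
#endif
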
/*
 * This routine is called on just the BSP, just after SMP initialization
 * completes, to finish initializing any clocks that might contend/block
 * (e.g. on a token).  We can't do this in initclocks_pcpu() because
 * that function is called from the idle thread bootstrap for each cpu and
 * is not allowed to block at all.
 */
static
void
initclocks_other(void *dummy)
{
        struct globaldata *ogd = mycpu;
        struct globaldata *gd;
        int n;

        for (n = 0; n < ncpus; ++n) {
                lwkt_setcpu_self(globaldata_find(n));
                gd = mycpu;

                /*
                 * Use a non-queued periodic systimer to prevent multiple
                 * ticks from building up if the sysclock jumps forward
                 * (8254 gets reset).  The sysclock will never jump backwards.
                 * Our time sync is based on the actual sysclock, not the
                 * ticks count.
                 *
                 * Install statclock before hardclock to prevent statclock
                 * from misinterpreting gd_flags for tick assignment when
                 * they overlap.
                 */
                systimer_init_periodic_flags(&gd->gd_statclock, statclock,
                                             NULL, stathz,
                                             SYSTF_MSSYNC | SYSTF_FIRST);
                systimer_init_periodic_flags(&gd->gd_hardclock, hardclock,
                                             NULL, hz, SYSTF_MSSYNC);
        }
        lwkt_setcpu_self(ogd);

        /*
         * Regular data collection
         */
        kcollect_register(KCOLLECT_USERPCT, "user", collect_cputime_callback,
                          KCOLLECT_SCALE(KCOLLECT_USERPCT_FORMAT, 0));
        kcollect_register(KCOLLECT_SYSTPCT, "syst", NULL,
                          KCOLLECT_SCALE(KCOLLECT_SYSTPCT_FORMAT, 0));
        kcollect_register(KCOLLECT_IDLEPCT, "idle", NULL,
                          KCOLLECT_SCALE(KCOLLECT_IDLEPCT_FORMAT, 0));
}
SYSINIT(clocks2, SI_BOOT2_POST_SMP, SI_ORDER_ANY, initclocks_other, NULL);

/*
 * This method is called on just the BSP, after all the usched implementations
 * are initialized.  This avoids races between usched initialization functions
 * and usched_schedulerclock().
 */
static
void
initclocks_usched(void *dummy)
{
        struct globaldata *ogd = mycpu;
        struct globaldata *gd;
        int n;

        for (n = 0; n < ncpus; ++n) {
                lwkt_setcpu_self(globaldata_find(n));
                gd = mycpu;

                /* XXX correct the frequency for scheduler / estcpu tests */
                systimer_init_periodic_flags(&gd->gd_schedclock, schedclock,
                                             NULL, ESTCPUFREQ, SYSTF_MSSYNC);
        }
        lwkt_setcpu_self(ogd);
}
SYSINIT(clocks3, SI_BOOT2_USCHED, SI_ORDER_ANY, initclocks_usched, NULL);

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
        struct timespec *nbt;
        int ni;

        /*
         * XXX SMP / non-atomic basetime updates
         */
        crit_enter();
        ni = (basetime_index + 1) & BASETIME_ARYMASK;
        cpu_lfence();
        nbt = &basetime[ni];
        nanouptime(nbt);
        nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
        nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
        if (nbt->tv_nsec < 0) {
                nbt->tv_nsec += 1000000000;
                --nbt->tv_sec;
        }

        /*
         * Note that basetime diverges from boottime as the clock drift is
         * compensated for, so we cannot do away with boottime.  When setting
         * the absolute time of day the drift is 0 (for an instant) and we
         * can simply assign boottime to basetime.
         *
         * Note that nanouptime() is based on gd_time_seconds which is drift
         * compensated up to a point (it is guaranteed to remain monotonically
         * increasing).  gd_time_seconds is thus our best uptime guess and
         * suitable for use in the boottime calculation.  It is already taken
         * into account in the basetime calculation above.
         */
        spin_lock(&ntp_spin);
        boottime.tv_sec = nbt->tv_sec;
        ntp_delta = 0;

        /*
         * We now have a new basetime, make sure all other cpus have it,
         * then update the index.
         */
        cpu_sfence();
        basetime_index = ni;
        spin_unlock(&ntp_spin);

        crit_exit();
}

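/*
 * The basetime computation above is just a normalized timespec subtract:
 * basetime = requested_realtime - current_uptime.  Minimal sketch of the
 * borrow handling (illustrative helper, not part of this file):
 */
#if 0
static void
timespec_sub_norm(struct timespec *r, const struct timespec *a,
                  const struct timespec *b)
{
        r->tv_sec = a->tv_sec - b->tv_sec;
        r->tv_nsec = a->tv_nsec - b->tv_nsec;
        if (r->tv_nsec < 0) {           /* borrow one second */
                r->tv_nsec += 1000000000;
                --r->tv_sec;
        }
}
#endif
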
/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 *       manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
        sysclock_t cputicks;
        struct proc *p;
        struct globaldata *gd = mycpu;

        if ((gd->gd_reqflags & RQF_IPIQ) == 0 && lwkt_need_ipiq_process(gd)) {
                /* Defer to doreti on passive IPIQ processing */
                need_ipiq();
        }

        /*
         * We update the compensation base to calculate fine-grained time
         * from the sys_cputimer on a per-cpu basis in order to avoid
         * having to mess around with locks.  sys_cputimer is assumed to
         * be consistent across all cpus.  CPU N copies the base state from
         * CPU 0 using the same FIFO trick that we use for basetime (so we
         * don't catch a CPU 0 update in the middle).
         *
         * Note that we never allow info->time (aka gd->gd_hardclock.time)
         * to reverse index gd_cpuclock_base, but it is possible for
         * it to temporarily get behind in the seconds if something in the
         * system locks interrupts for a long period of time.  Since periodic
         * timers count events, though, everything should resynch again
         * immediately.
         */
        if (gd->gd_cpuid == 0) {
                int ni;

                cputicks = info->time - gd->gd_cpuclock_base;
                if (cputicks >= sys_cputimer->freq) {
                        cputicks /= sys_cputimer->freq;
                        if (cputicks != 0 && cputicks != 1)
                                kprintf("Warning: hardclock missed > 1 sec\n");
                        gd->gd_time_seconds += cputicks;
                        gd->gd_cpuclock_base += sys_cputimer->freq * cputicks;
                        /* uncorrected monotonic 1-sec gran */
                        time_uptime += cputicks;
                }
                ni = (basetime_index + 1) & BASETIME_ARYMASK;
                hardtime[ni].time_second = gd->gd_time_seconds;
                hardtime[ni].cpuclock_base = gd->gd_cpuclock_base;
        } else {
                int ni;

                ni = basetime_index;
                cpu_lfence();
                gd->gd_time_seconds = hardtime[ni].time_second;
                gd->gd_cpuclock_base = hardtime[ni].cpuclock_base;
        }

        /*
         * The system-wide ticks counter and NTP related timedelta/tickdelta
         * adjustments only occur on cpu #0.  NTP adjustments are accomplished
         * by updating basetime.
         */
        if (gd->gd_cpuid == 0) {
                struct timespec *nbt;
                struct timespec nts;
                int leap;
                int ni;

                ++ticks;

#if 0
                if (tco->tc_poll_pps)
                        tco->tc_poll_pps(tco);
#endif

                /*
                 * Calculate the new basetime index.  We are in a critical
                 * section on cpu #0 and can safely play with basetime_index.
                 * Start with the current basetime and then make adjustments.
                 */
                ni = (basetime_index + 1) & BASETIME_ARYMASK;
                nbt = &basetime[ni];
                *nbt = basetime[basetime_index];

                /*
                 * ntp adjustments only occur on cpu 0 and are protected by
                 * ntp_spin.  This spinlock virtually never conflicts.
                 */
                spin_lock(&ntp_spin);

                /*
                 * Apply adjtime corrections.  (adjtime() API)
                 *
                 * adjtime() only runs on cpu #0 so our critical section is
                 * sufficient to access these variables.
                 */
                if (ntp_delta != 0) {
                        nbt->tv_nsec += ntp_tick_delta;
                        ntp_delta -= ntp_tick_delta;
                        if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
                            (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
                                ntp_tick_delta = ntp_delta;
                        }
                }

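                /*
                 * The permanent-correction path below accumulates a per-tick
                 * adjustment expressed as 32.32 fixed point (nsec << 32).
                 * A minimal sketch of the idiom, with a made-up rate for
                 * illustration:
                 */
#if 0
                int64_t acc = 0;
                int64_t rate = (1LL << 32) / 4; /* 0.25 nsec/tick in 32.32 */
                long nsec = 0;
                int tick;

                for (tick = 0; tick < 100; ++tick) {
                        acc += rate;
                        if (acc >= (1LL << 32)) {       /* carry whole nsec */
                                nsec += acc >> 32;
                                acc -= (acc >> 32) << 32;
                        }
                }
                /* after 100 ticks, nsec == 25 */
#endif
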
                /*
                 * Apply permanent frequency corrections.  (sysctl API)
                 */
                if (ntp_tick_permanent != 0) {
                        ntp_tick_acc += ntp_tick_permanent;
                        if (ntp_tick_acc >= (1LL << 32)) {
                                nbt->tv_nsec += ntp_tick_acc >> 32;
                                ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
                        } else if (ntp_tick_acc <= -(1LL << 32)) {
                                /* Negate ntp_tick_acc to avoid shifting the sign bit. */
                                nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
                                ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
                        }
                }

                if (nbt->tv_nsec >= 1000000000) {
                        nbt->tv_sec++;
                        nbt->tv_nsec -= 1000000000;
                } else if (nbt->tv_nsec < 0) {
                        nbt->tv_sec--;
                        nbt->tv_nsec += 1000000000;
                }

                /*
                 * Another per-tick compensation.  (for ntp_adjtime() API)
                 */
                if (nsec_adj != 0) {
                        nsec_acc += nsec_adj;
                        if (nsec_acc >= 0x100000000LL) {
                                nbt->tv_nsec += nsec_acc >> 32;
                                nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
                        } else if (nsec_acc <= -0x100000000LL) {
                                nbt->tv_nsec -= -nsec_acc >> 32;
                                nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
                        }
                        if (nbt->tv_nsec >= 1000000000) {
                                nbt->tv_nsec -= 1000000000;
                                ++nbt->tv_sec;
                        } else if (nbt->tv_nsec < 0) {
                                nbt->tv_nsec += 1000000000;
                                --nbt->tv_sec;
                        }
                }
                spin_unlock(&ntp_spin);

                /************************************************************
                 *                  LEAP SECOND CORRECTION                  *
                 ************************************************************
                 *
                 * Taking into account all the corrections made above, figure
                 * out the new real time.  If the seconds field has changed
                 * then apply any pending leap-second corrections.
                 */
                getnanotime_nbt(nbt, &nts);

                if (time_second != nts.tv_sec) {
                        /*
                         * Apply leap second (sysctl API).  Adjust nts for
                         * changes so we do not have to call getnanotime_nbt
                         * again.
                         */
                        if (ntp_leap_second) {
                                if (ntp_leap_second == nts.tv_sec) {
                                        if (ntp_leap_insert) {
                                                nbt->tv_sec++;
                                                nts.tv_sec++;
                                        } else {
                                                nbt->tv_sec--;
                                                nts.tv_sec--;
                                        }
                                        ntp_leap_second--;
                                }
                        }

                        /*
                         * Apply leap second (ntp_adjtime() API), calculate
                         * a new nsec_adj field.  ntp_update_second() returns
                         * nsec_adj as a per-second value but we need it as
                         * a per-tick value.
                         */
                        leap = ntp_update_second(time_second, &nsec_adj);
                        nsec_adj /= hz;
                        nbt->tv_sec += leap;
                        nts.tv_sec += leap;

                        /*
                         * Update the time_second 'approximate time' global.
                         */
                        time_second = nts.tv_sec;
                }

                /*
                 * Finally, our new basetime is ready to go live!
                 */
                cpu_sfence();
                basetime_index = ni;

                /*
                 * Update kpmap on each tick.  TS updates are integrated with
                 * fences and upticks allowing userland to read the data
                 * deterministically.
                 */
                if (kpmap) {
                        int w;

                        w = (kpmap->upticks + 1) & 1;
                        getnanouptime(&kpmap->ts_uptime[w]);
                        getnanotime(&kpmap->ts_realtime[w]);
                        cpu_sfence();
                        ++kpmap->upticks;
                        cpu_sfence();
                }
        }

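        /*
         * Sketch of the matching userland read protocol for the kpmap
         * double buffer updated above (illustrative only; 'w' and 'ts'
         * are hypothetical locals): sample upticks, fence, read the last
         * completed slot, fence, and retry if upticks moved.
         */
#if 0
        do {
                w = kpmap->upticks;
                cpu_lfence();
                ts = kpmap->ts_realtime[w & 1]; /* last completed slot */
                cpu_lfence();
        } while (w != kpmap->upticks);
#endif
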
        /*
         * lwkt thread scheduler fair queueing
         */
        lwkt_schedulerclock(curthread);

        /*
         * softticks are handled for all cpus
         */
        hardclock_softtick(gd);

        /*
         * Rollup accumulated vmstats, copy-back for critical path checks.
         */
        vmstats_rollup_cpu(gd);
        vfscache_rollup_cpu(gd);
        mycpu->gd_vmstats = vmstats;

        /*
         * ITimer handling is per-tick, per-cpu.
         *
         * We must acquire the per-process token in order for ksignal()
         * to be non-blocking.  For the moment this requires an AST fault;
         * the ksignal() cannot be safely issued from this hard interrupt.
         *
         * XXX Even the trytoken here isn't right, and itimer operation in
         *     a multi threaded environment is going to be weird at the
         *     very least.
         */
        if ((p = curproc) != NULL && lwkt_trytoken(&p->p_token)) {
                crit_enter_hard();
                if (p->p_upmap)
                        ++p->p_upmap->runticks;

                if (frame && CLKF_USERMODE(frame) &&
                    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
                    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) {
                        p->p_flags |= P_SIGVTALRM;
                        need_user_resched();
                }
                if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
                    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) {
                        p->p_flags |= P_SIGPROF;
                        need_user_resched();
                }
                crit_exit_hard();
                lwkt_reltoken(&p->p_token);
        }
        setdelayed();
}

/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 *       manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
#ifdef GPROF
        struct gmonparam *g;
        int i;
#endif
        globaldata_t gd = mycpu;
        thread_t td;
        struct proc *p;
        int bump;
        sysclock_t cv;
        sysclock_t scv;

        /*
         * How big was our timeslice relative to the last time?  Calculate
         * in microseconds.
         *
         * NOTE: Use of microuptime() is typically MPSAFE, but usually not
         *       during early boot.  Just use the systimer count to be nice
         *       to e.g. qemu.  The systimer has a better chance of being
         *       MPSAFE at early boot.
         */
        cv = sys_cputimer->count();
        scv = gd->statint.gd_statcv;
        if (scv == 0) {
                bump = 1;
        } else {
                bump = (sys_cputimer->freq64_usec * (cv - scv)) >> 32;
                if (bump < 0)
                        bump = 0;
                if (bump > 1000000)
                        bump = 1000000;
        }
        gd->statint.gd_statcv = cv;

#if 0
        stv = &gd->gd_stattv;
        if (stv->tv_sec == 0) {
                bump = 1;
        } else {
                bump = tv.tv_usec - stv->tv_usec +
                       (tv.tv_sec - stv->tv_sec) * 1000000;
                if (bump < 0)
                        bump = 0;
                if (bump > 1000000)
                        bump = 1000000;
        }
        *stv = tv;
#endif

        td = curthread;
        p = td->td_proc;

        if (frame && CLKF_USERMODE(frame)) {
                /*
                 * Came from userland, handle user time and deal with
                 * possible process.
                 */
                if (p && (p->p_flags & P_PROFIL))
                        addupc_intr(p, CLKF_PC(frame), 1);
                td->td_uticks += bump;

                /*
                 * Charge the time as appropriate
                 */
                if (p && p->p_nice > NZERO)
                        cpu_time.cp_nice += bump;
                else
                        cpu_time.cp_user += bump;
        } else {
                int intr_nest = gd->gd_intr_nesting_level;

                if (in_ipi) {
                        /*
                         * IPI processing code will bump gd_intr_nesting_level
                         * up by one, which breaks following CLKF_INTR testing,
                         * so we subtract one here.
                         */
                        --intr_nest;
                }
#ifdef GPROF
                /*
                 * Kernel statistics are just like addupc_intr, only easier.
                 */
                g = &_gmonparam;
                if (g->state == GMON_PROF_ON && frame) {
                        i = CLKF_PC(frame) - g->lowpc;
                        if (i < g->textsize) {
                                i /= HISTFRACTION * sizeof(*g->kcount);
                                g->kcount[i]++;
                        }
                }
#endif

#define IS_INTR_RUNNING ((frame && CLKF_INTR(intr_nest)) || CLKF_INTR_TD(td))

                /*
                 * Came from kernel mode, so we were:
                 * - handling an interrupt,
                 * - doing syscall or trap work on behalf of the current
                 *   user process, or
                 * - spinning in the idle loop.
                 * Whichever it is, charge the time as appropriate.
                 * Note that we charge interrupts to the current process,
                 * regardless of whether they are ``for'' that process,
                 * so that we know how much of its real time was spent
                 * in ``non-process'' (i.e., interrupt) work.
                 *
                 * XXX assume system if frame is NULL.  A NULL frame
                 *     can occur if ipi processing is done from a crit_exit().
                 */
                if (IS_INTR_RUNNING ||
                    (gd->gd_reqflags & RQF_INTPEND)) {
                        /*
                         * If we interrupted an interrupt thread, well,
                         * count it as interrupt time.
                         */
                        td->td_iticks += bump;
#ifdef DEBUG_PCTRACK
                        if (frame)
                                do_pctrack(frame, PCTRACK_INT);
#endif
                        cpu_time.cp_intr += bump;
                } else if (gd->gd_flags & GDF_VIRTUSER) {
                        /*
                         * The vkernel doesn't do a good job providing trap
                         * frames that we can test.  If the GDF_VIRTUSER
                         * flag is set we probably interrupted user mode.
                         *
                         * We also use this flag on the host when entering
                         * VMM mode.
                         */
                        td->td_uticks += bump;

                        /*
                         * Charge the time as appropriate
                         */
                        if (p && p->p_nice > NZERO)
                                cpu_time.cp_nice += bump;
                        else
                                cpu_time.cp_user += bump;
                } else {
                        td->td_sticks += bump;
                        if (td == &gd->gd_idlethread) {
                                /*
                                 * We want to count token contention as
                                 * system time.  When token contention occurs
                                 * the cpu may only be outside its critical
                                 * section while switching through the idle
                                 * thread.  In this situation, various flags
                                 * will be set in gd_reqflags.
                                 */
                                if (gd->gd_reqflags & RQF_IDLECHECK_WK_MASK)
                                        cpu_time.cp_sys += bump;
                                else
                                        cpu_time.cp_idle += bump;
                        } else {
                                /*
                                 * System thread was running.
                                 */
#ifdef DEBUG_PCTRACK
                                if (frame)
                                        do_pctrack(frame, PCTRACK_SYS);
#endif
                                cpu_time.cp_sys += bump;
                        }
                }

#undef IS_INTR_RUNNING
        }
}

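/*
 * statclock() sizes each sample with the freq64_usec fixed-point idiom:
 * freq64_usec holds usec-per-count in 32.32 fixed point (roughly
 * (1000000 << 32) / freq), so (freq64_usec * delta) >> 32 converts a raw
 * timer delta to microseconds without a 64-bit divide.  Worked example
 * with illustrative numbers:
 */
#if 0
uint64_t freq = 1000000;                        /* 1 MHz cputimer */
uint64_t freq64_usec = (1000000ULL << 32) / freq;       /* == 1 << 32 */
uint64_t delta = 2500;                          /* raw counts */
int bump = (freq64_usec * delta) >> 32;         /* == 2500 usec */
#endif
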
#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */

static void
do_pctrack(struct intrframe *frame, int which)
{
        struct kinfo_pctrack *pctrack;

        pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
        pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
                (void *)CLKF_PC(frame);
        ++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
        struct kinfo_pcheader head;
        int error;
        int cpu;
        int ntrack;

        head.pc_ntrack = PCTRACK_SIZE;
        head.pc_arysize = PCTRACK_ARYSIZE;

        if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
                return (error);

        for (cpu = 0; cpu < ncpus; ++cpu) {
                for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
                        error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
                                           sizeof(struct kinfo_pctrack));
                        if (error)
                                break;
                }
                if (error)
                        break;
        }
        return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
            sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif

/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
        struct lwp *lp;
        struct rusage *ru;
        struct vmspace *vm;
        long rss;

        if ((lp = lwkt_preempted_proc()) != NULL) {
                /*
                 * Account for cpu time used and hit the scheduler.  Note
                 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
                 * HERE.
                 */
                ++lp->lwp_cpticks;
                usched_schedulerclock(lp, info->periodic, info->time);
        } else {
                usched_schedulerclock(NULL, info->periodic, info->time);
        }
        if ((lp = curthread->td_lwp) != NULL) {
                /*
                 * Update resource usage integrals and maximums.
                 */
                if ((ru = &lp->lwp_proc->p_ru) &&
                    (vm = lp->lwp_proc->p_vmspace) != NULL) {
                        ru->ru_ixrss += pgtok(vm->vm_tsize);
                        ru->ru_idrss += pgtok(vm->vm_dsize);
                        ru->ru_isrss += pgtok(vm->vm_ssize);
                        if (lwkt_trytoken(&vm->vm_map.token)) {
                                rss = pgtok(vmspace_resident_count(vm));
                                if (ru->ru_maxrss < rss)
                                        ru->ru_maxrss = rss;
                                lwkt_reltoken(&vm->vm_map.token);
                        }
                }
        }
        /* Increment the global sched_ticks */
        if (mycpu->gd_cpuid == 0)
                ++sched_ticks;
}

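/*
 * do_pctrack() above fills a small power-of-two ring: pc_index counts
 * forever and the store is masked, so a consumer can recover the newest
 * PCTRACK_ARYSIZE samples in order.  Sketch (illustrative only;
 * process_pc() is a hypothetical consumer):
 */
#if 0
struct kinfo_pctrack *pt = &cputime_pctrack[cpu][which];
int n = pt->pc_index;                   /* snapshot of the write cursor */
int i;

for (i = n - PCTRACK_ARYSIZE; i < n; ++i) {     /* oldest first */
        if (i < 0)
                continue;               /* ring not full yet */
        process_pc(pt->pc_array[i & PCTRACK_ARYMASK]);
}
#endif
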
/*
 * Compute the number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function will average the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
        int ticks;
        long sec, usec;

        sec = tv->tv_sec;
        usec = tv->tv_usec;
        if (usec < 0) {
                sec--;
                usec += 1000000;
        }
        if (sec < 0) {
#ifdef DIAGNOSTIC
                if (usec > 0) {
                        sec++;
                        usec -= 1000000;
                }
                kprintf("tvtohz_high: negative time difference "
                        "%ld sec %ld usec\n",
                        sec, usec);
#endif
                ticks = 1;
        } else if (sec <= INT_MAX / hz) {
                ticks = (int)(sec * hz +
                              ((u_long)usec + (ustick - 1)) / ustick) + 1;
        } else {
                ticks = INT_MAX;
        }
        return (ticks);
}

int
tstohz_high(struct timespec *ts)
{
        int ticks;
        long sec, nsec;

        sec = ts->tv_sec;
        nsec = ts->tv_nsec;
        if (nsec < 0) {
                sec--;
                nsec += 1000000000;
        }
        if (sec < 0) {
#ifdef DIAGNOSTIC
                if (nsec > 0) {
                        sec++;
                        nsec -= 1000000000;
                }
                kprintf("tstohz_high: negative time difference "
                        "%ld sec %ld nsec\n",
                        sec, nsec);
#endif
                ticks = 1;
        } else if (sec <= INT_MAX / hz) {
                ticks = (int)(sec * hz +
                              ((u_long)nsec + (nstick - 1)) / nstick) + 1;
        } else {
                ticks = INT_MAX;
        }
        return (ticks);
}

/*
 * Compute the number of ticks for the specified amount of time, erring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
        int ticks;
        long sec;

        sec = tv->tv_sec;
        if (sec <= INT_MAX / hz)
                ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
        else
                ticks = INT_MAX;
        return (ticks);
}

int
tstohz_low(struct timespec *ts)
{
        int ticks;
        long sec;

        sec = ts->tv_sec;
        if (sec <= INT_MAX / hz)
                ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
        else
                ticks = INT_MAX;
        return (ticks);
}

/*
 * Start profiling on a process.
 *
 * Caller must hold p->p_token.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
        if ((p->p_flags & P_PROFIL) == 0) {
                p->p_flags |= P_PROFIL;
#if 0   /* XXX */
                if (++profprocs == 1 && stathz != 0) {
                        crit_enter();
                        psdiv = psratio;
                        setstatclockrate(profhz);
                        crit_exit();
                }
#endif
        }
}

/*
 * Stop profiling on a process.
 *
 * Caller must hold p->p_token.
 */
void
stopprofclock(struct proc *p)
{
        if (p->p_flags & P_PROFIL) {
                p->p_flags &= ~P_PROFIL;
#if 0   /* XXX */
                if (--profprocs == 0 && stathz != 0) {
                        crit_enter();
                        psdiv = 1;
                        setstatclockrate(stathz);
                        crit_exit();
                }
#endif
        }
}

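/*
 * Worked example of the two rounding policies (illustrative, hz = 100 so
 * ustick = 10000 usec).  For a 25 msec request:
 *
 *   tvtohz_high: 0 * 100 + (25000 + 9999) / 10000 + 1 == 4 ticks (late ok)
 *   tvtohz_low:  0 * 100 +  25000         / 10000     == 2 ticks (early ok)
 */
#if 0
struct timeval tv = { 0, 25000 };       /* 25 msec */
int hi = tvtohz_high(&tv);              /* 4 with hz = 100 */
int lo = tvtohz_low(&tv);               /* 2 with hz = 100 */
#endif
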
/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
        struct kinfo_clockinfo clkinfo;

        /*
         * Construct clockinfo structure.
         */
        clkinfo.ci_hz = hz;
        clkinfo.ci_tick = ustick;
        clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
        clkinfo.ci_profhz = profhz;
        clkinfo.ci_stathz = stathz ? stathz : hz;
        return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
        0, 0, sysctl_kern_clockrate, "S,clockinfo", "");

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we deal with the case.  For
 * uniformity we deal with the case in the usec case too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
 */
void
getmicrouptime(struct timeval *tvp)
{
        struct globaldata *gd = mycpu;
        sysclock_t delta;

        do {
                tvp->tv_sec = gd->gd_time_seconds;
                delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
        } while (tvp->tv_sec != gd->gd_time_seconds);

        if (delta >= sys_cputimer->freq) {
                tvp->tv_sec += delta / sys_cputimer->freq;
                delta %= sys_cputimer->freq;
        }
        tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
        if (tvp->tv_usec >= 1000000) {
                tvp->tv_usec -= 1000000;
                ++tvp->tv_sec;
        }
}

void
getnanouptime(struct timespec *tsp)
{
        struct globaldata *gd = mycpu;
        sysclock_t delta;

        do {
                tsp->tv_sec = gd->gd_time_seconds;
                delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
        } while (tsp->tv_sec != gd->gd_time_seconds);

        if (delta >= sys_cputimer->freq) {
                tsp->tv_sec += delta / sys_cputimer->freq;
                delta %= sys_cputimer->freq;
        }
        tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

void
microuptime(struct timeval *tvp)
{
        struct globaldata *gd = mycpu;
        sysclock_t delta;

        do {
                tvp->tv_sec = gd->gd_time_seconds;
                delta = sys_cputimer->count() - gd->gd_cpuclock_base;
        } while (tvp->tv_sec != gd->gd_time_seconds);

        if (delta >= sys_cputimer->freq) {
                tvp->tv_sec += delta / sys_cputimer->freq;
                delta %= sys_cputimer->freq;
        }
        tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
}

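/*
 * All of these readers share one torn-read defense: re-sample the seconds
 * field until it is stable across the delta read.  Distilled pattern
 * (illustrative only; counter() stands in for whichever count source a
 * given variant uses):
 */
#if 0
do {
        sec = gd->gd_time_seconds;                /* 1: sample seconds */
        delta = counter() - gd->gd_cpuclock_base; /* 2: sample sub-second */
} while (sec != gd->gd_time_seconds);             /* 3: retry if it moved */
#endif
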
void
nanouptime(struct timespec *tsp)
{
        struct globaldata *gd = mycpu;
        sysclock_t delta;

        do {
                tsp->tv_sec = gd->gd_time_seconds;
                delta = sys_cputimer->count() - gd->gd_cpuclock_base;
        } while (tsp->tv_sec != gd->gd_time_seconds);

        if (delta >= sys_cputimer->freq) {
                tsp->tv_sec += delta / sys_cputimer->freq;
                delta %= sys_cputimer->freq;
        }
        tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

/*
 * realtime routines
 */
void
getmicrotime(struct timeval *tvp)
{
        struct globaldata *gd = mycpu;
        struct timespec *bt;
        sysclock_t delta;

        do {
                tvp->tv_sec = gd->gd_time_seconds;
                delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
        } while (tvp->tv_sec != gd->gd_time_seconds);

        if (delta >= sys_cputimer->freq) {
                tvp->tv_sec += delta / sys_cputimer->freq;
                delta %= sys_cputimer->freq;
        }
        tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

        bt = &basetime[basetime_index];
        cpu_lfence();
        tvp->tv_sec += bt->tv_sec;
        tvp->tv_usec += bt->tv_nsec / 1000;
        while (tvp->tv_usec >= 1000000) {
                tvp->tv_usec -= 1000000;
                ++tvp->tv_sec;
        }
}

void
getnanotime(struct timespec *tsp)
{
        struct globaldata *gd = mycpu;
        struct timespec *bt;
        sysclock_t delta;

        do {
                tsp->tv_sec = gd->gd_time_seconds;
                delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
        } while (tsp->tv_sec != gd->gd_time_seconds);

        if (delta >= sys_cputimer->freq) {
                tsp->tv_sec += delta / sys_cputimer->freq;
                delta %= sys_cputimer->freq;
        }
        tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

        bt = &basetime[basetime_index];
        cpu_lfence();
        tsp->tv_sec += bt->tv_sec;
        tsp->tv_nsec += bt->tv_nsec;
        while (tsp->tv_nsec >= 1000000000) {
                tsp->tv_nsec -= 1000000000;
                ++tsp->tv_sec;
        }
}

static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
        struct globaldata *gd = mycpu;
        sysclock_t delta;

        do {
                tsp->tv_sec = gd->gd_time_seconds;
                delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
        } while (tsp->tv_sec != gd->gd_time_seconds);

        if (delta >= sys_cputimer->freq) {
                tsp->tv_sec += delta / sys_cputimer->freq;
                delta %= sys_cputimer->freq;
        }
        tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

        tsp->tv_sec += nbt->tv_sec;
        tsp->tv_nsec += nbt->tv_nsec;
        while (tsp->tv_nsec >= 1000000000) {
                tsp->tv_nsec -= 1000000000;
                ++tsp->tv_sec;
        }
}

void
microtime(struct timeval *tvp)
{
        struct globaldata *gd = mycpu;
        struct timespec *bt;
        sysclock_t delta;

        do {
                tvp->tv_sec = gd->gd_time_seconds;
                delta = sys_cputimer->count() - gd->gd_cpuclock_base;
        } while (tvp->tv_sec != gd->gd_time_seconds);

        if (delta >= sys_cputimer->freq) {
                tvp->tv_sec += delta / sys_cputimer->freq;
                delta %= sys_cputimer->freq;
        }
        tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

        bt = &basetime[basetime_index];
        cpu_lfence();
        tvp->tv_sec += bt->tv_sec;
        tvp->tv_usec += bt->tv_nsec / 1000;
        while (tvp->tv_usec >= 1000000) {
                tvp->tv_usec -= 1000000;
                ++tvp->tv_sec;
        }
}

void
nanotime(struct timespec *tsp)
{
        struct globaldata *gd = mycpu;
        struct timespec *bt;
        sysclock_t delta;

        do {
                tsp->tv_sec = gd->gd_time_seconds;
                delta = sys_cputimer->count() - gd->gd_cpuclock_base;
        } while (tsp->tv_sec != gd->gd_time_seconds);

        if (delta >= sys_cputimer->freq) {
                tsp->tv_sec += delta / sys_cputimer->freq;
                delta %= sys_cputimer->freq;
        }
        tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

        bt = &basetime[basetime_index];
        cpu_lfence();
        tsp->tv_sec += bt->tv_sec;
        tsp->tv_nsec += bt->tv_nsec;
        while (tsp->tv_nsec >= 1000000000) {
                tsp->tv_nsec -= 1000000000;
                ++tsp->tv_sec;
        }
}

/*
 * Get an approximate time_t.  It does not have to be accurate.  This
 * function is called only from KTR and can be called with the system in
 * any state so do not use a critical section or other complex operation
 * here.
 *
 * NOTE: This is not exactly synchronized with real time.  To do that we
 *       would have to do what microtime does and check for a nanoseconds
 *       overflow.
 */
time_t
get_approximate_time_t(void)
{
        struct globaldata *gd = mycpu;
        struct timespec *bt;

        bt = &basetime[basetime_index];
        return(gd->gd_time_seconds + bt->tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
        pps_params_t *app;
        struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
        struct pps_kcbind_args *kapi;
#endif

        switch (cmd) {
        case PPS_IOC_CREATE:
                return (0);
        case PPS_IOC_DESTROY:
                return (0);
        case PPS_IOC_SETPARAMS:
                app = (pps_params_t *)data;
                if (app->mode & ~pps->ppscap)
                        return (EINVAL);
                pps->ppsparam = *app;
                return (0);
        case PPS_IOC_GETPARAMS:
                app = (pps_params_t *)data;
                *app = pps->ppsparam;
                app->api_version = PPS_API_VERS_1;
                return (0);
        case PPS_IOC_GETCAP:
                *(int*)data = pps->ppscap;
                return (0);
        case PPS_IOC_FETCH:
                fapi = (struct pps_fetch_args *)data;
                if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
                        return (EINVAL);
                if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
                        return (EOPNOTSUPP);
                pps->ppsinfo.current_mode = pps->ppsparam.mode;
                fapi->pps_info_buf = pps->ppsinfo;
                return (0);
        case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
                kapi = (struct pps_kcbind_args *)data;
                /* XXX Only root should be able to do this */
                if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
                        return (EINVAL);
                if (kapi->kernel_consumer != PPS_KC_HARDPPS)
                        return (EINVAL);
                if (kapi->edge & ~pps->ppscap)
                        return (EINVAL);
                pps->kcmode = kapi->edge;
                return (0);
#else
                return (EOPNOTSUPP);
#endif
        default:
                return (ENOTTY);
        }
}

void
pps_init(struct pps_state *pps)
{
        pps->ppscap |= PPS_TSFMT_TSPEC;
        if (pps->ppscap & PPS_CAPTUREASSERT)
                pps->ppscap |= PPS_OFFSETASSERT;
        if (pps->ppscap & PPS_CAPTURECLEAR)
                pps->ppscap |= PPS_OFFSETCLEAR;
}

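/*
 * The ioctls above back the RFC 2783 userland PPS API.  Typical consumer
 * sketch (illustrative only; assumes 'fd' is an open PPS-capable device):
 */
#if 0
#include <sys/timepps.h>

pps_handle_t ph;
pps_info_t pi;
struct timespec timeout = { 0, 0 };     /* nonzero waits are rejected here */

time_pps_create(fd, &ph);                               /* PPS_IOC_CREATE */
time_pps_fetch(ph, PPS_TSFMT_TSPEC, &pi, &timeout);     /* PPS_IOC_FETCH */
/* pi.assert_timestamp now holds the last captured assert edge */
#endif
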
void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
        struct globaldata *gd;
        struct timespec *tsp;
        struct timespec *osp;
        struct timespec *bt;
        struct timespec ts;
        sysclock_t *pcount;
#ifdef PPS_SYNC
        sysclock_t tcount;
#endif
        sysclock_t delta;
        pps_seq_t *pseq;
        int foff;
#ifdef PPS_SYNC
        int fhard;
#endif
        int ni;

        gd = mycpu;

        /* Things would be easier with arrays... */
        if (event == PPS_CAPTUREASSERT) {
                tsp = &pps->ppsinfo.assert_timestamp;
                osp = &pps->ppsparam.assert_offset;
                foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
#ifdef PPS_SYNC
                fhard = pps->kcmode & PPS_CAPTUREASSERT;
#endif
                pcount = &pps->ppscount[0];
                pseq = &pps->ppsinfo.assert_sequence;
        } else {
                tsp = &pps->ppsinfo.clear_timestamp;
                osp = &pps->ppsparam.clear_offset;
                foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
#ifdef PPS_SYNC
                fhard = pps->kcmode & PPS_CAPTURECLEAR;
#endif
                pcount = &pps->ppscount[1];
                pseq = &pps->ppsinfo.clear_sequence;
        }

        /* Nothing really happened */
        if (*pcount == count)
                return;

        *pcount = count;

        do {
                ts.tv_sec = gd->gd_time_seconds;
                delta = count - gd->gd_cpuclock_base;
        } while (ts.tv_sec != gd->gd_time_seconds);

        if (delta >= sys_cputimer->freq) {
                ts.tv_sec += delta / sys_cputimer->freq;
                delta %= sys_cputimer->freq;
        }
        ts.tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
        ni = basetime_index;
        cpu_lfence();
        bt = &basetime[ni];
        ts.tv_sec += bt->tv_sec;
        ts.tv_nsec += bt->tv_nsec;
        while (ts.tv_nsec >= 1000000000) {
                ts.tv_nsec -= 1000000000;
                ++ts.tv_sec;
        }

        (*pseq)++;
        *tsp = ts;

        if (foff) {
                timespecadd(tsp, osp);
                if (tsp->tv_nsec < 0) {
                        tsp->tv_nsec += 1000000000;
                        tsp->tv_sec -= 1;
                }
        }
#ifdef PPS_SYNC
        if (fhard) {
                /* magic, at its best... */
                tcount = count - pps->ppscount[2];
                pps->ppscount[2] = count;
                if (tcount >= sys_cputimer->freq) {
                        delta = (1000000000 * (tcount / sys_cputimer->freq) +
                                 sys_cputimer->freq64_nsec *
                                 (tcount % sys_cputimer->freq)) >> 32;
                } else {
                        delta = (sys_cputimer->freq64_nsec * tcount) >> 32;
                }
                hardpps(tsp, delta);
        }
#endif
}

/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
tsc_uclock_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
        if (cpu_feature & CPUID_TSC) {
                return (rdtsc() +
                        tsc_frequency * ns / (int64_t)1000000000);
        }
#endif
        return(-1);
}

/*
 * Compare the tsc against the passed target
 *
 * Returns +1 if the target has been reached
 * Returns 0 if the target has not yet been reached
 * Returns -1 if the TSC is not supported.
 *
 * Typical use:         while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
        if (cpu_feature & CPUID_TSC) {
                if ((int64_t)(target - rdtsc()) <= 0)
                        return(1);
                return(0);
        }
#endif
        return(-1);
}

/*
 * Delay the specified number of nanoseconds using the tsc.  This function
 * returns immediately if the TSC is not supported.  At least one cpu_pause()
 * will be issued.
 */
void
tsc_delay(int ns)
{
        int64_t clk;

        clk = tsc_get_target(ns);
        cpu_pause();
        cpu_pause();
        while (tsc_test_target(clk) == 0) {
                cpu_pause();
                cpu_pause();
                cpu_pause();
                cpu_pause();
        }
}
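
/*
 * Usage sketch: the target/test pair supports bounded polling loops with
 * work between samples, e.g. waiting on a device-ready condition with a
 * 10 usec budget (illustrative only; device_ready() and 'sc' are
 * hypothetical driver pieces):
 */
#if 0
tsc_uclock_t target = tsc_get_target(10000);    /* 10000 ns budget */

while (tsc_test_target(target) == 0) {
        if (device_ready(sc))                   /* hypothetical poll */
                break;
        cpu_pause();
}
#endif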