/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 */

#include "opt_ntp.h"
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/priv.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <sys/upmap.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/kcollect.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks(void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
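 *
 * For example (illustrative only, not a specific consumer): a sampling
 * monitor would typically read the structure twice and derive percentages
 * from the deltas,
 *
 *	pct_user = (cp_user2 - cp_user1) * 100 / (total2 - total1);
 *
 * so the absolute microsecond counts never need to be decoded.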
 */
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif

static int sniff_enable = 1;
static int sniff_target = -1;
SYSCTL_INT(_kern, OID_AUTO, sniff_enable, CTLFLAG_RW, &sniff_enable, 0, "");
SYSCTL_INT(_kern, OID_AUTO, sniff_target, CTLFLAG_RW, &sniff_target, 0, "");

static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;
	int root_error;
	size_t size = sizeof(struct kinfo_cputime);
	struct kinfo_cputime tmp;

	/*
	 * NOTE: For security reasons, only root can sniff %rip
	 */
	root_error = priv_check_cred(curthread->td_ucred, PRIV_ROOT, 0);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		tmp = cputime_percpu[cpu];
		if (root_error == 0) {
			tmp.cp_sample_pc =
				(int64_t)globaldata_find(cpu)->gd_sample_pc;
			tmp.cp_sample_sp =
				(int64_t)globaldata_find(cpu)->gd_sample_sp;
		}
		if ((error = SYSCTL_OUT(req, &tmp, size)) != 0)
			break;
	}

	if (root_error == 0) {
		if (sniff_enable) {
			int n = sniff_target;
			if (n < 0)
				smp_sniff();
			else if (n < ncpus)
				cpu_sniff(n);
		}
	}

	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");

static int
sysctl_cp_time(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[CPUSTATES] = {0};
	int cpu, error = 0;
	size_t size = sizeof(cpu_states);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] += cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[cpu].cp_idle;
	}

	error = SYSCTL_OUT(req, cpu_states, size);

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_time, "LU", "CPU time statistics");

static int
sysctl_cp_times(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[CPUSTATES] = {0};
	int cpu, error;
	size_t size = sizeof(cpu_states);

	for (error = 0, cpu = 0; error == 0 && cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] = cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] = cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] = cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] = cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] = cputime_percpu[cpu].cp_idle;
		error = SYSCTL_OUT(req, cpu_states, size);
	}

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_times, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_times, "LU", "per-CPU time statistics");

/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this with
 * microuptime().  microtime() is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 *
 * WARNING! time_second can backstep on time corrections.
 *	    Also, unlike time_second, time_uptime is not a "real" time_t
 *	    (seconds since the Epoch) but seconds since booting.
 */
struct timespec boottime;	/* boot time (realtime) for reference only */
time_t time_second;		/* read-only 'passive' realtime in seconds */
time_t time_uptime;		/* read-only 'passive' uptime in seconds */

/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpu's and
 * interrupt races on UP systems.
 */
struct hardtime {
	__uint32_t time_second;
	sysclock_t cpuclock_base;
};

#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)

static struct timespec basetime[BASETIME_ARYSIZE];
static struct hardtime hardtime[BASETIME_ARYSIZE];
static volatile int basetime_index;

static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;
	int index;

	/*
	 * Because basetime data and index may be updated by another cpu,
	 * a load fence is required to ensure that the data we read has
	 * not been speculatively read relative to a possibly updated index.
	 */
	index = basetime_index;
	cpu_lfence();
	bt = &basetime[index];
	error = SYSCTL_OUT(req, bt, sizeof(*bt));
	return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");

static void hardclock(systimer_t info, int, struct intrframe *frame);
static void statclock(systimer_t info, int, struct intrframe *frame);
static void schedclock(systimer_t info, int, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

int	ticks;			/* system master ticks at hz */
int	clocks_running;		/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */
int	sched_ticks;		/* global schedule clock ticks */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t	ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */
struct spinlock	ntp_spin;

/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	/*psratio = profhz / stathz;*/
	spin_init(&ntp_spin, "ntp");
	initclocks_pcpu();
	clocks_running = 1;
	if (kpmap) {
		kpmap->tsc_freq = tsc_frequency;
		kpmap->tick_freq = hz;
	}
}

/*
 * Called on a per-cpu basis from the idle thread bootstrap on each cpu
 * during SMP initialization.
 *
 * This routine is called concurrently during low-level SMP initialization
 * and may not block in any way.  Meaning, among other things, we can't
 * acquire any tokens.
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = sys_cputimer->count();
		hardtime[0].time_second = gd->gd_time_seconds;
		hardtime[0].cpuclock_base = gd->gd_cpuclock_base;
	} else {
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

	crit_exit();
}

/*
 * Called on a 10-second interval after the system is operational.
 * Return the collection data for USERPCT and install the data for
 * SYSTPCT and IDLEPCT.
 */
static
uint64_t
collect_cputime_callback(int n)
{
	static long cpu_base[CPUSTATES];
	long cpu_states[CPUSTATES];
	long total;
	long acc;
	long lsb;

	bzero(cpu_states, sizeof(cpu_states));
	for (n = 0; n < ncpus; ++n) {
		cpu_states[CP_USER] += cputime_percpu[n].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[n].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[n].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[n].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[n].cp_idle;
	}

	acc = 0;
	for (n = 0; n < CPUSTATES; ++n) {
		total = cpu_states[n] - cpu_base[n];
		cpu_base[n] = cpu_states[n];
		cpu_states[n] = total;
		acc += total;
	}
	if (acc == 0)		/* prevent degenerate divide by 0 */
		acc = 1;
	lsb = acc / (10000 * 2);
	kcollect_setvalue(KCOLLECT_SYSTPCT,
			  (cpu_states[CP_SYS] + lsb) * 10000 / acc);
	kcollect_setvalue(KCOLLECT_IDLEPCT,
			  (cpu_states[CP_IDLE] + lsb) * 10000 / acc);
	kcollect_setvalue(KCOLLECT_INTRPCT,
			  (cpu_states[CP_INTR] + lsb) * 10000 / acc);
	return((cpu_states[CP_USER] + cpu_states[CP_NICE] + lsb) * 10000 / acc);
}

/*
 * This routine is called on just the BSP, just after SMP initialization
 * completes, to finish initializing any clocks that might contend/block
 * (e.g. like on a token).  We can't do this in initclocks_pcpu() because
 * that function is called from the idle thread bootstrap for each cpu and
 * not allowed to block at all.
 */
static
void
initclocks_other(void *dummy)
{
	struct globaldata *ogd = mycpu;
	struct globaldata *gd;
	int n;

	for (n = 0; n < ncpus; ++n) {
		lwkt_setcpu_self(globaldata_find(n));
		gd = mycpu;

		/*
		 * Use a non-queued periodic systimer to prevent multiple
		 * ticks from building up if the sysclock jumps forward
		 * (8254 gets reset).  The sysclock will never jump backwards.
		 * Our time sync is based on the actual sysclock, not the
		 * ticks count.
		 *
		 * Install statclock before hardclock to prevent statclock
		 * from misinterpreting gd_flags for tick assignment when
		 * they overlap.
		 * Also offset the statclock by half of its interval to try
		 * to avoid being coincident with callouts.
		 */
		systimer_init_periodic_flags(&gd->gd_statclock, statclock,
					     NULL, stathz,
					     SYSTF_MSSYNC | SYSTF_FIRST |
					     SYSTF_OFFSET50);
		systimer_init_periodic_flags(&gd->gd_hardclock, hardclock,
					     NULL, hz, SYSTF_MSSYNC);
	}
	lwkt_setcpu_self(ogd);

	/*
	 * Regular data collection
	 */
	kcollect_register(KCOLLECT_USERPCT, "user", collect_cputime_callback,
			  KCOLLECT_SCALE(KCOLLECT_USERPCT_FORMAT, 0));
	kcollect_register(KCOLLECT_SYSTPCT, "syst", NULL,
			  KCOLLECT_SCALE(KCOLLECT_SYSTPCT_FORMAT, 0));
	kcollect_register(KCOLLECT_IDLEPCT, "idle", NULL,
			  KCOLLECT_SCALE(KCOLLECT_IDLEPCT_FORMAT, 0));
}
SYSINIT(clocks2, SI_BOOT2_POST_SMP, SI_ORDER_ANY, initclocks_other, NULL);

/*
 * This method is called on just the BSP, after all the usched implementations
 * are initialized.  This avoids races between usched initialization functions
 * and usched_schedulerclock().
 */
static
void
initclocks_usched(void *dummy)
{
	struct globaldata *ogd = mycpu;
	struct globaldata *gd;
	int n;

	for (n = 0; n < ncpus; ++n) {
		lwkt_setcpu_self(globaldata_find(n));
		gd = mycpu;

		/* XXX correct the frequency for scheduler / estcpu tests */
		systimer_init_periodic_flags(&gd->gd_schedclock, schedclock,
					     NULL, ESTCPUFREQ, SYSTF_MSSYNC);
	}
	lwkt_setcpu_self(ogd);
}
SYSINIT(clocks3, SI_BOOT2_USCHED, SI_ORDER_ANY, initclocks_usched, NULL);

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	cpu_lfence();
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
		nbt->tv_nsec += 1000000000;
		--nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	spin_lock(&ntp_spin);
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
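	 *
	 * The store fence below pairs with the cpu_lfence() that readers
	 * issue after loading basetime_index: the new FIFO slot is fully
	 * written out before the index is published, so a reader can never
	 * observe the new index with stale slot contents.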
	 */
	cpu_sfence();
	basetime_index = ni;
	spin_unlock(&ntp_spin);

	crit_exit();
}

/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct globaldata *gd = mycpu;

	if ((gd->gd_reqflags & RQF_IPIQ) == 0 && lwkt_need_ipiq_process(gd)) {
		/* Defer to doreti on passive IPIQ processing */
		need_ipiq();
	}

	/*
	 * We update the compensation base to calculate fine-grained time
	 * from the sys_cputimer on a per-cpu basis in order to avoid
	 * having to mess around with locks.  sys_cputimer is assumed to
	 * be consistent across all cpus.  CPU N copies the base state from
	 * CPU 0 using the same FIFO trick that we use for basetime (so we
	 * don't catch a CPU 0 update in the middle).
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but that it is possible for
	 * it to temporarily get behind in the seconds if something in the
	 * system locks interrupts for a long period of time.  Since periodic
	 * timers count events, though, everything should resynch again
	 * immediately.
	 */
	if (gd->gd_cpuid == 0) {
		int ni;

		cputicks = info->time - gd->gd_cpuclock_base;
		if (cputicks >= sys_cputimer->freq) {
			cputicks /= sys_cputimer->freq;
			if (cputicks != 0 && cputicks != 1)
				kprintf("Warning: hardclock missed > 1 sec\n");
			gd->gd_time_seconds += cputicks;
			gd->gd_cpuclock_base += sys_cputimer->freq * cputicks;
			/* uncorrected monotonic 1-sec gran */
			time_uptime += cputicks;
		}
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		hardtime[ni].time_second = gd->gd_time_seconds;
		hardtime[ni].cpuclock_base = gd->gd_cpuclock_base;
	} else {
		int ni;

		ni = basetime_index;
		cpu_lfence();
		gd->gd_time_seconds = hardtime[ni].time_second;
		gd->gd_cpuclock_base = hardtime[ni].cpuclock_base;
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
		struct timespec *nbt;
		struct timespec nts;
		int leap;
		int ni;

		++ticks;

#if 0
		if (tco->tc_poll_pps)
			tco->tc_poll_pps(tco);
#endif

		/*
		 * Calculate the new basetime index.  We are in a critical
		 * section on cpu #0 and can safely play with basetime_index.
		 * Start with the current basetime and then make adjustments.
		 */
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		nbt = &basetime[ni];
		*nbt = basetime[basetime_index];

		/*
		 * ntp adjustments only occur on cpu 0 and are protected by
		 * ntp_spin.  This spinlock virtually never conflicts.
		 */
		spin_lock(&ntp_spin);

		/*
		 * Apply adjtime corrections.  (adjtime() API)
		 *
		 * adjtime() only runs on cpu #0 so our critical section is
		 * sufficient to access these variables.
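		 *
		 * For example (hypothetical numbers): a pending +1000000
		 * nsec ntp_delta slewed at ntp_tick_delta = 10000 nsec is
		 * folded into basetime over roughly 100 ticks; the test
		 * below shrinks the final step so the correction cannot
		 * overshoot.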
		 */
		if (ntp_delta != 0) {
			nbt->tv_nsec += ntp_tick_delta;
			ntp_delta -= ntp_tick_delta;
			if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
			    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
				ntp_tick_delta = ntp_delta;
			}
		}

		/*
		 * Apply permanent frequency corrections.  (sysctl API)
		 */
		if (ntp_tick_permanent != 0) {
			ntp_tick_acc += ntp_tick_permanent;
			if (ntp_tick_acc >= (1LL << 32)) {
				nbt->tv_nsec += ntp_tick_acc >> 32;
				ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
			} else if (ntp_tick_acc <= -(1LL << 32)) {
				/*
				 * Negate ntp_tick_acc to avoid shifting
				 * the sign bit.
				 */
				nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
				ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
			}
		}

		if (nbt->tv_nsec >= 1000000000) {
			nbt->tv_sec++;
			nbt->tv_nsec -= 1000000000;
		} else if (nbt->tv_nsec < 0) {
			nbt->tv_sec--;
			nbt->tv_nsec += 1000000000;
		}

		/*
		 * Another per-tick compensation.  (for ntp_adjtime() API)
		 */
		if (nsec_adj != 0) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				nbt->tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				nbt->tv_nsec -= -nsec_acc >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (nbt->tv_nsec >= 1000000000) {
				nbt->tv_nsec -= 1000000000;
				++nbt->tv_sec;
			} else if (nbt->tv_nsec < 0) {
				nbt->tv_nsec += 1000000000;
				--nbt->tv_sec;
			}
		}
		spin_unlock(&ntp_spin);

		/************************************************************
		 *		LEAP SECOND CORRECTION			    *
		 ************************************************************
		 *
		 * Taking into account all the corrections made above, figure
		 * out the new real time.  If the seconds field has changed
		 * then apply any pending leap-second corrections.
		 */
		getnanotime_nbt(nbt, &nts);

		if (time_second != nts.tv_sec) {
			/*
			 * Apply leap second (sysctl API).  Adjust nts for
			 * changes so we do not have to call getnanotime_nbt
			 * again.
			 */
			if (ntp_leap_second) {
				if (ntp_leap_second == nts.tv_sec) {
					if (ntp_leap_insert) {
						nbt->tv_sec++;
						nts.tv_sec++;
					} else {
						nbt->tv_sec--;
						nts.tv_sec--;
					}
					ntp_leap_second--;
				}
			}

			/*
			 * Apply leap second (ntp_adjtime() API), calculate
			 * a new nsec_adj field.  ntp_update_second() returns
			 * nsec_adj as a per-second value but we need it as
			 * a per-tick value.
			 */
			leap = ntp_update_second(time_second, &nsec_adj);
			nsec_adj /= hz;
			nbt->tv_sec += leap;
			nts.tv_sec += leap;

			/*
			 * Update the time_second 'approximate time' global.
			 */
			time_second = nts.tv_sec;

			/*
			 * Clear the IPC hint for the currently running
			 * thread once per second, allowing us to disconnect
			 * the hint from a thread which may no longer care.
			 */
			curthread->td_wakefromcpu = -1;
		}

		/*
		 * Finally, our new basetime is ready to go live!
		 */
		cpu_sfence();
		basetime_index = ni;

		/*
		 * Update kpmap on each tick.  TS updates are integrated with
		 * fences and upticks allowing userland to read the data
		 * deterministically.
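		 *
		 * A userland consumer is expected to use the usual seqlock
		 * style retry loop (illustrative sketch only; the real
		 * readers live in userland):
		 *
		 *	do {
		 *		w = kpmap->upticks;
		 *		cpu_lfence();
		 *		ts = kpmap->ts_realtime[w & 1];
		 *		cpu_lfence();
		 *	} while (w != kpmap->upticks);
		 *
		 * i.e. retry if a tick updated the double buffer mid-read.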
		 */
		if (kpmap) {
			int w;

			w = (kpmap->upticks + 1) & 1;
			getnanouptime(&kpmap->ts_uptime[w]);
			getnanotime(&kpmap->ts_realtime[w]);
			cpu_sfence();
			++kpmap->upticks;
			cpu_sfence();
		}
	}

	/*
	 * lwkt thread scheduler fair queueing
	 */
	lwkt_schedulerclock(curthread);

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * Rollup accumulated vmstats, copy-back for critical path checks.
	 */
	vmstats_rollup_cpu(gd);
	vfscache_rollup_cpu(gd);
	mycpu->gd_vmstats = vmstats;

	/*
	 * ITimer handling is per-tick, per-cpu.
	 *
	 * We must acquire the per-process token in order for ksignal()
	 * to be non-blocking.  For the moment this requires an AST fault,
	 * the ksignal() cannot be safely issued from this hard interrupt.
	 *
	 * XXX Even the trytoken here isn't right, and itimer operation in
	 *     a multi threaded environment is going to be weird at the
	 *     very least.
	 */
	if ((p = curproc) != NULL && lwkt_trytoken(&p->p_token)) {
		crit_enter_hard();
		if (p->p_upmap)
			++p->p_upmap->runticks;

		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) {
			p->p_flags |= P_SIGVTALRM;
			need_user_resched();
		}
		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) {
			p->p_flags |= P_SIGPROF;
			need_user_resched();
		}
		crit_exit_hard();
		lwkt_reltoken(&p->p_token);
	}
	setdelayed();
}

/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
	globaldata_t gd = mycpu;
	thread_t td;
	struct proc *p;
	int bump;
	sysclock_t cv;
	sysclock_t scv;

	/*
	 * How big was our timeslice relative to the last time?  Calculate
	 * in microseconds.
	 *
	 * NOTE: Use of microuptime() is typically MPSAFE, but usually not
	 *	 during early boot.  Just use the systimer count to be nice
	 *	 to e.g. qemu.  The systimer has a better chance of being
	 *	 MPSAFE at early boot.
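	 *
	 * freq64_usec is effectively a 32.32 fixed point microseconds-per-
	 * count ratio, so (freq64_usec * delta) >> 32 converts a count
	 * delta directly to microseconds.  E.g. with a 1 MHz cputimer
	 * (freq64_usec == 1LL << 32), a delta of 8000 counts yields
	 * bump = 8000 us.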
	 */
	cv = sys_cputimer->count();
	scv = gd->statint.gd_statcv;
	if (scv == 0) {
		bump = 1;
	} else {
		bump = (sys_cputimer->freq64_usec * (cv - scv)) >> 32;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	gd->statint.gd_statcv = cv;

#if 0
	stv = &gd->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
		       (tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;
#endif

	td = curthread;
	p = td->td_proc;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flags & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cpu_time.cp_nice += bump;
		else
			cpu_time.cp_user += bump;
	} else {
		int intr_nest = gd->gd_intr_nesting_level;

		if (in_ipi) {
			/*
			 * IPI processing code will bump gd_intr_nesting_level
			 * up by one, which breaks following CLKF_INTR testing,
			 * so we subtract it by one here.
			 */
			--intr_nest;
		}

#define IS_INTR_RUNNING	((frame && CLKF_INTR(intr_nest)) || CLKF_INTR_TD(td))

		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 * can occur if ipi processing is done from a crit_exit().
		 */
		if (IS_INTR_RUNNING ||
		    (gd->gd_reqflags & RQF_INTPEND)) {
			/*
			 * If we interrupted an interrupt thread, well,
			 * count it as interrupt time.
			 */
			td->td_iticks += bump;
#ifdef DEBUG_PCTRACK
			if (frame)
				do_pctrack(frame, PCTRACK_INT);
#endif
			cpu_time.cp_intr += bump;
		} else if (gd->gd_flags & GDF_VIRTUSER) {
			/*
			 * The vkernel doesn't do a good job providing trap
			 * frames that we can test.  If the GDF_VIRTUSER
			 * flag is set we probably interrupted user mode.
			 *
			 * We also use this flag on the host when entering
			 * VMM mode.
			 */
			td->td_uticks += bump;

			/*
			 * Charge the time as appropriate
			 */
			if (p && p->p_nice > NZERO)
				cpu_time.cp_nice += bump;
			else
				cpu_time.cp_user += bump;
		} else {
			td->td_sticks += bump;
			if (td == &gd->gd_idlethread) {
				/*
				 * We want to count token contention as
				 * system time.  When token contention occurs
				 * the cpu may only be outside its critical
				 * section while switching through the idle
				 * thread.  In this situation, various flags
				 * will be set in gd_reqflags.
				 */
				if (gd->gd_reqflags & RQF_IDLECHECK_WK_MASK)
					cpu_time.cp_sys += bump;
				else
					cpu_time.cp_idle += bump;
			} else {
				/*
				 * System thread was running.
				 */
#ifdef DEBUG_PCTRACK
				if (frame)
					do_pctrack(frame, PCTRACK_SYS);
#endif
				cpu_time.cp_sys += bump;
			}
		}

#undef IS_INTR_RUNNING
	}
}

#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */

static void
do_pctrack(struct intrframe *frame, int which)
{
	struct kinfo_pctrack *pctrack;

	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
		(void *)CLKF_PC(frame);
	++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_pcheader head;
	int error;
	int cpu;
	int ntrack;

	head.pc_ntrack = PCTRACK_SIZE;
	head.pc_arysize = PCTRACK_ARYSIZE;

	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
		return (error);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
					   sizeof(struct kinfo_pctrack));
			if (error)
				break;
		}
		if (error)
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif

/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	struct lwp *lp;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	if ((lp = lwkt_preempted_proc()) != NULL) {
		/*
		 * Account for cpu time used and hit the scheduler.  Note
		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
		 * HERE.
		 */
		++lp->lwp_cpticks;
		usched_schedulerclock(lp, info->periodic, info->time);
	} else {
		usched_schedulerclock(NULL, info->periodic, info->time);
	}
	if ((lp = curthread->td_lwp) != NULL) {
		/*
		 * Update resource usage integrals and maximums.
		 */
		if ((ru = &lp->lwp_proc->p_ru) &&
		    (vm = lp->lwp_proc->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(btoc(vm->vm_tsize));
			ru->ru_idrss += pgtok(btoc(vm->vm_dsize));
			ru->ru_isrss += pgtok(btoc(vm->vm_ssize));
			if (lwkt_trytoken(&vm->vm_map.token)) {
				rss = pgtok(vmspace_resident_count(vm));
				if (ru->ru_maxrss < rss)
					ru->ru_maxrss = rss;
				lwkt_reltoken(&vm->vm_map.token);
			}
		}
	}
	/* Increment the global sched_ticks */
	if (mycpu->gd_cpuid == 0)
		++sched_ticks;
}

/*
 * Compute number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function will average the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
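 *
 * For example, with hz = 100 (ustick = 10000), a request for 25000 usec
 * computes howmany(25000, 10000) + 1 = 4 ticks, i.e. between 30 and 40 ms
 * of real delay depending on clock phase, never less than requested.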
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		kprintf("tvtohz_high: negative time difference "
			"%ld sec %ld usec\n",
			sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz + howmany((u_long)usec, ustick)) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

int
tstohz_high(struct timespec *ts)
{
	int ticks;
	long sec, nsec;

	sec = ts->tv_sec;
	nsec = ts->tv_nsec;
	if (nsec < 0) {
		sec--;
		nsec += 1000000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (nsec > 0) {
			sec++;
			nsec -= 1000000000;
		}
		kprintf("tstohz_high: negative time difference "
			"%ld sec %ld nsec\n",
			sec, nsec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz + howmany((u_long)nsec, nstick)) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

/*
 * Compute number of ticks for the specified amount of time, erroring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
	else
		ticks = INT_MAX;
	return (ticks);
}

int
tstohz_low(struct timespec *ts)
{
	int ticks;
	long sec;

	sec = ts->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
	else
		ticks = INT_MAX;
	return (ticks);
}

/*
 * Start profiling on a process.
 *
 * Caller must hold p->p_token.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flags & P_PROFIL) == 0) {
		p->p_flags |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			crit_enter();
			psdiv = psratio;
			setstatclockrate(profhz);
			crit_exit();
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 *
 * Caller must hold p->p_token.
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flags & P_PROFIL) {
		p->p_flags &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			crit_enter();
			psdiv = 1;
			setstatclockrate(stathz);
			crit_exit();
		}
#endif
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = ustick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo", "");

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we deal with the case.  For
 * uniformity we deal with the case in the usec case too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
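 *
 * E.g. getnanouptime() below costs only a few loads because it reuses
 * the count latched at the last hardclock (gd_hardclock.time), while
 * nanouptime() pays for a sys_cputimer->count() hardware read to refine
 * the result to full precision.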
 */
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}

/*
 * realtime routines
 */
void
getmicrotime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

/*
 * Get an approximate time_t.  It does not have to be accurate.  This
 * function is called only from KTR and can be called with the system in
 * any state so do not use a critical section or other complex operation
 * here.
 *
 * NOTE: This is not exactly synchronized with real time.  To do that we
 *	 would have to do what microtime does and check for a nanoseconds
 *	 overflow.
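 *
 *	 Worst case the value returned can be off by up to about a second
 *	 (the unaccounted nanoseconds carry plus a not-yet-published
 *	 basetime slot), which is fine for trace timestamps.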
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
#ifdef PPS_SYNC
	int fhard;
#endif
	int ni;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
#endif
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
#endif
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		ts.tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	ts.tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
	ni = basetime_index;
	cpu_lfence();
	bt = &basetime[ni];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp, tsp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= sys_cputimer->freq) {
			delta = (1000000000 * (tcount / sys_cputimer->freq) +
				 sys_cputimer->freq64_nsec *
				 (tcount % sys_cputimer->freq)) >> 32;
		} else {
			delta = (sys_cputimer->freq64_nsec * tcount) >> 32;
		}
		hardpps(tsp, delta);
	}
#endif
}

/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
tsc_uclock_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		return (rdtsc() + tsc_frequency * ns / (int64_t)1000000000);
	}
#endif
	return(-1);
}

/*
 * Compare the tsc against the passed target
 *
 * Returns +1 if the target has been reached
 * Returns 0 if the target has not yet been reached
 * Returns -1 if the TSC is not supported.
 *
 * Typical use: while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		if ((int64_t)(target - rdtsc()) <= 0)
			return(1);
		return(0);
	}
#endif
	return(-1);
}

/*
 * Delay the specified number of nanoseconds using the tsc.  This function
 * returns immediately if the TSC is not supported.  At least one cpu_pause()
 * will be issued.
 */
void
tsc_delay(int ns)
{
	int64_t clk;

	clk = tsc_get_target(ns);
	cpu_pause();
	cpu_pause();
	while (tsc_test_target(clk) == 0) {
		cpu_pause();
		cpu_pause();
		cpu_pause();
		cpu_pause();
	}
}