/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 */

#include "opt_ntp.h"
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/priv.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <sys/upmap.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/kcollect.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks (void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif

__read_mostly static int sniff_enable = 1;
__read_mostly static int sniff_target = -1;
__read_mostly static int clock_debug2 = 0;
SYSCTL_INT(_kern, OID_AUTO, sniff_enable, CTLFLAG_RW, &sniff_enable, 0 , "");
SYSCTL_INT(_kern, OID_AUTO, sniff_target, CTLFLAG_RW, &sniff_target, 0 , "");
SYSCTL_INT(_debug, OID_AUTO, clock_debug2, CTLFLAG_RW, &clock_debug2, 0 , "");

static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;
	int root_error;
	size_t size = sizeof(struct kinfo_cputime);
	struct kinfo_cputime tmp;

	/*
	 * NOTE: For security reasons, only root can sniff %rip
	 */
	root_error = priv_check_cred(curthread->td_ucred, PRIV_ROOT, 0);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		tmp = cputime_percpu[cpu];
		if (root_error == 0) {
			tmp.cp_sample_pc =
				(int64_t)globaldata_find(cpu)->gd_sample_pc;
			tmp.cp_sample_sp =
				(int64_t)globaldata_find(cpu)->gd_sample_sp;
		}
		if ((error = SYSCTL_OUT(req, &tmp, size)) != 0)
			break;
	}

	if (root_error == 0) {
		if (sniff_enable) {
			int n = sniff_target;
			if (n < 0)
				smp_sniff();
			else if (n < ncpus)
				cpu_sniff(n);
		}
	}

	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");

static int
sysctl_cp_time(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[CPUSTATES] = {0};
	int cpu, error = 0;
	size_t size = sizeof(cpu_states);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] += cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[cpu].cp_idle;
	}

	error = SYSCTL_OUT(req, cpu_states, size);

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_time, "LU", "CPU time statistics");

static int
sysctl_cp_times(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[CPUSTATES] = {0};
	int cpu, error;
	size_t size = sizeof(cpu_states);

	for (error = 0, cpu = 0; error == 0 && cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] = cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] = cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] = cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] = cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] = cputime_percpu[cpu].cp_idle;
		error = SYSCTL_OUT(req, cpu_states, size);
	}

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_times, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_times, "LU", "per-CPU time statistics");

/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this with
 * microuptime().  microtime() is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 *
 * WARNING! time_second can backstep on time corrections. Also, unlike
 *          time_second, time_uptime is not a "real" time_t (seconds
 *          since the Epoch) but seconds since booting.
 */
__read_mostly struct timespec boottime;	/* boot time (realtime) for ref only */
__read_mostly struct timespec ticktime0;/* updated every tick */
__read_mostly struct timespec ticktime2;/* updated every tick */
__read_mostly int ticktime_update;
__read_mostly time_t time_second;	/* read-only 'passive' rt in seconds */
__read_mostly time_t time_uptime;	/* read-only 'passive' ut in seconds */

/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpu's and
 * interrupt races on UP systems.
 */
struct hardtime {
	__uint32_t time_second;
	sysclock_t cpuclock_base;
};

#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static struct hardtime hardtime[BASETIME_ARYSIZE];
static volatile int basetime_index;

static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;
	int index;

	/*
	 * Because basetime data and index may be updated by another cpu,
	 * a load fence is required to ensure that the data we read has
	 * not been speculatively read relative to a possibly updated index.
	 */
	index = basetime_index;
	cpu_lfence();
	bt = &basetime[index];
	error = SYSCTL_OUT(req, bt, sizeof(*bt));
	return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");

static void hardclock(systimer_t info, int, struct intrframe *frame);
static void statclock(systimer_t info, int, struct intrframe *frame);
static void schedclock(systimer_t info, int, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

/*
 * Use __read_mostly for ticks and sched_ticks because these variables are
 * used all over the kernel and only updated once per tick.
 */
__read_mostly int ticks;		/* system master ticks at hz */
__read_mostly int sched_ticks;		/* global schedule clock ticks */
__read_mostly int clocks_running;	/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */
struct spinlock ntp_spin;

/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	/*psratio = profhz / stathz;*/
	spin_init(&ntp_spin, "ntp");
	initclocks_pcpu();
	clocks_running = 1;
	if (kpmap) {
		kpmap->tsc_freq = tsc_frequency;
		kpmap->tick_freq = hz;
	}
}

/*
 * Called on a per-cpu basis from the idle thread bootstrap on each cpu
 * during SMP initialization.
 *
 * This routine is called concurrently during low-level SMP initialization
 * and may not block in any way.  Meaning, among other things, we can't
 * acquire any tokens.
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = sys_cputimer->count();
		hardtime[0].time_second = gd->gd_time_seconds;
		hardtime[0].cpuclock_base = gd->gd_cpuclock_base;
	} else {
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

	crit_exit();
}

/*
 * Called on a 10-second interval after the system is operational.
 * Return the collection data for USERPCT and install the data for
 * SYSTPCT and IDLEPCT.
 */
static
uint64_t
collect_cputime_callback(int n)
{
	static long cpu_base[CPUSTATES];
	long cpu_states[CPUSTATES];
	long total;
	long acc;
	long lsb;

	bzero(cpu_states, sizeof(cpu_states));
	for (n = 0; n < ncpus; ++n) {
		cpu_states[CP_USER] += cputime_percpu[n].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[n].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[n].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[n].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[n].cp_idle;
	}

	acc = 0;
	for (n = 0; n < CPUSTATES; ++n) {
		total = cpu_states[n] - cpu_base[n];
		cpu_base[n] = cpu_states[n];
		cpu_states[n] = total;
		acc += total;
	}
	if (acc == 0)		/* prevent degenerate divide by 0 */
		acc = 1;
	lsb = acc / (10000 * 2);
	kcollect_setvalue(KCOLLECT_SYSTPCT,
			  (cpu_states[CP_SYS] + lsb) * 10000 / acc);
	kcollect_setvalue(KCOLLECT_IDLEPCT,
			  (cpu_states[CP_IDLE] + lsb) * 10000 / acc);
	kcollect_setvalue(KCOLLECT_INTRPCT,
			  (cpu_states[CP_INTR] + lsb) * 10000 / acc);
	return((cpu_states[CP_USER] + cpu_states[CP_NICE] + lsb) * 10000 / acc);
}

/*
 * This routine is called on just the BSP, just after SMP initialization
 * completes to finish initializing any clocks that might contend/block
 * (e.g. like on a token).  We can't do this in initclocks_pcpu() because
 * that function is called from the idle thread bootstrap for each cpu and
 * not allowed to block at all.
 */
static
void
initclocks_other(void *dummy)
{
	struct globaldata *ogd = mycpu;
	struct globaldata *gd;
	int n;

	for (n = 0; n < ncpus; ++n) {
		lwkt_setcpu_self(globaldata_find(n));
		gd = mycpu;

		/*
		 * Use a non-queued periodic systimer to prevent multiple
		 * ticks from building up if the sysclock jumps forward
		 * (8254 gets reset).  The sysclock will never jump backwards.
		 * Our time sync is based on the actual sysclock, not the
		 * ticks count.
		 *
		 * Install statclock before hardclock to prevent statclock
		 * from misinterpreting gd_flags for tick assignment when
		 * they overlap.  Also offset the statclock by half of
		 * its interval to try to avoid being coincident with
		 * callouts.
		 */
		systimer_init_periodic_flags(&gd->gd_statclock, statclock,
					     NULL, stathz,
					     SYSTF_MSSYNC | SYSTF_FIRST |
					     SYSTF_OFFSET50 | SYSTF_OFFSETCPU);
		systimer_init_periodic_flags(&gd->gd_hardclock, hardclock,
					     NULL, hz,
					     SYSTF_MSSYNC | SYSTF_OFFSETCPU);
	}
	lwkt_setcpu_self(ogd);

	/*
	 * Regular data collection
	 */
	kcollect_register(KCOLLECT_USERPCT, "user", collect_cputime_callback,
			  KCOLLECT_SCALE(KCOLLECT_USERPCT_FORMAT, 0));
	kcollect_register(KCOLLECT_SYSTPCT, "syst", NULL,
			  KCOLLECT_SCALE(KCOLLECT_SYSTPCT_FORMAT, 0));
	kcollect_register(KCOLLECT_IDLEPCT, "idle", NULL,
			  KCOLLECT_SCALE(KCOLLECT_IDLEPCT_FORMAT, 0));
}
SYSINIT(clocks2, SI_BOOT2_POST_SMP, SI_ORDER_ANY, initclocks_other, NULL);

/*
 * This method is called on just the BSP, after all the usched implementations
 * are initialized.  This avoids races between usched initialization functions
 * and usched_schedulerclock().
 */
static
void
initclocks_usched(void *dummy)
{
	struct globaldata *ogd = mycpu;
	struct globaldata *gd;
	int n;

	for (n = 0; n < ncpus; ++n) {
		lwkt_setcpu_self(globaldata_find(n));
		gd = mycpu;

		/* XXX correct the frequency for scheduler / estcpu tests */
		systimer_init_periodic_flags(&gd->gd_schedclock, schedclock,
					     NULL, ESTCPUFREQ, SYSTF_MSSYNC);
	}
	lwkt_setcpu_self(ogd);
}
SYSINIT(clocks3, SI_BOOT2_USCHED, SI_ORDER_ANY, initclocks_usched, NULL);

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	cpu_lfence();
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
		nbt->tv_nsec += 1000000000;
		--nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	spin_lock(&ntp_spin);
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
	 */
	cpu_sfence();
	basetime_index = ni;
	spin_unlock(&ntp_spin);

	crit_exit();
}

/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct globaldata *gd = mycpu;

	if ((gd->gd_reqflags & RQF_IPIQ) == 0 && lwkt_need_ipiq_process(gd)) {
		/* Defer to doreti on passive IPIQ processing */
		need_ipiq();
	}

	/*
	 * We update the compensation base to calculate fine-grained time
	 * from the sys_cputimer on a per-cpu basis in order to avoid
	 * having to mess around with locks.  sys_cputimer is assumed to
	 * be consistent across all cpus.  CPU N copies the base state from
	 * CPU 0 using the same FIFO trick that we use for basetime (so we
	 * don't catch a CPU 0 update in the middle).
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but that it is possible for
	 * it to temporarily get behind in the seconds if something in the
	 * system locks interrupts for a long period of time.  Since periodic
	 * timers count events, though, everything should resynch again
	 * immediately.
	 */
	if (gd->gd_cpuid == 0) {
		int ni;

		cputicks = info->time - gd->gd_cpuclock_base;
		if (cputicks >= sys_cputimer->freq) {
			cputicks /= sys_cputimer->freq;
			if (cputicks != 0 && cputicks != 1)
				kprintf("Warning: hardclock missed > 1 sec\n");
			gd->gd_time_seconds += cputicks;
			gd->gd_cpuclock_base += sys_cputimer->freq * cputicks;
			/* uncorrected monotonic 1-sec gran */
			time_uptime += cputicks;
		}
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		hardtime[ni].time_second = gd->gd_time_seconds;
		hardtime[ni].cpuclock_base = gd->gd_cpuclock_base;
	} else {
		int ni;

		ni = basetime_index;
		cpu_lfence();
		gd->gd_time_seconds = hardtime[ni].time_second;
		gd->gd_cpuclock_base = hardtime[ni].cpuclock_base;
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
		struct timespec *nbt;
		struct timespec nts;
		int leap;
		int ni;

		/*
		 * Update system-wide ticks
		 */
		++ticks;

		/*
		 * Update system-wide ticktime for getnanotime() and getmicrotime()
		 */
		nanotime(&nts);
		atomic_add_int_nonlocked(&ticktime_update, 1);
		cpu_sfence();
		if (ticktime_update & 2)
			ticktime2 = nts;
		else
			ticktime0 = nts;
		cpu_sfence();
		atomic_add_int_nonlocked(&ticktime_update, 1);

#if 0
		if (tco->tc_poll_pps)
			tco->tc_poll_pps(tco);
#endif

		/*
		 * Calculate the new basetime index.  We are in a critical section
		 * on cpu #0 and can safely play with basetime_index.  Start
		 * with the current basetime and then make adjustments.
		 */
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		nbt = &basetime[ni];
		*nbt = basetime[basetime_index];

		/*
		 * ntp adjustments only occur on cpu 0 and are protected by
		 * ntp_spin.  This spinlock virtually never conflicts.
		 */
		spin_lock(&ntp_spin);

		/*
		 * Apply adjtime corrections.  (adjtime() API)
		 *
		 * adjtime() only runs on cpu #0 so our critical section is
		 * sufficient to access these variables.
		 */
		if (ntp_delta != 0) {
			nbt->tv_nsec += ntp_tick_delta;
			ntp_delta -= ntp_tick_delta;
			if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
			    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
				ntp_tick_delta = ntp_delta;
			}
		}

		/*
		 * Apply permanent frequency corrections.  (sysctl API)
		 */
		if (ntp_tick_permanent != 0) {
			ntp_tick_acc += ntp_tick_permanent;
			if (ntp_tick_acc >= (1LL << 32)) {
				nbt->tv_nsec += ntp_tick_acc >> 32;
				ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
			} else if (ntp_tick_acc <= -(1LL << 32)) {
				/* Negate ntp_tick_acc to avoid shifting the sign bit. */
				nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
				ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
			}
		}

		if (nbt->tv_nsec >= 1000000000) {
			nbt->tv_sec++;
			nbt->tv_nsec -= 1000000000;
		} else if (nbt->tv_nsec < 0) {
			nbt->tv_sec--;
			nbt->tv_nsec += 1000000000;
		}

		/*
		 * Another per-tick compensation.  (for ntp_adjtime() API)
		 */
		if (nsec_adj != 0) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				nbt->tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				nbt->tv_nsec -= -nsec_acc >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (nbt->tv_nsec >= 1000000000) {
				nbt->tv_nsec -= 1000000000;
				++nbt->tv_sec;
			} else if (nbt->tv_nsec < 0) {
				nbt->tv_nsec += 1000000000;
				--nbt->tv_sec;
			}
		}
		spin_unlock(&ntp_spin);

		/************************************************************
		 *			LEAP SECOND CORRECTION		    *
		 ************************************************************
		 *
		 * Taking into account all the corrections made above, figure
		 * out the new real time.  If the seconds field has changed
		 * then apply any pending leap-second corrections.
		 */
		getnanotime_nbt(nbt, &nts);

		if (time_second != nts.tv_sec) {
			/*
			 * Apply leap second (sysctl API).  Adjust nts for changes
			 * so we do not have to call getnanotime_nbt again.
			 */
			if (ntp_leap_second) {
				if (ntp_leap_second == nts.tv_sec) {
					if (ntp_leap_insert) {
						nbt->tv_sec++;
						nts.tv_sec++;
					} else {
						nbt->tv_sec--;
						nts.tv_sec--;
					}
					ntp_leap_second--;
				}
			}

			/*
			 * Apply leap second (ntp_adjtime() API), calculate a new
			 * nsec_adj field.  ntp_update_second() returns nsec_adj
			 * as a per-second value but we need it as a per-tick value.
			 */
			leap = ntp_update_second(time_second, &nsec_adj);
			nsec_adj /= hz;
			nbt->tv_sec += leap;
			nts.tv_sec += leap;

			/*
			 * Update the time_second 'approximate time' global.
			 */
			time_second = nts.tv_sec;

			/*
			 * Clear the IPC hint for the currently running thread once
			 * per second, allowing us to disconnect the hint from a
			 * thread which may no longer care.
			 */
			curthread->td_wakefromcpu = -1;
		}

		/*
		 * Finally, our new basetime is ready to go live!
		 */
		cpu_sfence();
		basetime_index = ni;

		/*
		 * Update kpmap on each tick.  TS updates are integrated with
		 * fences and upticks allowing userland to read the data
		 * deterministically.
		 */
		if (kpmap) {
			int w;

			w = (kpmap->upticks + 1) & 1;
			getnanouptime(&kpmap->ts_uptime[w]);
			getnanotime(&kpmap->ts_realtime[w]);
			cpu_sfence();
			++kpmap->upticks;
			cpu_sfence();
		}
	}

	/*
	 * lwkt thread scheduler fair queueing
	 */
	lwkt_schedulerclock(curthread);

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * Rollup accumulated vmstats, copy-back for critical path checks.
	 */
	vmstats_rollup_cpu(gd);
	vfscache_rollup_cpu(gd);
	mycpu->gd_vmstats = vmstats;

	/*
	 * ITimer handling is per-tick, per-cpu.
	 *
	 * We must acquire the per-process token in order for ksignal()
	 * to be non-blocking.  For the moment this requires an AST fault,
	 * the ksignal() cannot be safely issued from this hard interrupt.
	 *
	 * XXX Even the trytoken here isn't right, and itimer operation in
	 *     a multi threaded environment is going to be weird at the
	 *     very least.
	 */
	if ((p = curproc) != NULL && lwkt_trytoken(&p->p_token)) {
		crit_enter_hard();
		if (p->p_upmap)
			++p->p_upmap->runticks;

		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) {
			p->p_flags |= P_SIGVTALRM;
			need_user_resched();
		}
		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) {
			p->p_flags |= P_SIGPROF;
			need_user_resched();
		}
		crit_exit_hard();
		lwkt_reltoken(&p->p_token);
	}
	setdelayed();
}

/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
	globaldata_t gd = mycpu;
	thread_t td;
	struct proc *p;
	int bump;
	sysclock_t cv;
	sysclock_t scv;

	/*
	 * How big was our timeslice relative to the last time?  Calculate
	 * in microseconds.
	 *
	 * NOTE: Use of microuptime() is typically MPSAFE, but usually not
	 *	 during early boot.  Just use the systimer count to be nice
	 *	 to e.g. qemu.  The systimer has a better chance of being
	 *	 MPSAFE at early boot.
	 */
	cv = sys_cputimer->count();
	scv = gd->statint.gd_statcv;
	if (scv == 0) {
		bump = 1;
	} else {
		bump = muldivu64(sys_cputimer->freq64_usec,
				 (cv - scv), 1L << 32);
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	gd->statint.gd_statcv = cv;

#if 0
	stv = &gd->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
		       (tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;
#endif

	td = curthread;
	p = td->td_proc;

	/*
	 * If this is an interrupt thread used for the clock interrupt, adjust
	 * td to the thread it is preempting.  If a frame is available, it will
	 * be related to the thread being preempted.
	 */
	if ((td->td_flags & TDF_CLKTHREAD) && td->td_preempted)
		td = td->td_preempted;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flags & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cpu_time.cp_nice += bump;
		else
			cpu_time.cp_user += bump;
	} else {
		int intr_nest = gd->gd_intr_nesting_level;

		if (in_ipi) {
			/*
			 * IPI processing code will bump gd_intr_nesting_level
			 * up by one, which breaks following CLKF_INTR testing,
			 * so we subtract it by one here.
			 */
			--intr_nest;
		}

		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 * can occur if ipi processing is done from a crit_exit().
		 */
		if ((frame && CLKF_INTR(intr_nest)) ||
		    cpu_interrupt_running(td)) {
			/*
			 * If we interrupted an interrupt thread, well,
			 * count it as interrupt time.
			 */
			td->td_iticks += bump;
#ifdef DEBUG_PCTRACK
			if (frame)
				do_pctrack(frame, PCTRACK_INT);
#endif
			cpu_time.cp_intr += bump;
		} else if (gd->gd_flags & GDF_VIRTUSER) {
			/*
			 * The vkernel doesn't do a good job providing trap
			 * frames that we can test.  If the GDF_VIRTUSER
			 * flag is set we probably interrupted user mode.
			 *
			 * We also use this flag on the host when entering
			 * VMM mode.
			 */
			td->td_uticks += bump;

			/*
			 * Charge the time as appropriate
			 */
			if (p && p->p_nice > NZERO)
				cpu_time.cp_nice += bump;
			else
				cpu_time.cp_user += bump;
		} else {
			if (clock_debug2 > 0) {
				--clock_debug2;
				kprintf("statclock preempt %s (%p %p)\n", td->td_comm, td, &gd->gd_idlethread);
			}
			td->td_sticks += bump;
			if (td == &gd->gd_idlethread) {
				/*
				 * We want to count token contention as
				 * system time.  When token contention occurs
				 * the cpu may only be outside its critical
				 * section while switching through the idle
				 * thread.  In this situation, various flags
				 * will be set in gd_reqflags.
				 *
				 * INTPEND is not necessarily useful because
				 * it will be set if the clock interrupt
				 * happens to be on an interrupt thread, the
				 * cpu_interrupt_running() call does a better
				 * job so we've already handled it.
				 */
				if (gd->gd_reqflags &
				    (RQF_IDLECHECK_WK_MASK & ~RQF_INTPEND)) {
					cpu_time.cp_sys += bump;
				} else {
					cpu_time.cp_idle += bump;
				}
			} else {
				/*
				 * System thread was running.
				 */
#ifdef DEBUG_PCTRACK
				if (frame)
					do_pctrack(frame, PCTRACK_SYS);
#endif
				cpu_time.cp_sys += bump;
			}
		}
	}
}

#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */

static void
do_pctrack(struct intrframe *frame, int which)
{
	struct kinfo_pctrack *pctrack;

	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
		(void *)CLKF_PC(frame);
	++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_pcheader head;
	int error;
	int cpu;
	int ntrack;

	head.pc_ntrack = PCTRACK_SIZE;
	head.pc_arysize = PCTRACK_ARYSIZE;

	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
		return (error);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
					   sizeof(struct kinfo_pctrack));
			if (error)
				break;
		}
		if (error)
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif

/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	struct lwp *lp;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	if ((lp = lwkt_preempted_proc()) != NULL) {
		/*
		 * Account for cpu time used and hit the scheduler.  Note
		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
		 * HERE.
		 */
		++lp->lwp_cpticks;
		usched_schedulerclock(lp, info->periodic, info->time);
	} else {
		usched_schedulerclock(NULL, info->periodic, info->time);
	}
	if ((lp = curthread->td_lwp) != NULL) {
		/*
		 * Update resource usage integrals and maximums.
		 */
		if ((ru = &lp->lwp_proc->p_ru) &&
		    (vm = lp->lwp_proc->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(btoc(vm->vm_tsize));
			ru->ru_idrss += pgtok(btoc(vm->vm_dsize));
			ru->ru_isrss += pgtok(btoc(vm->vm_ssize));
			if (lwkt_trytoken(&vm->vm_map.token)) {
				rss = pgtok(vmspace_resident_count(vm));
				if (ru->ru_maxrss < rss)
					ru->ru_maxrss = rss;
				lwkt_reltoken(&vm->vm_map.token);
			}
		}
	}
	/* Increment the global sched_ticks */
	if (mycpu->gd_cpuid == 0)
		++sched_ticks;
}

/*
 * Compute number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function will average the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		kprintf("tvtohz_high: negative time difference "
			"%ld sec %ld usec\n",
			sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz + howmany((u_long)usec, ustick)) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

int
tstohz_high(struct timespec *ts)
{
	int ticks;
	long sec, nsec;

	sec = ts->tv_sec;
	nsec = ts->tv_nsec;
	if (nsec < 0) {
		sec--;
		nsec += 1000000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (nsec > 0) {
			sec++;
			nsec -= 1000000000;
		}
		kprintf("tstohz_high: negative time difference "
			"%ld sec %ld nsec\n",
			sec, nsec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz + howmany((u_long)nsec, nstick)) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}


/*
 * Compute number of ticks for the specified amount of time, erroring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
	else
		ticks = INT_MAX;
	return (ticks);
}

int
tstohz_low(struct timespec *ts)
{
	int ticks;
	long sec;

	sec = ts->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
	else
		ticks = INT_MAX;
	return (ticks);
}

/*
 * Start profiling on a process.
 *
 * Caller must hold p->p_token();
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flags & P_PROFIL) == 0) {
		p->p_flags |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			crit_enter();
			psdiv = psratio;
			setstatclockrate(profhz);
			crit_exit();
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 *
 * caller must hold p->p_token
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flags & P_PROFIL) {
		p->p_flags &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			crit_enter();
			psdiv = 1;
			setstatclockrate(stathz);
			crit_exit();
		}
#endif
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = ustick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo","");

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot, these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we deal with the case.  For
 * uniformity we deal with the case in the usec case too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
 *
 * NEW CODE (!)
 *
 * cpu 0 now maintains global ticktimes and an update counter.  The
 * getnanotime() and getmicrotime() routines use these globals.
 */
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = muldivu64(sys_cputimer->freq64_usec, delta, 1L << 32);
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = muldivu64(sys_cputimer->freq64_nsec, delta, 1L << 32);
}

void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = muldivu64(sys_cputimer->freq64_usec, delta, 1L << 32);
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = muldivu64(sys_cputimer->freq64_nsec, delta, 1L << 32);
}

/*
 * realtime routines
 */
void
getmicrotime(struct timeval *tvp)
{
	struct timespec ts;
	int counter;

	do {
		counter = *(volatile int *)&ticktime_update;
		cpu_lfence();
		switch(counter & 3) {
		case 0:			/* ticktime2 completed update */
			ts = ticktime2;
			break;
		case 1:			/* ticktime0 update in progress */
			ts = ticktime2;
			break;
		case 2:			/* ticktime0 completed update */
			ts = ticktime0;
			break;
		case 3:			/* ticktime2 update in progress */
			ts = ticktime0;
			break;
		}
		cpu_lfence();
	} while (counter != *(volatile int *)&ticktime_update);
	tvp->tv_sec = ts.tv_sec;
	tvp->tv_usec = ts.tv_nsec / 1000;
}

void
getnanotime(struct timespec *tsp)
{
	struct timespec ts;
	int counter;

	do {
		counter = *(volatile int *)&ticktime_update;
		cpu_lfence();
		switch(counter & 3) {
		case 0:			/* ticktime2 completed update */
			ts = ticktime2;
			break;
		case 1:			/* ticktime0 update in progress */
			ts = ticktime2;
			break;
		case 2:			/* ticktime0 completed update */
			ts = ticktime0;
			break;
		case 3:			/* ticktime2 update in progress */
			ts = ticktime0;
			break;
		}
		cpu_lfence();
	} while (counter != *(volatile int *)&ticktime_update);
	*tsp = ts;
}

static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = muldivu64(sys_cputimer->freq64_nsec, delta, 1L << 32);

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}


void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = muldivu64(sys_cputimer->freq64_usec, delta, 1L << 32);

	bt = &basetime[basetime_index];
	cpu_lfence();
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = muldivu64(sys_cputimer->freq64_nsec, delta, 1L << 32);

	bt = &basetime[basetime_index];
	cpu_lfence();
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

/*
 * Get an approximate time_t.  It does not have to be accurate.  This
 * function is called only from KTR and can be called with the system in
 * any state so do not use a critical section or other complex operation
 * here.
 *
 * NOTE: This is not exactly synchronized with real time.  To do that we
 *	 would have to do what microtime does and check for a nanoseconds
 *	 overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
#ifdef PPS_SYNC
	int fhard;
#endif
	int ni;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
#endif
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
#endif
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		ts.tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	ts.tv_nsec = muldivu64(sys_cputimer->freq64_nsec, delta, 1L << 32);
	ni = basetime_index;
	cpu_lfence();
	bt = &basetime[ni];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp, tsp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= sys_cputimer->freq) {
			delta = (1000000000 * (tcount / sys_cputimer->freq) +
				 sys_cputimer->freq64_nsec *
				 (tcount % sys_cputimer->freq)) >> 32;
		} else {
			delta = muldivu64(sys_cputimer->freq64_nsec,
					  tcount, 1L << 32);
		}
		hardpps(tsp, delta);
	}
#endif
}

/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
tsc_uclock_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		return (rdtsc() + tsc_frequency * ns / (int64_t)1000000000);
	}
#endif
	return(-1);
}

/*
 * Compare the tsc against the passed target
 *
 * Returns +1 if the target has been reached
 * Returns 0 if the target has not yet been reached
 * Returns -1 if the TSC is not supported.
 *
 * Typical use:		while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		if ((int64_t)(target - rdtsc()) <= 0)
			return(1);
		return(0);
	}
#endif
	return(-1);
}

/*
 * Delay the specified number of nanoseconds using the tsc.  This function
 * returns immediately if the TSC is not supported.  At least one cpu_pause()
 * will be issued.
 */
void
tsc_delay(int ns)
{
	int64_t clk;

	clk = tsc_get_target(ns);
	cpu_pause();
	cpu_pause();
	while (tsc_test_target(clk) == 0) {
		cpu_pause();
		cpu_pause();
		cpu_pause();
		cpu_pause();
	}
}