/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 */

#include "opt_ntp.h"
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/priv.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <sys/upmap.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/kcollect.h>
#include <sys/exislock.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks (void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
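/*
 * The per-cpu kinfo_cputime accumulators below are reported by the
 * kern.cputime, kern.cp_time and kern.cp_times sysctl handlers and are
 * sampled by collect_cputime_callback() for kcollect.
 */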
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif

__read_mostly static int sniff_enable = 1;
__read_mostly static int sniff_target = -1;
__read_mostly static int clock_debug2 = 0;
SYSCTL_INT(_kern, OID_AUTO, sniff_enable, CTLFLAG_RW, &sniff_enable, 0, "");
SYSCTL_INT(_kern, OID_AUTO, sniff_target, CTLFLAG_RW, &sniff_target, 0, "");
SYSCTL_INT(_debug, OID_AUTO, clock_debug2, CTLFLAG_RW, &clock_debug2, 0, "");

__read_mostly long pseudo_ticks = 1;	/* existential timed locks */

static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;
	int root_error;
	size_t size = sizeof(struct kinfo_cputime);
	struct kinfo_cputime tmp;

	/*
	 * NOTE: For security reasons, only root can sniff %rip
	 */
	root_error = priv_check_cred(curthread->td_ucred, PRIV_ROOT, 0);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		tmp = cputime_percpu[cpu];
		if (root_error == 0) {
			tmp.cp_sample_pc =
				(int64_t)globaldata_find(cpu)->gd_sample_pc;
			tmp.cp_sample_sp =
				(int64_t)globaldata_find(cpu)->gd_sample_sp;
		}
		if ((error = SYSCTL_OUT(req, &tmp, size)) != 0)
			break;
	}

	if (root_error == 0) {
		if (sniff_enable) {
			int n = sniff_target;
			if (n < 0)
				smp_sniff();
			else if (n < ncpus)
				cpu_sniff(n);
		}
	}

	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");

static int
sysctl_cp_time(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[CPUSTATES] = {0};
	int cpu, error = 0;
	size_t size = sizeof(cpu_states);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] += cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[cpu].cp_idle;
	}

	error = SYSCTL_OUT(req, cpu_states, size);

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_time, "LU", "CPU time statistics");

static int
sysctl_cp_times(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[CPUSTATES] = {0};
	int cpu, error;
	size_t size = sizeof(cpu_states);

	for (error = 0, cpu = 0; error == 0 && cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] = cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] = cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] = cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] = cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] = cputime_percpu[cpu].cp_idle;
		error = SYSCTL_OUT(req, cpu_states, size);
	}

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_times, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_times, "LU", "per-CPU time statistics");

/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this with
 * microuptime().  microtime() is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 *
 * WARNING! time_second can backstep on time corrections. Also, unlike
 *	    time_second, time_uptime is not a "real" time_t (seconds
 *	    since the Epoch) but seconds since booting.
 */
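/*
 * ticktime0 and ticktime2 are double-buffered copies of the wall time
 * taken by hardclock() on cpu #0 once per tick.  ticktime_update is
 * bumped before and after each copy so lockless readers such as
 * getnanotime() and getmicrotime() can select whichever buffer is not
 * currently being written and retry if the counter changed underneath
 * them.
 */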
__read_mostly struct timespec boottime;	/* boot time (realtime) for ref only */
__read_mostly struct timespec ticktime0;/* updated every tick */
__read_mostly struct timespec ticktime2;/* updated every tick */
__read_mostly int ticktime_update;
__read_mostly time_t time_second;	/* read-only 'passive' rt in seconds */
__read_mostly time_t time_uptime;	/* read-only 'passive' ut in seconds */

/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpu's and
 * interrupt races on UP systems.
 */
struct hardtime {
	__uint32_t time_second;
	sysclock_t cpuclock_base;
};

#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static struct hardtime hardtime[BASETIME_ARYSIZE];
static volatile int basetime_index;

static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;
	int index;

	/*
	 * Because basetime data and index may be updated by another cpu,
	 * a load fence is required to ensure that the data we read has
	 * not been speculatively read relative to a possibly updated index.
	 */
	index = basetime_index;
	cpu_lfence();
	bt = &basetime[index];
	error = SYSCTL_OUT(req, bt, sizeof(*bt));
	return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");

static void hardclock(systimer_t info, int, struct intrframe *frame);
static void statclock(systimer_t info, int, struct intrframe *frame);
static void schedclock(systimer_t info, int, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

/*
 * Use __read_mostly for ticks and sched_ticks because these variables are
 * used all over the kernel and only updated once per tick.
 */
__read_mostly int ticks;		/* system master ticks at hz */
__read_mostly int sched_ticks;		/* global schedule clock ticks */
__read_mostly int clocks_running;	/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t	ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */
struct spinlock ntp_spin;

/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	/*psratio = profhz / stathz;*/
	spin_init(&ntp_spin, "ntp");
	initclocks_pcpu();
	clocks_running = 1;
	if (kpmap) {
		kpmap->tsc_freq = tsc_frequency;
		kpmap->tick_freq = hz;
	}
}

/*
 * Called on a per-cpu basis from the idle thread bootstrap on each cpu
 * during SMP initialization.
 *
 * This routine is called concurrently during low-level SMP initialization
 * and may not block in any way.  Meaning, among other things, we can't
 * acquire any tokens.
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = sys_cputimer->count();
		hardtime[0].time_second = gd->gd_time_seconds;
		hardtime[0].cpuclock_base = gd->gd_cpuclock_base;
	} else {
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

	crit_exit();
}

/*
 * Called on a 10-second interval after the system is operational.
 * Return the collection data for USERPCT and install the data for
 * SYSTPCT and IDLEPCT.
 */
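/*
 * The values installed via kcollect_setvalue() and the value returned
 * below are fractions of total cpu time scaled to 0..10000, i.e. in
 * hundredths of a percent; the lsb term rounds each result to the
 * nearest unit rather than truncating.
 */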
static
uint64_t
collect_cputime_callback(int n)
{
	static long cpu_base[CPUSTATES];
	long cpu_states[CPUSTATES];
	long total;
	long acc;
	long lsb;

	bzero(cpu_states, sizeof(cpu_states));
	for (n = 0; n < ncpus; ++n) {
		cpu_states[CP_USER] += cputime_percpu[n].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[n].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[n].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[n].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[n].cp_idle;
	}

	acc = 0;
	for (n = 0; n < CPUSTATES; ++n) {
		total = cpu_states[n] - cpu_base[n];
		cpu_base[n] = cpu_states[n];
		cpu_states[n] = total;
		acc += total;
	}
	if (acc == 0)		/* prevent degenerate divide by 0 */
		acc = 1;
	lsb = acc / (10000 * 2);
	kcollect_setvalue(KCOLLECT_SYSTPCT,
			  (cpu_states[CP_SYS] + lsb) * 10000 / acc);
	kcollect_setvalue(KCOLLECT_IDLEPCT,
			  (cpu_states[CP_IDLE] + lsb) * 10000 / acc);
	kcollect_setvalue(KCOLLECT_INTRPCT,
			  (cpu_states[CP_INTR] + lsb) * 10000 / acc);
	return((cpu_states[CP_USER] + cpu_states[CP_NICE] + lsb) * 10000 / acc);
}

/*
 * This routine is called on just the BSP, just after SMP initialization
 * completes to finish initializing any clocks that might contend/block
 * (e.g. like on a token).  We can't do this in initclocks_pcpu() because
 * that function is called from the idle thread bootstrap for each cpu and
 * not allowed to block at all.
 */
static
void
initclocks_other(void *dummy)
{
	struct globaldata *ogd = mycpu;
	struct globaldata *gd;
	int n;

	for (n = 0; n < ncpus; ++n) {
		lwkt_setcpu_self(globaldata_find(n));
		gd = mycpu;

		/*
		 * Use a non-queued periodic systimer to prevent multiple
		 * ticks from building up if the sysclock jumps forward
		 * (8254 gets reset).  The sysclock will never jump backwards.
		 * Our time sync is based on the actual sysclock, not the
		 * ticks count.
		 *
		 * Install statclock before hardclock to prevent statclock
		 * from misinterpreting gd_flags for tick assignment when
		 * they overlap.  Also offset the statclock by half of
		 * its interval to try to avoid being coincident with
		 * callouts.
		 */
		systimer_init_periodic_flags(&gd->gd_statclock, statclock,
					     NULL, stathz,
					     SYSTF_MSSYNC | SYSTF_FIRST |
					     SYSTF_OFFSET50 | SYSTF_OFFSETCPU);
		systimer_init_periodic_flags(&gd->gd_hardclock, hardclock,
					     NULL, hz,
					     SYSTF_MSSYNC | SYSTF_OFFSETCPU);
	}
	lwkt_setcpu_self(ogd);

	/*
	 * Regular data collection
	 */
	kcollect_register(KCOLLECT_USERPCT, "user", collect_cputime_callback,
			  KCOLLECT_SCALE(KCOLLECT_USERPCT_FORMAT, 0));
	kcollect_register(KCOLLECT_SYSTPCT, "syst", NULL,
			  KCOLLECT_SCALE(KCOLLECT_SYSTPCT_FORMAT, 0));
	kcollect_register(KCOLLECT_IDLEPCT, "idle", NULL,
			  KCOLLECT_SCALE(KCOLLECT_IDLEPCT_FORMAT, 0));
}
SYSINIT(clocks2, SI_BOOT2_POST_SMP, SI_ORDER_ANY, initclocks_other, NULL);

/*
 * This method is called on just the BSP, after all the usched implementations
 * are initialized.  This avoids races between usched initialization functions
 * and usched_schedulerclock().
 */
static
void
initclocks_usched(void *dummy)
{
	struct globaldata *ogd = mycpu;
	struct globaldata *gd;
	int n;

	for (n = 0; n < ncpus; ++n) {
		lwkt_setcpu_self(globaldata_find(n));
		gd = mycpu;

		/* XXX correct the frequency for scheduler / estcpu tests */
		systimer_init_periodic_flags(&gd->gd_schedclock, schedclock,
					     NULL, ESTCPUFREQ, SYSTF_MSSYNC);
	}
	lwkt_setcpu_self(ogd);
}
SYSINIT(clocks3, SI_BOOT2_USCHED, SI_ORDER_ANY, initclocks_usched, NULL);

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	cpu_lfence();
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
		nbt->tv_nsec += 1000000000;
		--nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	spin_lock(&ntp_spin);
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
	 */
	cpu_sfence();
	basetime_index = ni;
	spin_unlock(&ntp_spin);

	crit_exit();
}

/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct globaldata *gd = mycpu;

	if ((gd->gd_reqflags & RQF_IPIQ) == 0 && lwkt_need_ipiq_process(gd)) {
		/* Defer to doreti on passive IPIQ processing */
		need_ipiq();
	}

	/*
	 * We update the compensation base to calculate fine-grained time
	 * from the sys_cputimer on a per-cpu basis in order to avoid
	 * having to mess around with locks.  sys_cputimer is assumed to
	 * be consistent across all cpus.  CPU N copies the base state from
	 * CPU 0 using the same FIFO trick that we use for basetime (so we
	 * don't catch a CPU 0 update in the middle).
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but that it is possible for
	 * it to temporarily get behind in the seconds if something in the
	 * system locks interrupts for a long period of time.  Since periodic
	 * timers count events, though, everything should resynch again
	 * immediately.
	 */
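	/*
	 * cpu #0 stages the advanced (time_second, cpuclock_base) pair in
	 * the next hardtime[] slot; the other cpus keep reading the slot
	 * selected by the current basetime_index, which is only advanced
	 * further below, so they never see a partially updated pair.
	 */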
	if (gd->gd_cpuid == 0) {
		int ni;

		cputicks = info->time - gd->gd_cpuclock_base;
		if (cputicks >= sys_cputimer->freq) {
			cputicks /= sys_cputimer->freq;
			if (cputicks != 0 && cputicks != 1)
				kprintf("Warning: hardclock missed > 1 sec\n");
			gd->gd_time_seconds += cputicks;
			gd->gd_cpuclock_base += sys_cputimer->freq * cputicks;
			/* uncorrected monotonic 1-sec gran */
			time_uptime += cputicks;
		}
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		hardtime[ni].time_second = gd->gd_time_seconds;
		hardtime[ni].cpuclock_base = gd->gd_cpuclock_base;
	} else {
		int ni;

		ni = basetime_index;
		cpu_lfence();
		gd->gd_time_seconds = hardtime[ni].time_second;
		gd->gd_cpuclock_base = hardtime[ni].cpuclock_base;
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
		struct timespec *nbt;
		struct timespec nts;
		int leap;
		int ni;

		/*
		 * Update system-wide ticks
		 */
		++ticks;

		/*
		 * Update system-wide ticktime for getnanotime() and getmicrotime()
		 */
		nanotime(&nts);
		atomic_add_int_nonlocked(&ticktime_update, 1);
		cpu_sfence();
		if (ticktime_update & 2)
			ticktime2 = nts;
		else
			ticktime0 = nts;
		cpu_sfence();
		atomic_add_int_nonlocked(&ticktime_update, 1);

#if 0
		if (tco->tc_poll_pps)
			tco->tc_poll_pps(tco);
#endif

		/*
		 * Calculate the new basetime index.  We are in a critical section
		 * on cpu #0 and can safely play with basetime_index.  Start
		 * with the current basetime and then make adjustments.
		 */
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		nbt = &basetime[ni];
		*nbt = basetime[basetime_index];

		/*
		 * ntp adjustments only occur on cpu 0 and are protected by
		 * ntp_spin.  This spinlock virtually never conflicts.
		 */
		spin_lock(&ntp_spin);

		/*
		 * Apply adjtime corrections.  (adjtime() API)
		 *
		 * adjtime() only runs on cpu #0 so our critical section is
		 * sufficient to access these variables.
		 */
		if (ntp_delta != 0) {
			nbt->tv_nsec += ntp_tick_delta;
			ntp_delta -= ntp_tick_delta;
			if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
			    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
				ntp_tick_delta = ntp_delta;
			}
		}

		/*
		 * Apply permanent frequency corrections.  (sysctl API)
		 */
		if (ntp_tick_permanent != 0) {
			ntp_tick_acc += ntp_tick_permanent;
			if (ntp_tick_acc >= (1LL << 32)) {
				nbt->tv_nsec += ntp_tick_acc >> 32;
				ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
			} else if (ntp_tick_acc <= -(1LL << 32)) {
				/* Negate ntp_tick_acc to avoid shifting the sign bit. */
				nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
				ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
			}
		}

		if (nbt->tv_nsec >= 1000000000) {
			nbt->tv_sec++;
			nbt->tv_nsec -= 1000000000;
		} else if (nbt->tv_nsec < 0) {
			nbt->tv_sec--;
			nbt->tv_nsec += 1000000000;
		}

		/*
		 * Another per-tick compensation.  (for ntp_adjtime() API)
		 */
		if (nsec_adj != 0) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				nbt->tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				nbt->tv_nsec -= -nsec_acc >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (nbt->tv_nsec >= 1000000000) {
				nbt->tv_nsec -= 1000000000;
				++nbt->tv_sec;
			} else if (nbt->tv_nsec < 0) {
				nbt->tv_nsec += 1000000000;
				--nbt->tv_sec;
			}
		}
		spin_unlock(&ntp_spin);

		/************************************************************
		 *			LEAP SECOND CORRECTION		    *
		 ************************************************************
		 *
		 * Taking into account all the corrections made above, figure
		 * out the new real time.  If the seconds field has changed
		 * then apply any pending leap-second corrections.
		 */
		getnanotime_nbt(nbt, &nts);

		if (time_second != nts.tv_sec) {
			/*
			 * Apply leap second (sysctl API).  Adjust nts for changes
			 * so we do not have to call getnanotime_nbt again.
			 */
			if (ntp_leap_second) {
				if (ntp_leap_second == nts.tv_sec) {
					if (ntp_leap_insert) {
						nbt->tv_sec++;
						nts.tv_sec++;
					} else {
						nbt->tv_sec--;
						nts.tv_sec--;
					}
					ntp_leap_second--;
				}
			}

			/*
			 * Apply leap second (ntp_adjtime() API), calculate a new
			 * nsec_adj field.  ntp_update_second() returns nsec_adj
			 * as a per-second value but we need it as a per-tick value.
			 */
			leap = ntp_update_second(time_second, &nsec_adj);
			nsec_adj /= hz;
			nbt->tv_sec += leap;
			nts.tv_sec += leap;

			/*
			 * Update the time_second 'approximate time' global.
			 */
			time_second = nts.tv_sec;

			/*
			 * Clear the IPC hint for the currently running thread once
			 * per second, allowing us to disconnect the hint from a
			 * thread which may no longer care.
			 */
			curthread->td_wakefromcpu = -1;
		}

		/*
		 * Finally, our new basetime is ready to go live!
		 */
		cpu_sfence();
		basetime_index = ni;

		/*
		 * Update kpmap on each tick.  TS updates are integrated with
		 * fences and upticks allowing userland to read the data
		 * deterministically.
		 */
		if (kpmap) {
			int w;

			w = (kpmap->upticks + 1) & 1;
			getnanouptime(&kpmap->ts_uptime[w]);
			getnanotime(&kpmap->ts_realtime[w]);
			cpu_sfence();
			++kpmap->upticks;
			cpu_sfence();
		}

		/*
		 * Handle exislock pseudo_ticks.  We make things as simple as
		 * possible for the critical path arming code by adding a little
		 * complication here.
		 *
		 * When we find that all cores have been armed, we increment
		 * pseudo_ticks and disarm all the cores.
		 */
		{
			globaldata_t gd;
			int n;

			for (n = 0; n < ncpus; ++n) {
				gd = globaldata_find(n);
				if (gd->gd_exisarmed == 0)
					break;
			}

			if (n == ncpus) {
				for (n = 0; n < ncpus; ++n) {
					gd = globaldata_find(n);
					gd->gd_exisarmed = 0;
				}
				++pseudo_ticks;
			}
		}
	}

	/*
	 * lwkt thread scheduler fair queueing
	 */
	lwkt_schedulerclock(curthread);

	/*
	 * Cycle the existential lock system on odd ticks in order to re-arm
	 * our cpu (in case the cpu is idle or nobody is using any exis locks).
	 */
	if (ticks & 1) {
		exis_hold_gd(gd);
		exis_drop_gd(gd);
	}

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * Rollup accumulated vmstats, copy-back for critical path checks.
	 */
	vmstats_rollup_cpu(gd);
	vfscache_rollup_cpu(gd);
	mycpu->gd_vmstats = vmstats;

	/*
	 * ITimer handling is per-tick, per-cpu.
	 *
	 * We must acquire the per-process token in order for ksignal()
	 * to be non-blocking.  For the moment this requires an AST fault,
	 * the ksignal() cannot be safely issued from this hard interrupt.
	 *
	 * XXX Even the trytoken here isn't right, and itimer operation in
	 *     a multi threaded environment is going to be weird at the
	 *     very least.
	 */
	if ((p = curproc) != NULL && lwkt_trytoken(&p->p_token)) {
		crit_enter_hard();
		if (p->p_upmap)
			++p->p_upmap->runticks;

		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) {
			p->p_flags |= P_SIGVTALRM;
			need_user_resched();
		}
		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) {
			p->p_flags |= P_SIGPROF;
			need_user_resched();
		}
		crit_exit_hard();
		lwkt_reltoken(&p->p_token);
	}
	setdelayed();
}

/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
	globaldata_t gd = mycpu;
	thread_t td;
	struct proc *p;
	int bump;
	sysclock_t cv;
	sysclock_t scv;

	/*
	 * How big was our timeslice relative to the last time?  Calculate
	 * in microseconds.
	 *
	 * NOTE: Use of microuptime() is typically MPSAFE, but usually not
	 *	 during early boot.  Just use the systimer count to be nice
	 *	 to e.g. qemu.  The systimer has a better chance of being
	 *	 MPSAFE at early boot.
	 */
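	/*
	 * sys_cputimer->freq64_usec expresses microseconds-per-count in
	 * 32.32 fixed point, so the muldivu64() below converts the raw
	 * count delta (cv - scv) into elapsed microseconds, clamped to
	 * the range [0, 1000000].
	 */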
	cv = sys_cputimer->count();
	scv = gd->statint.gd_statcv;
	if (scv == 0) {
		bump = 1;
	} else {
		bump = muldivu64(sys_cputimer->freq64_usec,
				 (cv - scv), 1L << 32);
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	gd->statint.gd_statcv = cv;

#if 0
	stv = &gd->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
		       (tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;
#endif

	td = curthread;
	p = td->td_proc;

	/*
	 * If this is an interrupt thread used for the clock interrupt, adjust
	 * td to the thread it is preempting.  If a frame is available, it will
	 * be related to the thread being preempted.
	 */
	if ((td->td_flags & TDF_CLKTHREAD) && td->td_preempted)
		td = td->td_preempted;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flags & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cpu_time.cp_nice += bump;
		else
			cpu_time.cp_user += bump;
	} else {
		int intr_nest = gd->gd_intr_nesting_level;

		if (in_ipi) {
			/*
			 * IPI processing code will bump gd_intr_nesting_level
			 * up by one, which breaks following CLKF_INTR testing,
			 * so we subtract it by one here.
			 */
			--intr_nest;
		}

		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 *     can occur if ipi processing is done from a crit_exit().
		 */
		if ((frame && CLKF_INTR(intr_nest)) ||
		    cpu_interrupt_running(td)) {
			/*
			 * If we interrupted an interrupt thread, well,
			 * count it as interrupt time.
			 */
			td->td_iticks += bump;
#ifdef DEBUG_PCTRACK
			if (frame)
				do_pctrack(frame, PCTRACK_INT);
#endif
			cpu_time.cp_intr += bump;
		} else if (gd->gd_flags & GDF_VIRTUSER) {
			/*
			 * The vkernel doesn't do a good job providing trap
			 * frames that we can test.  If the GDF_VIRTUSER
			 * flag is set we probably interrupted user mode.
			 *
			 * We also use this flag on the host when entering
			 * VMM mode.
			 */
			td->td_uticks += bump;

			/*
			 * Charge the time as appropriate
			 */
			if (p && p->p_nice > NZERO)
				cpu_time.cp_nice += bump;
			else
				cpu_time.cp_user += bump;
		} else {
			if (clock_debug2 > 0) {
				--clock_debug2;
				kprintf("statclock preempt %s (%p %p)\n", td->td_comm, td, &gd->gd_idlethread);
			}
			td->td_sticks += bump;
			if (td == &gd->gd_idlethread) {
				/*
				 * We want to count token contention as
				 * system time.  When token contention occurs
				 * the cpu may only be outside its critical
				 * section while switching through the idle
				 * thread.  In this situation, various flags
				 * will be set in gd_reqflags.
				 *
				 * INTPEND is not necessarily useful because
				 * it will be set if the clock interrupt
				 * happens to be on an interrupt thread, the
				 * cpu_interrupt_running() call does a better
				 * job so we've already handled it.
				 */
				if (gd->gd_reqflags &
				    (RQF_IDLECHECK_WK_MASK & ~RQF_INTPEND)) {
					cpu_time.cp_sys += bump;
				} else {
					cpu_time.cp_idle += bump;
				}
			} else {
				/*
				 * System thread was running.
				 */
#ifdef DEBUG_PCTRACK
				if (frame)
					do_pctrack(frame, PCTRACK_SYS);
#endif
				cpu_time.cp_sys += bump;
			}
		}
	}
}

#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */

static void
do_pctrack(struct intrframe *frame, int which)
{
	struct kinfo_pctrack *pctrack;

	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
		(void *)CLKF_PC(frame);
	++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_pcheader head;
	int error;
	int cpu;
	int ntrack;

	head.pc_ntrack = PCTRACK_SIZE;
	head.pc_arysize = PCTRACK_ARYSIZE;

	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
		return (error);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
					   sizeof(struct kinfo_pctrack));
			if (error)
				break;
		}
		if (error)
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif

/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	struct lwp *lp;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	if ((lp = lwkt_preempted_proc()) != NULL) {
		/*
		 * Account for cpu time used and hit the scheduler.  Note
		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
		 * HERE.
		 */
		++lp->lwp_cpticks;
		usched_schedulerclock(lp, info->periodic, info->time);
	} else {
		usched_schedulerclock(NULL, info->periodic, info->time);
	}
	if ((lp = curthread->td_lwp) != NULL) {
		/*
		 * Update resource usage integrals and maximums.
		 */
		if ((ru = &lp->lwp_proc->p_ru) &&
		    (vm = lp->lwp_proc->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(btoc(vm->vm_tsize));
			ru->ru_idrss += pgtok(btoc(vm->vm_dsize));
			ru->ru_isrss += pgtok(btoc(vm->vm_ssize));
			if (lwkt_trytoken(&vm->vm_map.token)) {
				rss = pgtok(vmspace_resident_count(vm));
				if (ru->ru_maxrss < rss)
					ru->ru_maxrss = rss;
				lwkt_reltoken(&vm->vm_map.token);
			}
		}
	}
	/* Increment the global sched_ticks */
	if (mycpu->gd_cpuid == 0)
		++sched_ticks;
}

/*
 * Compute number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function will round the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
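/*
 * For example, with hz set to 100 (ustick = 10000), a timeval of 25000
 * microseconds yields 0 * 100 + howmany(25000, 10000) + 1 = 4 ticks,
 * which cannot expire early even if a clock interrupt is imminent.
 */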
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		kprintf("tvtohz_high: negative time difference "
			"%ld sec %ld usec\n",
			sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz + howmany((u_long)usec, ustick)) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

int
tstohz_high(struct timespec *ts)
{
	int ticks;
	long sec, nsec;

	sec = ts->tv_sec;
	nsec = ts->tv_nsec;
	if (nsec < 0) {
		sec--;
		nsec += 1000000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (nsec > 0) {
			sec++;
			nsec -= 1000000000;
		}
		kprintf("tstohz_high: negative time difference "
			"%ld sec %ld nsec\n",
			sec, nsec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz + howmany((u_long)nsec, nstick)) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}


/*
 * Compute number of ticks for the specified amount of time, erroring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
	else
		ticks = INT_MAX;
	return (ticks);
}

int
tstohz_low(struct timespec *ts)
{
	int ticks;
	long sec;

	sec = ts->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
	else
		ticks = INT_MAX;
	return (ticks);
}

/*
 * Start profiling on a process.
 *
 * Caller must hold p->p_token.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flags & P_PROFIL) == 0) {
		p->p_flags |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			crit_enter();
			psdiv = psratio;
			setstatclockrate(profhz);
			crit_exit();
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 *
 * caller must hold p->p_token
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flags & P_PROFIL) {
		p->p_flags &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			crit_enter();
			psdiv = 1;
			setstatclockrate(stathz);
			crit_exit();
		}
#endif
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = ustick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo", "");

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we deal with the case.  For
 * uniformity we deal with the case in the usec case too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
 *
 * NEW CODE (!)
 *
 * cpu 0 now maintains global ticktimes and an update counter.  The
 * getnanotime() and getmicrotime() routines use these globals.
 */
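/*
 * The get*uptime() variants below derive their value from the last
 * hardclock systimer timestamp (gd_hardclock.time) and are therefore only
 * tick-granular, whereas microuptime() and nanouptime() sample
 * sys_cputimer->count() directly for full resolution.
 */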
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = muldivu64(sys_cputimer->freq64_usec, delta, 1L << 32);
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = muldivu64(sys_cputimer->freq64_nsec, delta, 1L << 32);
}

void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = muldivu64(sys_cputimer->freq64_usec, delta, 1L << 32);
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = muldivu64(sys_cputimer->freq64_nsec, delta, 1L << 32);
}

/*
 * realtime routines
 */
void
getmicrotime(struct timeval *tvp)
{
	struct timespec ts;
	int counter;

	do {
		counter = *(volatile int *)&ticktime_update;
		cpu_lfence();
		switch(counter & 3) {
		case 0:			/* ticktime2 completed update */
			ts = ticktime2;
			break;
		case 1:			/* ticktime0 update in progress */
			ts = ticktime2;
			break;
		case 2:			/* ticktime0 completed update */
			ts = ticktime0;
			break;
		case 3:			/* ticktime2 update in progress */
			ts = ticktime0;
			break;
		}
		cpu_lfence();
	} while (counter != *(volatile int *)&ticktime_update);
	tvp->tv_sec = ts.tv_sec;
	tvp->tv_usec = ts.tv_nsec / 1000;
}

void
getnanotime(struct timespec *tsp)
{
	struct timespec ts;
	int counter;

	do {
		counter = *(volatile int *)&ticktime_update;
		cpu_lfence();
		switch(counter & 3) {
		case 0:			/* ticktime2 completed update */
			ts = ticktime2;
			break;
		case 1:			/* ticktime0 update in progress */
			ts = ticktime2;
			break;
		case 2:			/* ticktime0 completed update */
			ts = ticktime0;
			break;
		case 3:			/* ticktime2 update in progress */
			ts = ticktime0;
			break;
		}
		cpu_lfence();
	} while (counter != *(volatile int *)&ticktime_update);
	*tsp = ts;
}

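/*
 * Compute the current real time using the supplied basetime (nbt) rather
 * than the live basetime[] entry.  hardclock() uses this to evaluate a
 * pending basetime before publishing the new basetime_index.
 */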
static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = muldivu64(sys_cputimer->freq64_nsec, delta, 1L << 32);

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}


void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = muldivu64(sys_cputimer->freq64_usec, delta, 1L << 32);

	bt = &basetime[basetime_index];
	cpu_lfence();
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = muldivu64(sys_cputimer->freq64_nsec, delta, 1L << 32);

	bt = &basetime[basetime_index];
	cpu_lfence();
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

/*
 * Get an approximate time_t.  It does not have to be accurate.  This
 * function is called only from KTR and can be called with the system in
 * any state so do not use a critical section or other complex operation
 * here.
 *
 * NOTE: This is not exactly synchronized with real time.  To do that we
 *	 would have to do what microtime does and check for a nanoseconds
 *	 overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
#ifdef PPS_SYNC
	int fhard;
#endif
	int ni;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
#endif
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
#endif
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		ts.tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	ts.tv_nsec = muldivu64(sys_cputimer->freq64_nsec, delta, 1L << 32);
	ni = basetime_index;
	cpu_lfence();
	bt = &basetime[ni];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp, tsp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= sys_cputimer->freq) {
			delta = (1000000000 * (tcount / sys_cputimer->freq) +
				 sys_cputimer->freq64_nsec *
				 (tcount % sys_cputimer->freq)) >> 32;
		} else {
			delta = muldivu64(sys_cputimer->freq64_nsec,
					  tcount, 1L << 32);
		}
		hardpps(tsp, delta);
	}
#endif
}

/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
tsc_uclock_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		return (rdtsc() + tsc_frequency * ns / (int64_t)1000000000);
	}
#endif
	return(-1);
}

/*
 * Compare the tsc against the passed target
 *
 * Returns +1 if the target has been reached
 * Returns 0 if the target has not yet been reached
 * Returns -1 if the TSC is not supported.
 *
 * Typical use:		while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		if ((int64_t)(target - rdtsc()) <= 0)
			return(1);
		return(0);
	}
#endif
	return(-1);
}

/*
 * Delay the specified number of nanoseconds using the tsc.  This function
 * returns immediately if the TSC is not supported.  At least one cpu_pause()
 * will be issued.
 */
void
tsc_delay(int ns)
{
	int64_t clk;

	clk = tsc_get_target(ns);
	cpu_pause();
	cpu_pause();
	while (tsc_test_target(clk) == 0) {
		cpu_pause();
		cpu_pause();
		cpu_pause();
		cpu_pause();
	}
}