/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 */

#include "opt_ntp.h"
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/priv.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <sys/upmap.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/kcollect.h>
#include <sys/exislock.h>
#include <sys/exislock2.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks (void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif

__read_mostly static int sniff_enable = 1;
__read_mostly static int sniff_target = -1;
__read_mostly static int clock_debug2 = 0;
SYSCTL_INT(_kern, OID_AUTO, sniff_enable, CTLFLAG_RW, &sniff_enable, 0, "");
SYSCTL_INT(_kern, OID_AUTO, sniff_target, CTLFLAG_RW, &sniff_target, 0, "");
SYSCTL_INT(_debug, OID_AUTO, clock_debug2, CTLFLAG_RW, &clock_debug2, 0, "");

__read_mostly long pseudo_ticks = 1;	/* existential timed locks */

static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;
	int root_error;
	size_t size = sizeof(struct kinfo_cputime);
	struct kinfo_cputime tmp;

	/*
	 * NOTE: For security reasons, only root can sniff %rip
	 */
	root_error = priv_check_cred(curthread->td_ucred, PRIV_ROOT, 0);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		tmp = cputime_percpu[cpu];
		if (root_error == 0) {
			tmp.cp_sample_pc =
				(int64_t)globaldata_find(cpu)->gd_sample_pc;
			tmp.cp_sample_sp =
				(int64_t)globaldata_find(cpu)->gd_sample_sp;
		}
		if ((error = SYSCTL_OUT(req, &tmp, size)) != 0)
			break;
	}

	if (root_error == 0) {
		if (sniff_enable) {
			int n = sniff_target;
			if (n < 0)
				smp_sniff();
			else if (n < ncpus)
				cpu_sniff(n);
		}
	}

	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");

static int
sysctl_cp_time(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[CPUSTATES] = {0};
	int cpu, error = 0;
	size_t size = sizeof(cpu_states);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] += cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[cpu].cp_idle;
	}

	error = SYSCTL_OUT(req, cpu_states, size);

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_time, "LU", "CPU time statistics");

static int
sysctl_cp_times(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[CPUSTATES] = {0};
	int cpu, error;
	size_t size = sizeof(cpu_states);

	for (error = 0, cpu = 0; error == 0 && cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] = cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] = cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] = cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] = cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] = cputime_percpu[cpu].cp_idle;
		error = SYSCTL_OUT(req, cpu_states, size);
	}

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_times, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_times, "LU", "per-CPU time statistics");
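/*
 * Example (sketch, not part of the kernel build): a minimal userland
 * consumer of the kern.cp_time sysctl exported above, assuming the usual
 * BSD CPUSTATES/CP_* definitions from <sys/resource.h>.  Error handling
 * is mostly elided; this is illustrative only.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/resource.h>
#include <stdio.h>

int
main(void)
{
	long states[CPUSTATES];		/* aggregated across all cpus */
	size_t len = sizeof(states);

	if (sysctlbyname("kern.cp_time", states, &len, NULL, 0) == 0) {
		printf("user %ld nice %ld sys %ld intr %ld idle %ld\n",
		       states[CP_USER], states[CP_NICE], states[CP_SYS],
		       states[CP_INTR], states[CP_IDLE]);
	}
	return 0;
}
#endif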
/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this
 * with microuptime().  microtime() is not drift compensated.  The real
 * uptime with compensation is nanotime() - boottime.  boottime is
 * recalculated whenever the real time is set based on the compensated
 * elapsed time in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 *
 * WARNING! time_second can backstep on time corrections.  Also, unlike
 *	    time_second, time_uptime is not a "real" time_t (seconds
 *	    since the Epoch) but seconds since booting.
 */
__read_mostly struct timespec boottime;	/* boot time (realtime) for ref only */
__read_mostly struct timespec ticktime0;/* updated every tick */
__read_mostly struct timespec ticktime2;/* updated every tick */
__read_mostly int ticktime_update;
__read_mostly time_t time_second;	/* read-only 'passive' rt in seconds */
__read_mostly time_t time_uptime;	/* read-only 'passive' ut in seconds */

/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpu's and
 * interrupt races on UP systems.
 */
struct hardtime {
	__uint32_t time_second;
	sysclock_t cpuclock_base;
};

#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static struct hardtime hardtime[BASETIME_ARYSIZE];
static volatile int basetime_index;

static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;
	int index;

	/*
	 * Because basetime data and index may be updated by another cpu,
	 * a load fence is required to ensure that the data we read has
	 * not been speculatively read relative to a possibly updated index.
	 */
	index = basetime_index;
	cpu_lfence();
	bt = &basetime[index];
	error = SYSCTL_OUT(req, bt, sizeof(*bt));
	return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");
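/*
 * Sketch of the tail-chasing FIFO protocol used for basetime[] (for
 * illustration only; the real producer is hardclock() on cpu #0 and a
 * typical consumer is sysctl_get_basetime() above).  The producer fills
 * the next slot before publishing the new index; consumers snapshot the
 * index before reading the slot.  A reader may observe a slightly stale
 * basetime, but never a torn one.
 */
#if 0
	struct timespec new_value;	/* assumed filled in by the caller */
	struct timespec *bt;
	int ni, ix;

	/* producer (cpu #0 only): fill the unpublished slot first */
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	basetime[ni] = new_value;
	cpu_sfence();			/* slot data visible before index */
	basetime_index = ni;		/* then publish the new index */

	/* consumer (any cpu): snapshot the index, then read the slot */
	ix = basetime_index;
	cpu_lfence();			/* index load before slot loads */
	bt = &basetime[ix];
#endif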
static void hardclock(systimer_t info, int, struct intrframe *frame);
static void statclock(systimer_t info, int, struct intrframe *frame);
static void schedclock(systimer_t info, int, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

/*
 * Use __read_mostly for ticks and sched_ticks because these variables are
 * used all over the kernel and only updated once per tick.
 */
__read_mostly int ticks;	   /* system master ticks at hz */
__read_mostly int sched_ticks;	   /* global schedule clock ticks */
__read_mostly int clocks_running;  /* tsleep/timeout clocks operational */
int64_t	nsec_adj;	/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;	/* accumulator */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */
struct spinlock ntp_spin;

/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	/*psratio = profhz / stathz;*/
	spin_init(&ntp_spin, "ntp");
	initclocks_pcpu();
	clocks_running = 1;
	if (kpmap) {
		kpmap->tsc_freq = tsc_frequency;
		kpmap->tick_freq = hz;
	}
}

/*
 * Called on a per-cpu basis from the idle thread bootstrap on each cpu
 * during SMP initialization.
 *
 * This routine is called concurrently during low-level SMP initialization
 * and may not block in any way.  Meaning, among other things, we can't
 * acquire any tokens.
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = sys_cputimer->count();
		hardtime[0].time_second = gd->gd_time_seconds;
		hardtime[0].cpuclock_base = gd->gd_cpuclock_base;
	} else {
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

	crit_exit();
}

/*
 * Called on a 10-second interval after the system is operational.
 * Return the collection data for USERPCT and install the data for
 * SYSTPCT and IDLEPCT.
 */
static
uint64_t
collect_cputime_callback(int n)
{
	static long cpu_base[CPUSTATES];
	long cpu_states[CPUSTATES];
	long total;
	long acc;
	long lsb;

	bzero(cpu_states, sizeof(cpu_states));
	for (n = 0; n < ncpus; ++n) {
		cpu_states[CP_USER] += cputime_percpu[n].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[n].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[n].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[n].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[n].cp_idle;
	}

	acc = 0;
	for (n = 0; n < CPUSTATES; ++n) {
		total = cpu_states[n] - cpu_base[n];
		cpu_base[n] = cpu_states[n];
		cpu_states[n] = total;
		acc += total;
	}
	if (acc == 0)		/* prevent degenerate divide by 0 */
		acc = 1;
	lsb = acc / (10000 * 2);
	kcollect_setvalue(KCOLLECT_SYSTPCT,
			  (cpu_states[CP_SYS] + lsb) * 10000 / acc);
	kcollect_setvalue(KCOLLECT_IDLEPCT,
			  (cpu_states[CP_IDLE] + lsb) * 10000 / acc);
	kcollect_setvalue(KCOLLECT_INTRPCT,
			  (cpu_states[CP_INTR] + lsb) * 10000 / acc);
	return((cpu_states[CP_USER] + cpu_states[CP_NICE] + lsb) * 10000 / acc);
}
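/*
 * Worked example of the fixed-point math above (the numbers are
 * illustrative): the installed values are in units of 1/100 of a percent
 * and lsb implements round-to-nearest.  Suppose the 10-second window
 * accumulated acc = 4000000 usec of deltas and cpu_states[CP_SYS] was
 * 400000 of them.  Then lsb = 4000000 / 20000 = 200 and
 * (400000 + 200) * 10000 / 4000000 = 1000, i.e. 10.00% system time.
 */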
/*
 * This routine is called on just the BSP, just after SMP initialization
 * completes, to finish initializing any clocks that might contend/block
 * (e.g. like on a token).  We can't do this in initclocks_pcpu() because
 * that function is called from the idle thread bootstrap for each cpu and
 * not allowed to block at all.
 */
static
void
initclocks_other(void *dummy)
{
	struct globaldata *ogd = mycpu;
	struct globaldata *gd;
	int n;

	for (n = 0; n < ncpus; ++n) {
		lwkt_setcpu_self(globaldata_find(n));
		gd = mycpu;

		/*
		 * Use a non-queued periodic systimer to prevent multiple
		 * ticks from building up if the sysclock jumps forward
		 * (8254 gets reset).  The sysclock will never jump backwards.
		 * Our time sync is based on the actual sysclock, not the
		 * ticks count.
		 *
		 * Install statclock before hardclock to prevent statclock
		 * from misinterpreting gd_flags for tick assignment when
		 * they overlap.  Also offset the statclock by half of
		 * its interval to try to avoid being coincident with
		 * callouts.
		 */
		systimer_init_periodic_flags(&gd->gd_statclock, statclock,
					     NULL, stathz,
					     SYSTF_MSSYNC | SYSTF_FIRST |
					     SYSTF_OFFSET50 | SYSTF_OFFSETCPU);
		systimer_init_periodic_flags(&gd->gd_hardclock, hardclock,
					     NULL, hz,
					     SYSTF_MSSYNC | SYSTF_OFFSETCPU);
	}
	lwkt_setcpu_self(ogd);

	/*
	 * Regular data collection
	 */
	kcollect_register(KCOLLECT_USERPCT, "user", collect_cputime_callback,
			  KCOLLECT_SCALE(KCOLLECT_USERPCT_FORMAT, 0));
	kcollect_register(KCOLLECT_SYSTPCT, "syst", NULL,
			  KCOLLECT_SCALE(KCOLLECT_SYSTPCT_FORMAT, 0));
	kcollect_register(KCOLLECT_IDLEPCT, "idle", NULL,
			  KCOLLECT_SCALE(KCOLLECT_IDLEPCT_FORMAT, 0));
}
SYSINIT(clocks2, SI_BOOT2_POST_SMP, SI_ORDER_ANY, initclocks_other, NULL);

/*
 * This method is called on just the BSP, after all the usched
 * implementations are initialized.  This avoids races between usched
 * initialization functions and usched_schedulerclock().
 */
static
void
initclocks_usched(void *dummy)
{
	struct globaldata *ogd = mycpu;
	struct globaldata *gd;
	int n;

	for (n = 0; n < ncpus; ++n) {
		lwkt_setcpu_self(globaldata_find(n));
		gd = mycpu;

		/* XXX correct the frequency for scheduler / estcpu tests */
		systimer_init_periodic_flags(&gd->gd_schedclock, schedclock,
					     NULL, ESTCPUFREQ,
					     SYSTF_MSSYNC | SYSTF_OFFSETCPU);
	}
	lwkt_setcpu_self(ogd);
}
SYSINIT(clocks3, SI_BOOT2_USCHED, SI_ORDER_ANY, initclocks_usched, NULL);

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	cpu_lfence();
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
		nbt->tv_nsec += 1000000000;
		--nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	spin_lock(&ntp_spin);
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
	 */
	cpu_sfence();
	basetime_index = ni;
	spin_unlock(&ntp_spin);

	crit_exit();
}

/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct globaldata *gd = mycpu;

	if ((gd->gd_reqflags & RQF_IPIQ) == 0 && lwkt_need_ipiq_process(gd)) {
		/* Defer to doreti on passive IPIQ processing */
		need_ipiq();
	}

	/*
	 * We update the compensation base to calculate fine-grained time
	 * from the sys_cputimer on a per-cpu basis in order to avoid
	 * having to mess around with locks.  sys_cputimer is assumed to
	 * be consistent across all cpus.  CPU N copies the base state from
	 * CPU 0 using the same FIFO trick that we use for basetime (so we
	 * don't catch a CPU 0 update in the middle).
569 * 570 * Note that we never allow info->time (aka gd->gd_hardclock.time) 571 * to reverse index gd_cpuclock_base, but that it is possible for 572 * it to temporarily get behind in the seconds if something in the 573 * system locks interrupts for a long period of time. Since periodic 574 * timers count events, though everything should resynch again 575 * immediately. 576 */ 577 if (gd->gd_cpuid == 0) { 578 int ni; 579 580 cputicks = info->time - gd->gd_cpuclock_base; 581 if (cputicks >= sys_cputimer->freq) { 582 cputicks /= sys_cputimer->freq; 583 if (cputicks != 0 && cputicks != 1) 584 kprintf("Warning: hardclock missed > 1 sec\n"); 585 gd->gd_time_seconds += cputicks; 586 gd->gd_cpuclock_base += sys_cputimer->freq * cputicks; 587 /* uncorrected monotonic 1-sec gran */ 588 time_uptime += cputicks; 589 } 590 ni = (basetime_index + 1) & BASETIME_ARYMASK; 591 hardtime[ni].time_second = gd->gd_time_seconds; 592 hardtime[ni].cpuclock_base = gd->gd_cpuclock_base; 593 } else { 594 int ni; 595 596 ni = basetime_index; 597 cpu_lfence(); 598 gd->gd_time_seconds = hardtime[ni].time_second; 599 gd->gd_cpuclock_base = hardtime[ni].cpuclock_base; 600 } 601 602 /* 603 * The system-wide ticks counter and NTP related timedelta/tickdelta 604 * adjustments only occur on cpu #0. NTP adjustments are accomplished 605 * by updating basetime. 606 */ 607 if (gd->gd_cpuid == 0) { 608 struct timespec *nbt; 609 struct timespec nts; 610 int leap; 611 int ni; 612 613 /* 614 * Update system-wide ticks 615 */ 616 ++ticks; 617 618 /* 619 * Update system-wide ticktime for getnanotime() and getmicrotime() 620 */ 621 nanotime(&nts); 622 atomic_add_int_nonlocked(&ticktime_update, 1); 623 cpu_sfence(); 624 if (ticktime_update & 2) 625 ticktime2 = nts; 626 else 627 ticktime0 = nts; 628 cpu_sfence(); 629 atomic_add_int_nonlocked(&ticktime_update, 1); 630 631 #if 0 632 if (tco->tc_poll_pps) 633 tco->tc_poll_pps(tco); 634 #endif 635 636 /* 637 * Calculate the new basetime index. We are in a critical section 638 * on cpu #0 and can safely play with basetime_index. Start 639 * with the current basetime and then make adjustments. 640 */ 641 ni = (basetime_index + 1) & BASETIME_ARYMASK; 642 nbt = &basetime[ni]; 643 *nbt = basetime[basetime_index]; 644 645 /* 646 * ntp adjustments only occur on cpu 0 and are protected by 647 * ntp_spin. This spinlock virtually never conflicts. 648 */ 649 spin_lock(&ntp_spin); 650 651 /* 652 * Apply adjtime corrections. (adjtime() API) 653 * 654 * adjtime() only runs on cpu #0 so our critical section is 655 * sufficient to access these variables. 656 */ 657 if (ntp_delta != 0) { 658 nbt->tv_nsec += ntp_tick_delta; 659 ntp_delta -= ntp_tick_delta; 660 if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) || 661 (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) { 662 ntp_tick_delta = ntp_delta; 663 } 664 } 665 666 /* 667 * Apply permanent frequency corrections. (sysctl API) 668 */ 669 if (ntp_tick_permanent != 0) { 670 ntp_tick_acc += ntp_tick_permanent; 671 if (ntp_tick_acc >= (1LL << 32)) { 672 nbt->tv_nsec += ntp_tick_acc >> 32; 673 ntp_tick_acc -= (ntp_tick_acc >> 32) << 32; 674 } else if (ntp_tick_acc <= -(1LL << 32)) { 675 /* Negate ntp_tick_acc to avoid shifting the sign bit. 
				 */
				nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
				ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
			}
		}

		if (nbt->tv_nsec >= 1000000000) {
			nbt->tv_sec++;
			nbt->tv_nsec -= 1000000000;
		} else if (nbt->tv_nsec < 0) {
			nbt->tv_sec--;
			nbt->tv_nsec += 1000000000;
		}

		/*
		 * Another per-tick compensation.  (for ntp_adjtime() API)
		 */
		if (nsec_adj != 0) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				nbt->tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				nbt->tv_nsec -= -nsec_acc >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (nbt->tv_nsec >= 1000000000) {
				nbt->tv_nsec -= 1000000000;
				++nbt->tv_sec;
			} else if (nbt->tv_nsec < 0) {
				nbt->tv_nsec += 1000000000;
				--nbt->tv_sec;
			}
		}
		spin_unlock(&ntp_spin);

		/************************************************************
		 *			LEAP SECOND CORRECTION		    *
		 ************************************************************
		 *
		 * Taking into account all the corrections made above, figure
		 * out the new real time.  If the seconds field has changed
		 * then apply any pending leap-second corrections.
		 */
		getnanotime_nbt(nbt, &nts);

		if (time_second != nts.tv_sec) {
			/*
			 * Apply leap second (sysctl API).  Adjust nts for
			 * changes so we do not have to call getnanotime_nbt
			 * again.
			 */
			if (ntp_leap_second) {
				if (ntp_leap_second == nts.tv_sec) {
					if (ntp_leap_insert) {
						nbt->tv_sec++;
						nts.tv_sec++;
					} else {
						nbt->tv_sec--;
						nts.tv_sec--;
					}
					ntp_leap_second--;
				}
			}

			/*
			 * Apply leap second (ntp_adjtime() API), calculate
			 * a new nsec_adj field.  ntp_update_second() returns
			 * nsec_adj as a per-second value but we need it as
			 * a per-tick value.
			 */
			leap = ntp_update_second(time_second, &nsec_adj);
			nsec_adj /= hz;
			nbt->tv_sec += leap;
			nts.tv_sec += leap;

			/*
			 * Update the time_second 'approximate time' global.
			 */
			time_second = nts.tv_sec;

			/*
			 * Clear the IPC hint for the currently running
			 * thread once per second, allowing us to disconnect
			 * the hint from a thread which may no longer care.
			 */
			curthread->td_wakefromcpu = -1;
		}

		/*
		 * Finally, our new basetime is ready to go live!
		 */
		cpu_sfence();
		basetime_index = ni;

		/*
		 * Update kpmap on each tick.  TS updates are integrated with
		 * fences and upticks allowing userland to read the data
		 * deterministically.
		 */
		if (kpmap) {
			int w;

			w = (kpmap->upticks + 1) & 1;
			getnanouptime(&kpmap->ts_uptime[w]);
			getnanotime(&kpmap->ts_realtime[w]);
			cpu_sfence();
			++kpmap->upticks;
			cpu_sfence();
		}

		/*
		 * Handle exislock pseudo_ticks.  We make things as simple
		 * as possible for the critical path arming code by adding
		 * a little complication here.
		 *
		 * When we find that all cores have been armed, we increment
		 * pseudo_ticks and disarm all the cores.
		 */
		{
			globaldata_t gd;
			int n;

			for (n = 0; n < ncpus; ++n) {
				gd = globaldata_find(n);
				if (gd->gd_exisarmed == 0)
					break;
			}

			if (n == ncpus) {
				for (n = 0; n < ncpus; ++n) {
					gd = globaldata_find(n);
					gd->gd_exisarmed = 0;
				}
				++pseudo_ticks;
			}
		}
	}

	/*
	 * lwkt thread scheduler fair queueing
	 */
	lwkt_schedulerclock(curthread);

	/*
	 * Cycle the existential lock system on odd ticks in order to re-arm
	 * our cpu (in case the cpu is idle or nobody is using any exis locks).
	 */
	if (ticks & 1) {
		exis_hold_gd(gd);
		exis_drop_gd(gd);
	}

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * Rollup accumulated vmstats, copy-back for critical path checks.
	 */
	vmstats_rollup_cpu(gd);
	vfscache_rollup_cpu(gd);
	mycpu->gd_vmstats = vmstats;

	/*
	 * ITimer handling is per-tick, per-cpu.
	 *
	 * We must acquire the per-process token in order for ksignal()
	 * to be non-blocking.  For the moment this requires an AST fault,
	 * the ksignal() cannot be safely issued from this hard interrupt.
	 *
	 * XXX Even the trytoken here isn't right, and itimer operation in
	 *     a multi threaded environment is going to be weird at the
	 *     very least.
	 */
	if ((p = curproc) != NULL && lwkt_trytoken(&p->p_token)) {
		crit_enter_hard();
		if (p->p_upmap)
			++p->p_upmap->runticks;

		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) {
			p->p_flags |= P_SIGVTALRM;
			need_user_resched();
		}
		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) {
			p->p_flags |= P_SIGPROF;
			need_user_resched();
		}
		crit_exit_hard();
		lwkt_reltoken(&p->p_token);
	}
	setdelayed();
}

/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
	globaldata_t gd = mycpu;
	thread_t td;
	struct proc *p;
	int bump;
	sysclock_t cv;
	sysclock_t scv;

	/*
	 * How big was our timeslice relative to the last time?  Calculate
	 * in microseconds.
	 *
	 * NOTE: Use of microuptime() is typically MPSAFE, but usually not
	 *	 during early boot.  Just use the systimer count to be nice
	 *	 to e.g. qemu.  The systimer has a better chance of being
	 *	 MPSAFE at early boot.
	 */
	cv = sys_cputimer->count();
	scv = gd->statint.gd_statcv;
	if (scv == 0) {
		bump = 1;
	} else {
		bump = muldivu64(sys_cputimer->freq64_usec,
				 (cv - scv), 1L << 32);
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	gd->statint.gd_statcv = cv;

#if 0
	stv = &gd->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
			(tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;
#endif

	td = curthread;
	p = td->td_proc;

	/*
	 * If this is an interrupt thread used for the clock interrupt, adjust
	 * td to the thread it is preempting.  If a frame is available, it
	 * will be related to the thread being preempted.
	 */
	if ((td->td_flags & TDF_CLKTHREAD) && td->td_preempted)
		td = td->td_preempted;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flags & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cpu_time.cp_nice += bump;
		else
			cpu_time.cp_user += bump;
	} else {
		int intr_nest = gd->gd_intr_nesting_level;

		if (in_ipi) {
			/*
			 * IPI processing code will bump gd_intr_nesting_level
			 * up by one, which breaks the following CLKF_INTR
			 * test, so we subtract one from it here.
			 */
			--intr_nest;
		}

		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 *     can occur if ipi processing is done from a crit_exit().
		 */
		if ((frame && CLKF_INTR(intr_nest)) ||
		    cpu_interrupt_running(td)) {
			/*
			 * If we interrupted an interrupt thread, well,
			 * count it as interrupt time.
			 */
			td->td_iticks += bump;
#ifdef DEBUG_PCTRACK
			if (frame)
				do_pctrack(frame, PCTRACK_INT);
#endif
			cpu_time.cp_intr += bump;
		} else if (gd->gd_flags & GDF_VIRTUSER) {
			/*
			 * The vkernel doesn't do a good job providing trap
			 * frames that we can test.  If the GDF_VIRTUSER
			 * flag is set we probably interrupted user mode.
			 *
			 * We also use this flag on the host when entering
			 * VMM mode.
			 */
			td->td_uticks += bump;

			/*
			 * Charge the time as appropriate
			 */
			if (p && p->p_nice > NZERO)
				cpu_time.cp_nice += bump;
			else
				cpu_time.cp_user += bump;
		} else {
			if (clock_debug2 > 0) {
				--clock_debug2;
				kprintf("statclock preempt %s (%p %p)\n",
					td->td_comm, td, &gd->gd_idlethread);
			}
			td->td_sticks += bump;
			if (td == &gd->gd_idlethread) {
				/*
				 * We want to count token contention as
				 * system time.  When token contention occurs
				 * the cpu may only be outside its critical
				 * section while switching through the idle
				 * thread.  In this situation, various flags
				 * will be set in gd_reqflags.
				 *
				 * INTPEND is not necessarily useful because
				 * it will be set if the clock interrupt
				 * happens to be on an interrupt thread, but
				 * the cpu_interrupt_running() call does a
				 * better job so we've already handled it.
				 */
				if (gd->gd_reqflags &
				    (RQF_IDLECHECK_WK_MASK & ~RQF_INTPEND)) {
					cpu_time.cp_sys += bump;
				} else {
					cpu_time.cp_idle += bump;
				}
			} else {
				/*
				 * System thread was running.
				 */
#ifdef DEBUG_PCTRACK
				if (frame)
					do_pctrack(frame, PCTRACK_SYS);
#endif
				cpu_time.cp_sys += bump;
			}
		}
	}
}

#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */

static void
do_pctrack(struct intrframe *frame, int which)
{
	struct kinfo_pctrack *pctrack;

	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
		(void *)CLKF_PC(frame);
	++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_pcheader head;
	int error;
	int cpu;
	int ntrack;

	head.pc_ntrack = PCTRACK_SIZE;
	head.pc_arysize = PCTRACK_ARYSIZE;

	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
		return (error);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
					   sizeof(struct kinfo_pctrack));
			if (error)
				break;
		}
		if (error)
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif

/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	struct lwp *lp;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	if ((lp = lwkt_preempted_proc()) != NULL) {
		/*
		 * Account for cpu time used and hit the scheduler.  Note
		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
		 * HERE.
		 */
		++lp->lwp_cpticks;
		usched_schedulerclock(lp, info->periodic, info->time);
	} else {
		usched_schedulerclock(NULL, info->periodic, info->time);
	}
	if ((lp = curthread->td_lwp) != NULL) {
		/*
		 * Update resource usage integrals and maximums.
		 */
		if ((ru = &lp->lwp_proc->p_ru) &&
		    (vm = lp->lwp_proc->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(btoc(vm->vm_tsize));
			ru->ru_idrss += pgtok(btoc(vm->vm_dsize));
			ru->ru_isrss += pgtok(btoc(vm->vm_ssize));
			if (lwkt_trytoken(&vm->vm_map.token)) {
				rss = pgtok(vmspace_resident_count(vm));
				if (ru->ru_maxrss < rss)
					ru->ru_maxrss = rss;
				lwkt_reltoken(&vm->vm_map.token);
			}
		}
	}
	/* Increment the global sched_ticks */
	if (mycpu->gd_cpuid == 0)
		++sched_ticks;
}

/*
 * Compute the number of ticks for the specified amount of time.
 * The return value is intended to be used in a clock interrupt timed
 * operation and is guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function rounds the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		kprintf("tvtohz_high: negative time difference "
			"%ld sec %ld usec\n",
			sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz + howmany((u_long)usec, ustick)) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

int
tstohz_high(struct timespec *ts)
{
	int ticks;
	long sec, nsec;

	sec = ts->tv_sec;
	nsec = ts->tv_nsec;
	if (nsec < 0) {
		sec--;
		nsec += 1000000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (nsec > 0) {
			sec++;
			nsec -= 1000000000;
		}
		kprintf("tstohz_high: negative time difference "
			"%ld sec %ld nsec\n",
			sec, nsec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz + howmany((u_long)nsec, nstick)) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

/*
 * Compute the number of ticks for the specified amount of time, erring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
	else
		ticks = INT_MAX;
	return (ticks);
}

int
tstohz_low(struct timespec *ts)
{
	int ticks;
	long sec;

	sec = ts->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
	else
		ticks = INT_MAX;
	return (ticks);
}
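/*
 * Example (sketch): converting a relative timeval into a tick count for a
 * timeout.  tvtohz_high() errs high, so the sleep meets or exceeds the
 * requested 500ms.  The identifier and wmesg below are illustrative only.
 */
#if 0
	struct timeval tv;
	int some_ident;		/* hypothetical wait channel */
	int error;

	tv.tv_sec = 0;
	tv.tv_usec = 500000;	/* 500ms */
	error = tsleep(&some_ident, 0, "examp", tvtohz_high(&tv));
#endif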
/*
 * Start profiling on a process.
 *
 * Caller must hold p->p_token.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flags & P_PROFIL) == 0) {
		p->p_flags |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			crit_enter();
			psdiv = psratio;
			setstatclockrate(profhz);
			crit_exit();
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 *
 * Caller must hold p->p_token.
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flags & P_PROFIL) {
		p->p_flags &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			crit_enter();
			psdiv = 1;
			setstatclockrate(stathz);
			crit_exit();
		}
#endif
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = ustick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo", "");
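/*
 * Example (sketch, not part of the kernel build): a userland read of
 * kern.clockrate, assuming the conventional BSD struct clockinfo from
 * <sys/time.h>.  Error handling is elided; this is illustrative only.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <stdio.h>

int
main(void)
{
	struct clockinfo ci;
	size_t len = sizeof(ci);

	if (sysctlbyname("kern.clockrate", &ci, &len, NULL, 0) == 0)
		printf("hz %d tick %d stathz %d profhz %d\n",
		       ci.hz, ci.tick, ci.stathz, ci.profhz);
	return 0;
}
#endif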
/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we deal with the case.  For
 * uniformity we deal with the case in the usec case too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
 *
 * NEW CODE (!)
 *
 * cpu 0 now maintains global ticktimes and an update counter.  The
 * getnanotime() and getmicrotime() routines use these globals.
 */
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = muldivu64(sys_cputimer->freq64_usec, delta, 1L << 32);
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = muldivu64(sys_cputimer->freq64_nsec, delta, 1L << 32);
}

void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = muldivu64(sys_cputimer->freq64_usec, delta, 1L << 32);
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = muldivu64(sys_cputimer->freq64_nsec, delta, 1L << 32);
}

/*
 * realtime routines
 */
void
getmicrotime(struct timeval *tvp)
{
	struct timespec ts;
	int counter;

	do {
		counter = *(volatile int *)&ticktime_update;
		cpu_lfence();
		switch(counter & 3) {
		case 0:			/* ticktime2 completed update */
			ts = ticktime2;
			break;
		case 1:			/* ticktime0 update in progress */
			ts = ticktime2;
			break;
		case 2:			/* ticktime0 completed update */
			ts = ticktime0;
			break;
		case 3:			/* ticktime2 update in progress */
			ts = ticktime0;
			break;
		}
		cpu_lfence();
	} while (counter != *(volatile int *)&ticktime_update);
	tvp->tv_sec = ts.tv_sec;
	tvp->tv_usec = ts.tv_nsec / 1000;
}

void
getnanotime(struct timespec *tsp)
{
	struct timespec ts;
	int counter;

	do {
		counter = *(volatile int *)&ticktime_update;
		cpu_lfence();
		switch(counter & 3) {
		case 0:			/* ticktime2 completed update */
			ts = ticktime2;
			break;
		case 1:			/* ticktime0 update in progress */
			ts = ticktime2;
			break;
		case 2:			/* ticktime0 completed update */
			ts = ticktime0;
			break;
		case 3:			/* ticktime2 update in progress */
			ts = ticktime0;
			break;
		}
		cpu_lfence();
	} while (counter != *(volatile int *)&ticktime_update);
	*tsp = ts;
}
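/*
 * Example (sketch): the "up" variants above are the right tool for
 * interval measurement because they are monotonic and immune to
 * wall-clock steps.
 */
#if 0
	struct timespec t1, t2;

	nanouptime(&t1);
	/* ... work to be timed ... */
	nanouptime(&t2);
	timespecsub(&t2, &t1, &t2);	/* t2 = elapsed time */
#endif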
static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = muldivu64(sys_cputimer->freq64_nsec, delta, 1L << 32);

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = muldivu64(sys_cputimer->freq64_usec, delta, 1L << 32);

	bt = &basetime[basetime_index];
	cpu_lfence();
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = muldivu64(sys_cputimer->freq64_nsec, delta, 1L << 32);

	bt = &basetime[basetime_index];
	cpu_lfence();
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

/*
 * Get an approximate time_t.  It does not have to be accurate.  This
 * function is called only from KTR and can be called with the system in
 * any state so do not use a critical section or other complex operation
 * here.
 *
 * NOTE: This is not exactly synchronized with real time.  To do that we
 *	 would have to do what microtime does and check for a nanoseconds
 *	 overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
#ifdef PPS_SYNC
	int fhard;
#endif
	int ni;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
#endif
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
#endif
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		ts.tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	ts.tv_nsec = muldivu64(sys_cputimer->freq64_nsec, delta, 1L << 32);
	ni = basetime_index;
	cpu_lfence();
	bt = &basetime[ni];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp, tsp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= sys_cputimer->freq) {
			delta = (1000000000 * (tcount / sys_cputimer->freq) +
				 sys_cputimer->freq64_nsec *
				 (tcount % sys_cputimer->freq)) >> 32;
		} else {
			delta = muldivu64(sys_cputimer->freq64_nsec,
					  tcount, 1L << 32);
		}
		hardpps(tsp, delta);
	}
#endif
}

/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
tsc_uclock_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		return (rdtsc() +
			tsc_frequency * ns / (int64_t)1000000000);
	}
#endif
	return(-1);
}

/*
 * Compare the tsc against the passed target.
 *
 * Returns +1 if the target has been reached.
 * Returns 0 if the target has not yet been reached.
 * Returns -1 if the TSC is not supported.
 *
 * Typical use: while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		if ((int64_t)(target - rdtsc()) <= 0)
			return(1);
		return(0);
	}
#endif
	return(-1);
}

/*
 * Delay the specified number of nanoseconds using the tsc.  This function
 * returns immediately if the TSC is not supported.  At least one cpu_pause()
 * will be issued.
 */
void
tsc_delay(int ns)
{
	int64_t clk;

	clk = tsc_get_target(ns);
	cpu_pause();
	cpu_pause();
	while (tsc_test_target(clk) == 0) {
		cpu_pause();
		cpu_pause();
		cpu_pause();
		cpu_pause();
	}
}
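/*
 * Example (sketch): bounding a hardware busy-wait with the TSC helpers
 * above.  hardware_not_ready() is a hypothetical placeholder for a device
 * register poll, and a real caller would also handle the -1 "no TSC"
 * return from tsc_get_target().
 */
#if 0
	tsc_uclock_t target;

	target = tsc_get_target(100000);	/* 100us budget */
	while (hardware_not_ready() && tsc_test_target(target) == 0)
		cpu_pause();
#endif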