/*	$OpenBSD: clock.c,v 1.67 2020/10/20 15:59:17 cheloha Exp $	*/
/*	$NetBSD: clock.c,v 1.41 2001/07/24 19:29:25 eeh Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 Gordon W. Ross
 * Copyright (c) 1993 Adam Glass
 * Copyright (c) 1996 Paul Kranenburg
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by Harvard University.
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 *	This product includes software developed by Paul Kranenburg.
 *	This product includes software developed by Harvard University.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)clock.c	8.1 (Berkeley) 6/11/93
 *
 */

/*
 * Clock driver.  This is the id prom and eeprom driver as well
 * and includes the timer register functions too.
 */

/* Define this for a 1/4s clock to ease debugging */
/* #define INTR_DEBUG */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/sched.h>
#include <sys/timetc.h>
#include <sys/atomic.h>

#include <machine/bus.h>
#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/idprom.h>

#include <dev/clock_subr.h>
#include <dev/ic/mk48txxreg.h>

#include <sparc64/sparc64/intreg.h>
#include <sparc64/sparc64/timerreg.h>
#include <sparc64/dev/iommureg.h>
#include <sparc64/dev/sbusreg.h>
#include <dev/sbus/sbusvar.h>
#include <sparc64/dev/ebusreg.h>
#include <sparc64/dev/ebusvar.h>
#include <sparc64/dev/fhcvar.h>

extern u_int64_t cpu_clockrate;

struct clock_wenable_info {
	bus_space_tag_t		cwi_bt;
	bus_space_handle_t	cwi_bh;
	bus_size_t		cwi_size;
};

struct cfdriver clock_cd = {
	NULL, "clock", DV_DULL
};

u_int tick_get_timecount(struct timecounter *);

struct timecounter tick_timecounter = {
	tick_get_timecount, NULL, ~0u, 0, "tick", 0,
	NULL, TC_TICK
};

u_int sys_tick_get_timecount(struct timecounter *);

struct timecounter sys_tick_timecounter = {
	sys_tick_get_timecount, NULL, ~0u, 0, "sys_tick", 1000,
	NULL, TC_SYS_TICK
};

/*
 * Statistics clock interval and variance, in usec.  Variance must be a
 * power of two.  Since this gives us an even number, not an odd number,
 * we discard one case and compensate.  That is, a variance of 1024 would
 * give us offsets in [0..1023].  Instead, we take offsets in [1..1023].
 * This is symmetric about the point 512, or statvar/2, and thus averages
 * to that value (assuming uniform random numbers).
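 *
 * As a worked example (using the values computed in cpu_initclocks()
 * below): with stathz = 100, statint is 1000000 / 100 = 10000us and
 * minint is 10000 / 2 + 100 = 5100us, so statvar is shifted down from
 * 8192 to 4096 and statmin becomes 10000 - 4096 / 2 = 7952us.
 * statintr() then picks each interval uniformly from [7953, 12047]us,
 * which averages out to 10000us, i.e. a stathz rate clock.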
 */
/* XXX fix comment to match value */
int statvar = 8192;
int statmin;			/* statclock interval - 1/2*variance */

static long tick_increment;

void	tick_start(void);
void	sys_tick_start(void);
void	stick_start(void);

int	tickintr(void *);
int	sys_tickintr(void *);
int	stickintr(void *);
int	schedintr(void *);

static struct intrhand level10 = { clockintr };
static struct intrhand level0 = { tickintr };
static struct intrhand level14 = { statintr };
static struct intrhand schedint = { schedintr };

/*
 * clock (eeprom) attaches at the sbus or the ebus (PCI)
 */
static int	clockmatch_sbus(struct device *, void *, void *);
static void	clockattach_sbus(struct device *, struct device *, void *);
static int	clockmatch_ebus(struct device *, void *, void *);
static void	clockattach_ebus(struct device *, struct device *, void *);
static int	clockmatch_fhc(struct device *, void *, void *);
static void	clockattach_fhc(struct device *, struct device *, void *);
static void	clockattach(int, bus_space_tag_t, bus_space_handle_t);

struct cfattach clock_sbus_ca = {
	sizeof(struct device), clockmatch_sbus, clockattach_sbus
};

struct cfattach clock_ebus_ca = {
	sizeof(struct device), clockmatch_ebus, clockattach_ebus
};

struct cfattach clock_fhc_ca = {
	sizeof(struct device), clockmatch_fhc, clockattach_fhc
};

/* Global TOD clock handle & idprom pointer */
extern todr_chip_handle_t todr_handle;
static struct idprom *idprom;

static int	timermatch(struct device *, void *, void *);
static void	timerattach(struct device *, struct device *, void *);

struct timerreg_4u	timerreg_4u;	/* XXX - need more cleanup */

struct cfattach timer_ca = {
	sizeof(struct device), timermatch, timerattach
};

struct cfdriver timer_cd = {
	NULL, "timer", DV_DULL
};

int clock_bus_wenable(struct todr_chip_handle *, int);
struct chiptime;
void myetheraddr(u_char *);
struct idprom *getidprom(void);
int chiptotime(int, int, int, int, int, int);
void timetochip(struct chiptime *);
void stopcounter(struct timer_4u *);

int timerblurb = 10;	/* Guess a value; used before clock is attached */

/*
 * The OPENPROM calls the clock the "eeprom", so we have to have our
 * own special match function to call it the "clock".
 */
static int
clockmatch_sbus(parent, cf, aux)
	struct device *parent;
	void *cf;
	void *aux;
{
	struct sbus_attach_args *sa = aux;

	return (strcmp("eeprom", sa->sa_name) == 0);
}

static int
clockmatch_ebus(parent, cf, aux)
	struct device *parent;
	void *cf;
	void *aux;
{
	struct ebus_attach_args *ea = aux;

	return (strcmp("eeprom", ea->ea_name) == 0);
}

static int
clockmatch_fhc(parent, cf, aux)
	struct device *parent;
	void *cf;
	void *aux;
{
	struct fhc_attach_args *fa = aux;

	return (strcmp("eeprom", fa->fa_name) == 0);
}

/*
 * Attach a clock (really `eeprom') to the sbus or ebus.
 *
 * We ignore any existing virtual address as we need to map
 * this read-only and make it read-write only temporarily,
 * whenever we read or write the clock chip.  The clock also
 * contains the ID ``PROM'', and I have already had the pleasure
 * of reloading the cpu type, Ethernet address, etc, by hand from
 * the console FORTH interpreter.  I intend not to enjoy it again.
 *
 * The MK48T02 is 2K, the MK48T08 is 8K, and the MK48T59 is
 * supposed to be identical to it.
 *
 * This is *UGLY*!  We probably have multiple mappings.  But I do
 * know that this all fits inside an 8K page, so I'll just map it in
 * once.
 *
 * What we really need is some way to record the bus attach args
 * so we can call *_bus_map() later with BUS_SPACE_MAP_READONLY
 * or not to write enable/disable the device registers.  This is
 * a non-trivial operation.
 */

/* ARGSUSED */
static void
clockattach_sbus(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sbus_attach_args *sa = aux;
	bus_space_tag_t bt = sa->sa_bustag;
	int sz;
	static struct clock_wenable_info cwi;

	/* use sa->sa_regs[0].size? */
	sz = 8192;

	if (sbus_bus_map(bt,
	    sa->sa_slot,
	    (sa->sa_offset & ~NBPG),
	    sz,
	    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_READONLY,
	    0, &cwi.cwi_bh) != 0) {
		printf("%s: can't map register\n", self->dv_xname);
		return;
	}
	clockattach(sa->sa_node, bt, cwi.cwi_bh);

	/* Save info for the clock wenable call. */
	cwi.cwi_bt = bt;
	cwi.cwi_size = sz;
	todr_handle->bus_cookie = &cwi;
	todr_handle->todr_setwen = clock_bus_wenable;
}

/*
 * Write en/dis-able clock registers.  We coordinate so that several
 * writers can run simultaneously.
 * XXX There is still a race here.  The page change and the "writers"
 * change are not atomic.
 */
int
clock_bus_wenable(handle, onoff)
	struct todr_chip_handle *handle;
	int onoff;
{
	int s, err = 0;
	int prot;	/* nonzero => change prot */
	volatile static int writers;
	struct clock_wenable_info *cwi = handle->bus_cookie;

	s = splhigh();
	if (onoff)
		prot = writers++ == 0 ? 1 : 0;
	else
		prot = --writers == 0 ? 1 : 0;
	splx(s);

	if (prot) {
		err = bus_space_protect(cwi->cwi_bt, cwi->cwi_bh, cwi->cwi_size,
		    onoff ? 0 : BUS_SPACE_MAP_READONLY);
		if (err)
			printf("clock_wenable_info: WARNING -- cannot %s "
			    "page protection\n", onoff ? "disable" : "enable");
	}
	return (err);
}

/* ARGSUSED */
static void
clockattach_ebus(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct ebus_attach_args *ea = aux;
	bus_space_tag_t bt;
	int sz;
	static struct clock_wenable_info cwi;

	/* hard code to 8K? */
	sz = ea->ea_regs[0].size;

	if (ea->ea_nvaddrs) {
		if (bus_space_map(ea->ea_memtag, ea->ea_vaddrs[0], 0,
		    BUS_SPACE_MAP_PROMADDRESS, &cwi.cwi_bh) != 0) {
			printf("%s: can't map register\n", self->dv_xname);
			return;
		}
		bt = ea->ea_memtag;
	} else if (ebus_bus_map(ea->ea_iotag, 0,
	    EBUS_PADDR_FROM_REG(&ea->ea_regs[0]), sz, 0, 0, &cwi.cwi_bh) == 0) {
		bt = ea->ea_iotag;
	} else if (ebus_bus_map(ea->ea_memtag, 0,
	    EBUS_PADDR_FROM_REG(&ea->ea_regs[0]), sz,
	    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_READONLY,
	    0, &cwi.cwi_bh) == 0) {
		bt = ea->ea_memtag;
	} else {
		printf("%s: can't map register\n", self->dv_xname);
		return;
	}

	clockattach(ea->ea_node, bt, cwi.cwi_bh);

	/* Save info for the clock wenable call. */
	cwi.cwi_bt = bt;
	cwi.cwi_size = sz;
	todr_handle->bus_cookie = &cwi;
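	/*
	 * Register the write-enable hook only for a mapping made through
	 * ea_memtag; a mapping made through ea_iotag above was established
	 * writable and needs no hook.
	 */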
	todr_handle->todr_setwen = (ea->ea_memtag == bt) ?
	    clock_bus_wenable : NULL;
}

static void
clockattach_fhc(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct fhc_attach_args *fa = aux;
	bus_space_tag_t bt = fa->fa_bustag;
	int sz;
	static struct clock_wenable_info cwi;

	/* use sa->sa_regs[0].size? */
	sz = 8192;

	if (fhc_bus_map(bt, fa->fa_reg[0].fbr_slot,
	    (fa->fa_reg[0].fbr_offset & ~NBPG), fa->fa_reg[0].fbr_size,
	    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_READONLY, &cwi.cwi_bh) != 0) {
		printf("%s: can't map register\n", self->dv_xname);
		return;
	}

	clockattach(fa->fa_node, bt, cwi.cwi_bh);

	/* Save info for the clock wenable call. */
	cwi.cwi_bt = bt;
	cwi.cwi_size = sz;
	todr_handle->bus_cookie = &cwi;
	todr_handle->todr_setwen = clock_bus_wenable;
}

static void
clockattach(node, bt, bh)
	int node;
	bus_space_tag_t bt;
	bus_space_handle_t bh;
{
	char *model;
	struct idprom *idp;
	int h;

	model = getpropstring(node, "model");

#ifdef DIAGNOSTIC
	if (model == NULL)
		panic("clockattach: no model property");
#endif

	/* Our TOD clock year 0 is 1968 */
	if ((todr_handle = mk48txx_attach(bt, bh, model, 1968)) == NULL)
		panic("Can't attach %s tod clock", model);

#define IDPROM_OFFSET (8*1024 - 40)	/* XXX - get nvram sz from driver */
	if (idprom == NULL) {
		idp = getidprom();
		if (idp == NULL)
			idp = (struct idprom *)(bus_space_vaddr(bt, bh) +
			    IDPROM_OFFSET);
		idprom = idp;
	} else
		idp = idprom;
	h = idp->id_machine << 24;
	h |= idp->id_hostid[0] << 16;
	h |= idp->id_hostid[1] << 8;
	h |= idp->id_hostid[2];
	hostid = h;
	printf("\n");
}

struct idprom *
getidprom(void)
{
	struct idprom *idp = NULL;
	int node, n;

	node = findroot();
	if (getprop(node, "idprom", sizeof(*idp), &n, (void **)&idp) != 0)
		return (NULL);
	if (n != 1) {
		free(idp, M_DEVBUF, 0);
		return (NULL);
	}
	return (idp);
}

/*
 * The sun4u OPENPROMs call the timer the "counter-timer", except for
 * the lame UltraSPARC IIi PCI machines that don't have them.
 */
static int
timermatch(parent, cf, aux)
	struct device *parent;
	void *cf;
	void *aux;
{
#ifndef MULTIPROCESSOR
	struct mainbus_attach_args *ma = aux;

	if (!timerreg_4u.t_timer || !timerreg_4u.t_clrintr)
		return (strcmp("counter-timer", ma->ma_name) == 0);
	else
#endif
		return (0);
}

static void
timerattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct mainbus_attach_args *ma = aux;
	u_int *va = ma->ma_address;

	/*
	 * What we should have are 3 sets of registers that reside on
	 * different parts of SYSIO or PSYCHO.  We'll use the prom
	 * mappings because we can't get rid of them, and set up the
	 * appropriate pointers in the timerreg_4u structure.
	 */
	timerreg_4u.t_timer = (struct timer_4u *)(u_long)va[0];
	timerreg_4u.t_clrintr = (int64_t *)(u_long)va[1];
	timerreg_4u.t_mapintr = (int64_t *)(u_long)va[2];

	/* Install the appropriate interrupt vector here */
	level10.ih_number = INTVEC(ma->ma_interrupts[0]);
	level10.ih_clr = (void *)&timerreg_4u.t_clrintr[0];
	level10.ih_map = (void *)&timerreg_4u.t_mapintr[0];
	strlcpy(level10.ih_name, "clock", sizeof(level10.ih_name));
	intr_establish(10, &level10);

	level14.ih_number = INTVEC(ma->ma_interrupts[1]);
	level14.ih_clr = (void *)&timerreg_4u.t_clrintr[1];
	level14.ih_map = (void *)&timerreg_4u.t_mapintr[1];
	strlcpy(level14.ih_name, "prof", sizeof(level14.ih_name));
	intr_establish(14, &level14);

	printf(" ivec 0x%llx, 0x%llx\n", INTVEC(level10.ih_number),
	    INTVEC(level14.ih_number));
}

void
stopcounter(creg)
	struct timer_4u *creg;
{
	/* Stop the clock */
	volatile int discard;
	discard = creg->t_limit;
	creg->t_limit = 0;
}

/*
 * XXX this belongs elsewhere
 */
void
myetheraddr(cp)
	u_char *cp;
{
	struct idprom *idp;

	if ((idp = idprom) == NULL) {
		int node, n;

		node = findroot();
		if (getprop(node, "idprom", sizeof *idp, &n, (void **)&idp) ||
		    n != 1) {
			printf("\nmyetheraddr: clock not setup yet, "
			    "and no idprom property in /\n");
			return;
		}
	}

	cp[0] = idp->id_ether[0];
	cp[1] = idp->id_ether[1];
	cp[2] = idp->id_ether[2];
	cp[3] = idp->id_ether[3];
	cp[4] = idp->id_ether[4];
	cp[5] = idp->id_ether[5];
	if (idprom == NULL)
		free(idp, M_DEVBUF, 0);
}

/*
 * Set up the real-time and statistics clocks.  Leave stathz 0 only if
 * no alternative timer is available.
 *
 * The frequencies of these clocks must be an even number of microseconds.
 */
void
cpu_initclocks(void)
{
	int statint, minint;
#ifdef DEBUG
	extern int intrdebug;
#endif
	u_int sys_tick_rate;
	int impl = 0;

#ifdef DEBUG
	/* Set a 1s clock */
	if (intrdebug) {
		hz = 1;
		tick = 1000000 / hz;
		tick_nsec = 1000000000 / hz;
		printf("intrdebug set: 1Hz clock\n");
	}
#endif

	if (1000000 % hz) {
		printf("cannot get %d Hz clock; using 100 Hz\n", hz);
		hz = 100;
		tick = 1000000 / hz;
		tick_nsec = 1000000000 / hz;
	}

	/* Make sure we have a sane cpu_clockrate -- we'll need it */
	if (!cpu_clockrate)
		/* Default to 200MHz clock XXXXX */
		cpu_clockrate = 200000000;

	tick_timecounter.tc_frequency = cpu_clockrate;
	tc_init(&tick_timecounter);

	/*
	 * UltraSPARC IIe processors do have a STICK register, but it
	 * lives on the PCI host bridge and isn't accessible through
	 * ASR24.
	 */
	if (CPU_ISSUN4U || CPU_ISSUN4US)
		impl = (getver() & VER_IMPL) >> VER_IMPL_SHIFT;

	sys_tick_rate = getpropint(findroot(), "stick-frequency", 0);
	if (sys_tick_rate > 0 && impl != IMPL_HUMMINGBIRD) {
		sys_tick_timecounter.tc_frequency = sys_tick_rate;
		tc_init(&sys_tick_timecounter);
	}

	/*
	 * Now handle machines w/o counter-timers.
	 */

	if (!timerreg_4u.t_timer || !timerreg_4u.t_clrintr) {
		struct cpu_info *ci;

		/* We don't have a counter-timer -- use %tick */
		level0.ih_clr = 0;

		/*
		 * Establish a level 10 interrupt handler
		 *
		 * We will have a conflict with the softint handler,
		 * so we set the ih_number to 1.
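		 *
		 * Depending on the hardware, the handler installed below is
		 * tickintr(), sys_tickintr() or stickintr(); each advances
		 * ci_tick by tick_increment (the timer frequency divided by
		 * hz) and rearms the matching compare register.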
		 */
		level0.ih_number = 1;
		strlcpy(level0.ih_name, "clock", sizeof(level0.ih_name));
		intr_establish(10, &level0);

		/* We only have one timer so we have no statclock */
		stathz = 0;

		if (sys_tick_rate > 0) {
			tick_increment = sys_tick_rate / hz;
			if (impl == IMPL_HUMMINGBIRD) {
				level0.ih_fun = stickintr;
				cpu_start_clock = stick_start;
			} else {
				level0.ih_fun = sys_tickintr;
				cpu_start_clock = sys_tick_start;
			}
		} else {
			/* set the next interrupt time */
			tick_increment = cpu_clockrate / hz;
			level0.ih_fun = tickintr;
			cpu_start_clock = tick_start;
		}

		for (ci = cpus; ci != NULL; ci = ci->ci_next)
			memcpy(&ci->ci_tickintr, &level0, sizeof(level0));

		cpu_start_clock();

		return;
	}

	if (stathz == 0)
		stathz = hz;
	if (1000000 % stathz) {
		printf("cannot get %d Hz statclock; using 100 Hz\n", stathz);
		stathz = 100;
	}

	profhz = stathz;		/* always */

	statint = 1000000 / stathz;
	minint = statint / 2 + 100;
	while (statvar > minint)
		statvar >>= 1;

	/*
	 * Establish scheduler softint.
	 */
	schedint.ih_pil = PIL_SCHED;
	schedint.ih_clr = NULL;
	schedint.ih_arg = 0;
	schedint.ih_pending = 0;
	schedhz = stathz / 4;

	/*
	 * Enable timers
	 *
	 * Also need to map the interrupts because we're not a child of
	 * the sbus.
	 * N.B. By default timer[0] is disabled and timer[1] is enabled.
	 */
	stxa((vaddr_t)&timerreg_4u.t_timer[0].t_limit, ASI_NUCLEUS,
	    tmr_ustolim(tick)|TMR_LIM_IEN|TMR_LIM_PERIODIC|TMR_LIM_RELOAD);
	stxa((vaddr_t)&timerreg_4u.t_mapintr[0], ASI_NUCLEUS,
	    timerreg_4u.t_mapintr[0]|INTMAP_V);

#ifdef DEBUG
	if (intrdebug)
		/* Neglect to enable timer */
		stxa((vaddr_t)&timerreg_4u.t_timer[1].t_limit, ASI_NUCLEUS,
		    tmr_ustolim(statint)|TMR_LIM_RELOAD);
	else
#endif
		stxa((vaddr_t)&timerreg_4u.t_timer[1].t_limit, ASI_NUCLEUS,
		    tmr_ustolim(statint)|TMR_LIM_IEN|TMR_LIM_RELOAD);
	stxa((vaddr_t)&timerreg_4u.t_mapintr[1], ASI_NUCLEUS,
	    timerreg_4u.t_mapintr[1]|INTMAP_V);

	statmin = statint - (statvar >> 1);

	tick_enable();
}

/*
 * Dummy setstatclockrate(), since we know profhz==hz.
 */
/* ARGSUSED */
void
setstatclockrate(newhz)
	int newhz;
{
	/* nothing */
}

/*
 * Level 10 (clock) interrupts.  If we are using the FORTH PROM for
 * console input, we need to check for that here as well, and generate
 * a software interrupt to read it.
 */
#ifdef DEBUG
static int clockcheck = 0;
#endif
int
clockintr(cap)
	void *cap;
{
#ifdef DEBUG
	static int64_t tick_base = 0;
	struct timeval ctime;
	int64_t t;

	t = tick() & TICK_TICKS;

	microtime(&ctime);
	if (!tick_base) {
		tick_base = (ctime.tv_sec * 1000000LL + ctime.tv_usec)
		    * 1000000LL / cpu_clockrate;
		tick_base -= t;
	} else if (clockcheck) {
		int64_t tk = t;
		int64_t clk = (ctime.tv_sec * 1000000LL + ctime.tv_usec);
		t -= tick_base;
		t = t * 1000000LL / cpu_clockrate;
		if (t - clk > hz) {
			printf("Clock lost an interrupt!\n");
			printf("Actual: %llx Expected: %llx tick %llx "
			    "tick_base %llx\n", (long long)t, (long long)clk,
			    (long long)tk, (long long)tick_base);
#ifdef DDB
			db_enter();
#endif
			tick_base = 0;
		}
	}
#endif
	/* Let locore.s clear the interrupt for us. */
	hardclock((struct clockframe *)cap);

	return (1);
}

/*
 * Level 10 (clock) interrupts.  If we are using the FORTH PROM for
 * console input, we need to check for that here as well, and generate
 * a software interrupt to read it.
 *
 * %tick is really a level-14 interrupt.  We need to remap this in
 * locore.s to a level 10.
 */
int
tickintr(cap)
	void *cap;
{
	struct cpu_info *ci = curcpu();
	u_int64_t s;

	/*
	 * No need to worry about overflow; %tick is architecturally
	 * defined not to do that for at least 10 years.
	 */
	while (ci->ci_tick < tick()) {
		ci->ci_tick += tick_increment;
		hardclock((struct clockframe *)cap);
		atomic_add_long((unsigned long *)&level0.ih_count.ec_count, 1);
	}

	/* Reset the interrupt. */
	s = intr_disable();
	tickcmpr_set(ci->ci_tick);
	intr_restore(s);

	return (1);
}

int
sys_tickintr(cap)
	void *cap;
{
	struct cpu_info *ci = curcpu();
	u_int64_t s;

	/*
	 * Do we need to worry about overflow here?
	 */
	while (ci->ci_tick < sys_tick()) {
		ci->ci_tick += tick_increment;
		hardclock((struct clockframe *)cap);
		atomic_add_long((unsigned long *)&level0.ih_count.ec_count, 1);
	}

	/* Reset the interrupt. */
	s = intr_disable();
	sys_tickcmpr_set(ci->ci_tick);
	intr_restore(s);

	return (1);
}

int
stickintr(cap)
	void *cap;
{
	struct cpu_info *ci = curcpu();
	u_int64_t s;

	/*
	 * Do we need to worry about overflow here?
	 */
	while (ci->ci_tick < stick()) {
		ci->ci_tick += tick_increment;
		hardclock((struct clockframe *)cap);
		atomic_add_long((unsigned long *)&level0.ih_count.ec_count, 1);
	}

	/* Reset the interrupt. */
	s = intr_disable();
	stickcmpr_set(ci->ci_tick);
	intr_restore(s);

	return (1);
}

/*
 * Level 14 (stat clock) interrupt handler.
 */
int
statintr(cap)
	void *cap;
{
	u_long newint, r, var;
	struct cpu_info *ci = curcpu();

#ifdef NOT_DEBUG
	printf("statclock: count %x:%x, limit %x:%x\n",
	    timerreg_4u.t_timer[1].t_count, timerreg_4u.t_timer[1].t_limit);
#endif
#ifdef NOT_DEBUG
	prom_printf("!");
#endif
	statclock((struct clockframe *)cap);
#ifdef NOTDEF_DEBUG
	/* Don't re-schedule the IRQ */
	return 1;
#endif
	/*
	 * Compute new randomized interval.  The intervals are uniformly
	 * distributed on [statint - statvar / 2, statint + statvar / 2],
	 * and therefore have mean statint, giving a stathz frequency clock.
	 */
	var = statvar;
	do {
		r = random() & (var - 1);
	} while (r == 0);
	newint = statmin + r;

	if (schedhz)
		if ((++ci->ci_schedstate.spc_schedticks & 3) == 0)
			send_softint(-1, PIL_SCHED, &schedint);
	stxa((vaddr_t)&timerreg_4u.t_timer[1].t_limit, ASI_NUCLEUS,
	    tmr_ustolim(newint)|TMR_LIM_IEN|TMR_LIM_RELOAD);

	return (1);
}

int
schedintr(arg)
	void *arg;
{
	if (curproc)
		schedclock(curproc);
	return (1);
}

void
tick_start(void)
{
	struct cpu_info *ci = curcpu();
	u_int64_t s;

	tick_enable();

	/*
	 * Try to make the tick interrupts as synchronous as possible on
	 * all CPUs to avoid inaccuracies for migrating processes.
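	 *
	 * Rounding the current counter value up to a multiple of
	 * tick_increment means every CPU arms its compare register on
	 * the same tick_increment boundary.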
	 */

	s = intr_disable();
	ci->ci_tick = roundup(tick(), tick_increment);
	tickcmpr_set(ci->ci_tick);
	intr_restore(s);
}

void
sys_tick_start(void)
{
	struct cpu_info *ci = curcpu();
	u_int64_t s;

	if (CPU_ISSUN4U || CPU_ISSUN4US) {
		tick_enable();
		sys_tick_enable();
	}

	/*
	 * Try to make the tick interrupts as synchronous as possible on
	 * all CPUs to avoid inaccuracies for migrating processes.
	 */

	s = intr_disable();
	ci->ci_tick = roundup(sys_tick(), tick_increment);
	sys_tickcmpr_set(ci->ci_tick);
	intr_restore(s);
}

void
stick_start(void)
{
	struct cpu_info *ci = curcpu();
	u_int64_t s;

	tick_enable();

	/*
	 * Try to make the tick interrupts as synchronous as possible on
	 * all CPUs to avoid inaccuracies for migrating processes.
	 */

	s = intr_disable();
	ci->ci_tick = roundup(stick(), tick_increment);
	stickcmpr_set(ci->ci_tick);
	intr_restore(s);
}

u_int
tick_get_timecount(struct timecounter *tc)
{
	u_int64_t tick;

	__asm volatile("rd %%tick, %0" : "=r" (tick));

	return (tick & ~0u);
}

u_int
sys_tick_get_timecount(struct timecounter *tc)
{
	u_int64_t tick;

	__asm volatile("rd %%sys_tick, %0" : "=r" (tick));

	return (tick & ~0u);
}