/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2011 The FreeBSD Foundation
 * Copyright (c) 2013 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * Based on mpcore_timer.c developed by Ben Gray <ben.r.gray@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/**
 * Cortex-A7, Cortex-A15, ARMv8 and later Generic Timer
 *
 * Driver for the per-CPU architected timer/counter.  It registers both a
 * timecounter (monotonic counter reads) and a one-shot per-CPU eventtimer,
 * wires the timer interrupts discovered via FDT or ACPI (GTDT), exposes the
 * counter to userspace through CNTKCTL and the vDSO, and provides DELAY()
 * on arm64.
 */

#include "opt_acpi.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/timeet.h>
#include <sys/timetc.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <sys/watchdog.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/machdep.h>
#include <machine/md_var.h>

#if defined(__aarch64__)
#include <machine/undefined.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

/* Interrupt indices into arm_tmr_softc::irqs, one per timer variant. */
#define	GT_PHYS_SECURE		0
#define	GT_PHYS_NONSECURE	1
#define	GT_VIRT			2
#define	GT_HYP_PHYS		3
#define	GT_HYP_VIRT		4
#define	GT_IRQ_COUNT		5

/* Bits in the CNTx_CTL timer control registers. */
#define	GT_CTRL_ENABLE		(1 << 0)
#define	GT_CTRL_INT_MASK	(1 << 1)
#define	GT_CTRL_INT_STAT	(1 << 2)
#define	GT_REG_CTRL		0
#define	GT_REG_TVAL		1

/* Bits in CNTKCTL controlling EL0 (userspace) access to the timers. */
#define	GT_CNTKCTL_PL0PTEN	(1 << 9) /* PL0 Physical timer reg access */
#define	GT_CNTKCTL_PL0VTEN	(1 << 8) /* PL0 Virtual timer reg access */
#define	GT_CNTKCTL_EVNTI	(0xf << 4) /* Virtual counter event bits */
#define	GT_CNTKCTL_EVNTDIR	(1 << 3) /* Virtual counter event transition */
#define	GT_CNTKCTL_EVNTEN	(1 << 2) /* Enables virtual counter events */
#define	GT_CNTKCTL_PL0VCTEN	(1 << 1) /* PL0 CNTVCT and CNTFRQ access */
#define	GT_CNTKCTL_PL0PCTEN	(1 << 0) /* PL0 CNTPCT and CNTFRQ access */

struct arm_tmr_softc;

/* One allocated timer interrupt: its resource, handler cookie and ids. */
struct arm_tmr_irq {
	struct resource	*res;
	void		*ihl;
	int		 rid;
	int		 idx;	/* GT_* index identifying the timer variant */
};

struct arm_tmr_softc {
	struct arm_tmr_irq	irqs[GT_IRQ_COUNT];
	uint64_t		(*get_cntxct)(bool); /* counter read method */
	uint32_t		clkfreq;	/* counter frequency, Hz */
	int			irq_count;	/* valid entries in irqs[] */
	struct eventtimer	et;
	bool			physical_sys;	/* kernel uses phys counter */
	bool			physical_user;	/* userspace may use phys */
};

/* Singleton; also consumed by DELAY() and the vDSO hook. */
static struct arm_tmr_softc *arm_tmr_sc = NULL;

/*
 * The timer interrupts we may find, in the conventional FDT order.
 * RF_OPTIONAL entries are allowed to be absent.
 */
static const struct arm_tmr_irq_defs {
	int idx;
	const char *name;
	int flags;
} arm_tmr_irq_defs[] = {
	{
		.idx = GT_PHYS_SECURE,
		.name = "sec-phys",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
	{
		.idx = GT_PHYS_NONSECURE,
		.name = "phys",
		.flags = RF_ACTIVE,
	},
	{
		.idx = GT_VIRT,
		.name = "virt",
		.flags = RF_ACTIVE,
	},
	{
		.idx = GT_HYP_PHYS,
		.name = "hyp-phys",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
	{
		.idx = GT_HYP_VIRT,
		.name = "hyp-virt",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
};

static int arm_tmr_attach(device_t);

static uint32_t arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc);
static void arm_tmr_do_delay(int usec, void *);

static timecounter_get_t arm_tmr_get_timecount;

static struct timecounter arm_tmr_timecount = {
	.tc_name           = "ARM MPCore Timecounter",
	.tc_get_timecount  = arm_tmr_get_timecount,
	.tc_poll_pps       = NULL,
	.tc_counter_mask   = ~0u,
	.tc_frequency      = 0,	/* filled in at attach from clkfreq */
	.tc_quality        = 1000,
	.tc_fill_vdso_timehands = arm_tmr_fill_vdso_timehands,
};

/*
 * Register accessors: CP15 on arm, system registers on arm64, hidden
 * behind a common get/set_el0/el1 spelling.
 */
#ifdef __arm__
#define	get_el0(x)	cp15_## x ##_get()
#define	get_el1(x)	cp15_## x ##_get()
#define	set_el0(x, val)	cp15_## x ##_set(val)
#define	set_el1(x, val)	cp15_## x ##_set(val)
#define	HAS_PHYS	true
#else /* __aarch64__ */
#define	get_el0(x)	READ_SPECIALREG(x ##_el0)
#define	get_el1(x)	READ_SPECIALREG(x ##_el1)
#define	set_el0(x, val)	WRITE_SPECIALREG(x ##_el0, val)
#define	set_el1(x, val)	WRITE_SPECIALREG(x ##_el1, val)
#define	HAS_PHYS	has_hyp()
#endif

/* Read the counter frequency from CNTFRQ. */
static int
get_freq(void)
{
	return (get_el0(cntfrq));
}

/*
 * Counter read for the Allwinner A64 unstable timer: re-read while the
 * value is adjacent to an 11-bit rollover (low 11 bits all-ones or zero),
 * where this hardware can return a bogus value.
 */
static uint64_t
get_cntxct_a64_unstable(bool physical)
{
	uint64_t val;

	isb();
	if (physical) {
		do {
			val = get_el0(cntpct);
		} while (((val + 1) & 0x7FF) <= 1);
	} else {
		do {
			val = get_el0(cntvct);
		} while (((val + 1) & 0x7FF) <= 1);
	}

	return (val);
}

/* Read the physical (CNTPCT) or virtual (CNTVCT) counter. */
static uint64_t
get_cntxct(bool physical)
{
	uint64_t val;

	/* Barrier so the read is not speculated ahead of prior accesses. */
	isb();
	if (physical)
		val = get_el0(cntpct);
	else
		val = get_el0(cntvct);

	return (val);
}

/* Write the timer control register for the selected timer. */
static int
set_ctrl(uint32_t val, bool physical)
{

	if (physical)
		set_el0(cntp_ctl, val);
	else
		set_el0(cntv_ctl, val);
	isb();

	return (0);
}

/* Write the timer countdown (TVAL) register for the selected timer. */
static int
set_tval(uint32_t val, bool physical)
{

	if (physical)
		set_el0(cntp_tval, val);
	else
		set_el0(cntv_tval, val);
	isb();

	return (0);
}

/* Read the timer control register for the selected timer. */
static int
get_ctrl(bool physical)
{
	uint32_t val;

	if (physical)
		val = get_el0(cntp_ctl);
	else
		val = get_el0(cntv_ctl);

	return (val);
}

/*
 * Per-CPU CNTKCTL setup, run on every CPU via smp_rendezvous: always let
 * userspace read the virtual counter, optionally the physical counter,
 * and deny userspace access to the timer (compare/control) registers.
 */
static void
setup_user_access(void *arg __unused)
{
	uint32_t cntkctl;

	cntkctl = get_el1(cntkctl);
	cntkctl &= ~(GT_CNTKCTL_PL0PTEN | GT_CNTKCTL_PL0VTEN |
	    GT_CNTKCTL_EVNTEN | GT_CNTKCTL_PL0PCTEN);
	/* Always enable the virtual timer */
	cntkctl |= GT_CNTKCTL_PL0VCTEN;
	/* Enable the physical timer if supported */
	if (arm_tmr_sc->physical_user) {
		cntkctl |= GT_CNTKCTL_PL0PCTEN;
	}
	set_el1(cntkctl, cntkctl);
	isb();
}

#ifdef __aarch64__
/*
 * Undefined-instruction handler that emulates userspace MRS reads of
 * CNTPCT_EL0 (trapped when PL0PCTEN is clear) by returning the virtual
 * counter value instead.  Returns 1 when the instruction was handled.
 */
static int
cntpct_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
    uint32_t esr)
{
	uint64_t val;
	int reg;

	if ((insn & MRS_MASK) != MRS_VALUE)
		return (0);

	if (MRS_SPECIAL(insn) != MRS_SPECIAL(CNTPCT_EL0))
		return (0);

	reg = MRS_REGISTER(insn);
	val = READ_SPECIALREG(cntvct_el0);
	if (reg < nitems(frame->tf_x)) {
		frame->tf_x[reg] = val;
	} else if (reg == 30) {
		frame->tf_lr = val;
	}

	/*
	 * We will handle this instruction, move to the next so we
	 * don't trap here again.
	 */
	frame->tf_elr += INSN_SIZE;

	return (1);
}
#endif

/*
 * SYSINIT hook (after SMP is up): configure userspace counter access on
 * all CPUs and, if tuned, install the CNTPCT_EL0 emulation handler.
 */
static void
tmr_setup_user_access(void *arg __unused)
{
#ifdef __aarch64__
	int emulate;
#endif

	if (arm_tmr_sc != NULL) {
		smp_rendezvous(NULL, setup_user_access, NULL, NULL);
#ifdef __aarch64__
		if (TUNABLE_INT_FETCH("hw.emulate_phys_counter", &emulate) &&
		    emulate != 0) {
			install_undef_handler(true, cntpct_handler);
		}
#endif
	}
}
SYSINIT(tmr_ua, SI_SUB_SMP, SI_ORDER_ANY, tmr_setup_user_access, NULL);

/* Timecounter read method: low 32 bits of the selected counter. */
static unsigned
arm_tmr_get_timecount(struct timecounter *tc)
{

	return (arm_tmr_sc->get_cntxct(arm_tmr_sc->physical_sys));
}

/*
 * Eventtimer start: one-shot only (period is ignored by design; the
 * eventtimer is registered with ET_FLAGS_ONESHOT).
 */
static int
arm_tmr_start(struct eventtimer *et, sbintime_t first,
    sbintime_t period __unused)
{
	struct arm_tmr_softc *sc;
	int counts, ctrl;

	sc = (struct arm_tmr_softc *)et->et_priv;

	if (first != 0) {
		/*
		 * sbintime_t is a 32.32 fixed-point seconds value, so
		 * freq * first >> 32 yields the interval in timer ticks.
		 */
		counts = ((uint32_t)et->et_frequency * first) >> 32;
		ctrl = get_ctrl(sc->physical_sys);
		ctrl &= ~GT_CTRL_INT_MASK;
		ctrl |= GT_CTRL_ENABLE;
		set_tval(counts, sc->physical_sys);
		set_ctrl(ctrl, sc->physical_sys);
		return (0);
	}

	return (EINVAL);

}

/* Clear the enable bit in the selected timer's control register. */
static void
arm_tmr_disable(bool physical)
{
	int ctrl;

	ctrl = get_ctrl(physical);
	ctrl &= ~GT_CTRL_ENABLE;
	set_ctrl(ctrl, physical);
}

/* Eventtimer stop method. */
static int
arm_tmr_stop(struct eventtimer *et)
{
	struct arm_tmr_softc *sc;

	sc = (struct arm_tmr_softc *)et->et_priv;
	arm_tmr_disable(sc->physical_sys);

	return (0);
}

/*
 * Interrupt filter: mask the (level-triggered) timer interrupt if it is
 * asserted, then invoke the eventtimer callback.
 */
static int
arm_tmr_intr(void *arg)
{
	struct arm_tmr_softc *sc;
	int ctrl;

	sc = (struct arm_tmr_softc *)arg;
	ctrl = get_ctrl(sc->physical_sys);
	if (ctrl & GT_CTRL_INT_STAT) {
		ctrl |= GT_CTRL_INT_MASK;
		set_ctrl(ctrl, sc->physical_sys);
	}

	if (sc->et.et_active)
		sc->et.et_event_cb(&sc->et, sc->et.et_arg);

	return (FILTER_HANDLED);
}

/*
 * Allocate one timer IRQ resource and record it in the softc.  A missing
 * RF_OPTIONAL interrupt is not an error; a missing required one is ENXIO.
 */
static int
arm_tmr_attach_irq(device_t dev, struct arm_tmr_softc *sc,
    const struct arm_tmr_irq_defs *irq_def, int rid, int flags)
{
	struct arm_tmr_irq *irq;

	irq = &sc->irqs[sc->irq_count];
	irq->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, flags);
	if (irq->res == NULL) {
		if (bootverbose || (flags & RF_OPTIONAL) == 0) {
			device_printf(dev,
			    "could not allocate irq for %s interrupt '%s'\n",
			    (flags & RF_OPTIONAL) != 0 ? "optional" :
			    "required", irq_def->name);
		}

		if ((flags & RF_OPTIONAL) == 0)
			return (ENXIO);
	} else {
		if (bootverbose)
			device_printf(dev, "allocated irq for '%s'\n",
			    irq_def->name);
		irq->rid = rid;
		irq->idx = irq_def->idx;
		sc->irq_count++;
	}

	return (0);
}

#ifdef FDT
static int
arm_tmr_fdt_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_is_compatible(dev, "arm,armv8-timer")) {
		device_set_desc(dev, "ARMv8 Generic Timer");
		return (BUS_PROBE_DEFAULT);
	} else if (ofw_bus_is_compatible(dev, "arm,armv7-timer")) {
		device_set_desc(dev, "ARMv7 Generic Timer");
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

/*
 * FDT attach: allocate the timer interrupts either by "interrupt-names"
 * (when present) or by positional index, then hand off to the common
 * arm_tmr_attach().  On failure all allocated IRQ resources are released.
 */
static int
arm_tmr_fdt_attach(device_t dev)
{
	struct arm_tmr_softc *sc;
	const struct arm_tmr_irq_defs *irq_def;
	size_t i;
	phandle_t node;
	int error, rid;
	bool has_names;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);

	has_names = OF_hasprop(node, "interrupt-names");
	for (i = 0; i < nitems(arm_tmr_irq_defs); i++) {
		int flags;

		/*
		 * If we don't have names to go off of, we assume that they're
		 * in the "usual" order with sec-phys first and allocate by idx.
		 */
		irq_def = &arm_tmr_irq_defs[i];
		rid = irq_def->idx;
		flags = irq_def->flags;
		if (has_names) {
			error = ofw_bus_find_string_index(node,
			    "interrupt-names", irq_def->name, &rid);

			/*
			 * If we have names, missing a name means we don't
			 * have it.
			 */
			if (error != 0) {
				/*
				 * Could be noisy on a lot of platforms for no
				 * good cause.
				 */
				if (bootverbose || (flags & RF_OPTIONAL) == 0) {
					device_printf(dev,
					    "could not find irq for %s interrupt '%s'\n",
					    (flags & RF_OPTIONAL) != 0 ?
					    "optional" : "required",
					    irq_def->name);
				}

				if ((flags & RF_OPTIONAL) == 0)
					goto out;

				continue;
			}

			/*
			 * Warn about failing to activate if we did actually
			 * have the name present.
			 */
			flags &= ~RF_OPTIONAL;
		}

		error = arm_tmr_attach_irq(dev, sc, irq_def, rid, flags);
		if (error != 0)
			goto out;
	}

	error = arm_tmr_attach(dev);
out:
	if (error != 0) {
		for (i = 0; i < sc->irq_count; i++) {
			bus_release_resource(dev, SYS_RES_IRQ, sc->irqs[i].rid,
			    sc->irqs[i].res);
		}
	}

	return (error);

}
#endif

#ifdef DEV_ACPI
/* Record one GTDT-discovered interrupt as an IRQ resource on the child. */
static void
arm_tmr_acpi_add_irq(device_t parent, device_t dev, int rid, u_int irq)
{

	BUS_SET_RESOURCE(parent, dev, SYS_RES_IRQ, rid, irq, 1);
}

/*
 * ACPI identify: locate the GTDT table and, if found, add a
 * "generic_timer" child carrying the secure, non-secure and virtual
 * timer interrupts from the table.
 */
static void
arm_tmr_acpi_identify(driver_t *driver, device_t parent)
{
	ACPI_TABLE_GTDT *gtdt;
	vm_paddr_t physaddr;
	device_t dev;

	physaddr = acpi_find_table(ACPI_SIG_GTDT);
	if (physaddr == 0)
		return;

	gtdt = acpi_map_table(physaddr, ACPI_SIG_GTDT);
	if (gtdt == NULL) {
		device_printf(parent, "gic: Unable to map the GTDT\n");
		return;
	}

	dev = BUS_ADD_CHILD(parent, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE,
	    "generic_timer", -1);
	if (dev == NULL) {
		device_printf(parent, "add gic child failed\n");
		goto out;
	}

	arm_tmr_acpi_add_irq(parent, dev, GT_PHYS_SECURE,
	    gtdt->SecureEl1Interrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_PHYS_NONSECURE,
	    gtdt->NonSecureEl1Interrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_VIRT,
	    gtdt->VirtualTimerInterrupt);

out:
	acpi_unmap_table(gtdt);
}

static int
arm_tmr_acpi_probe(device_t dev)
{

	device_set_desc(dev, "ARM Generic Timer");
	return (BUS_PROBE_NOWILDCARD);
}

/*
 * ACPI attach: allocate the IRQs staged by identify, then hand off to
 * the common arm_tmr_attach().  Releases resources on failure.
 */
static int
arm_tmr_acpi_attach(device_t dev)
{
	const struct arm_tmr_irq_defs *irq_def;
	struct arm_tmr_softc *sc;
	int error;

	sc = device_get_softc(dev);
	for (int i = 0; i < nitems(arm_tmr_irq_defs); i++) {
		irq_def = &arm_tmr_irq_defs[i];
		error = arm_tmr_attach_irq(dev, sc, irq_def, irq_def->idx,
		    irq_def->flags);
		if (error != 0)
			goto out;
	}

	error = arm_tmr_attach(dev);
out:
	if (error != 0) {
		for (int i = 0; i < sc->irq_count; i++) {
			bus_release_resource(dev, SYS_RES_IRQ,
			    sc->irqs[i].rid, sc->irqs[i].res);
		}
	}
	return (error);
}
#endif

/*
 * Common attach: determine the counter frequency, pick the physical or
 * virtual timer for kernel use, hook the interrupts, and register the
 * timecounter and per-CPU one-shot eventtimer.  Only one instance may
 * attach (arm_tmr_sc is a singleton).
 */
static int
arm_tmr_attach(device_t dev)
{
	struct arm_tmr_softc *sc;
#ifdef INVARIANTS
	const struct arm_tmr_irq_defs *irq_def;
#endif
#ifdef FDT
	phandle_t node;
	pcell_t clock;
#endif
#ifdef __aarch64__
	int user_phys;
#endif
	int error;
	int i, first_timer, last_timer;

	sc = device_get_softc(dev);
	if (arm_tmr_sc)
		return (ENXIO);

	sc->get_cntxct = &get_cntxct;
#ifdef FDT
	/* Get the base clock frequency */
	node = ofw_bus_get_node(dev);
	if (node > 0) {
		error = OF_getencprop(node, "clock-frequency", &clock,
		    sizeof(clock));
		if (error > 0)
			sc->clkfreq = clock;

		if (OF_hasprop(node, "allwinner,sun50i-a64-unstable-timer")) {
			sc->get_cntxct = &get_cntxct_a64_unstable;
			if (bootverbose)
				device_printf(dev,
				    "Enabling allwinner unstable timer workaround\n");
		}
	}
#endif

	if (sc->clkfreq == 0) {
		/* Try to get clock frequency from timer */
		sc->clkfreq = get_freq();
	}

	if (sc->clkfreq == 0) {
		device_printf(dev, "No clock frequency specified\n");
		return (ENXIO);
	}

#ifdef INVARIANTS
	/* Confirm that non-optional irqs were allocated before coming in. */
	for (i = 0; i < nitems(arm_tmr_irq_defs); i++) {
		int j;

		irq_def = &arm_tmr_irq_defs[i];

		/* Skip optional interrupts */
		if ((irq_def->flags & RF_OPTIONAL) != 0)
			continue;

		for (j = 0; j < sc->irq_count; j++) {
			if (sc->irqs[j].idx == irq_def->idx)
				break;
		}
		KASSERT(j < sc->irq_count, ("%s: Missing required interrupt %s",
		    __func__, irq_def->name));
	}
#endif

#ifdef __aarch64__
	/*
	 * Use the virtual timer when we can't use the hypervisor.
	 * A hypervisor guest may change the virtual timer registers while
	 * executing so any use of the virtual timer interrupt needs to be
	 * coordinated with the virtual machine manager.
	 */
	if (!HAS_PHYS) {
		sc->physical_sys = false;
		first_timer = GT_VIRT;
		last_timer = GT_VIRT;
	} else
#endif
	/* Otherwise set up the secure and non-secure physical timers. */
	{
		sc->physical_sys = true;
		first_timer = GT_PHYS_SECURE;
		last_timer = GT_PHYS_NONSECURE;
	}

#ifdef __aarch64__
	/*
	 * The virtual timer is always available on arm and arm64, tell
	 * userspace to use it.
	 */
	sc->physical_user = false;
	/* Allow use of the physical counter in userspace when available */
	if (TUNABLE_INT_FETCH("hw.userspace_allow_phys_counter", &user_phys) &&
	    user_phys != 0)
		sc->physical_user = sc->physical_sys;
#else
	/*
	 * The virtual timer depends on setting cntvoff from the hypervisor
	 * privilege level/el2, however this is only set on arm64.
	 */
	sc->physical_user = true;
#endif

	arm_tmr_sc = sc;

	/* Setup secure, non-secure and virtual IRQs handler */
	for (i = 0; i < sc->irq_count; i++) {
		/* Only enable IRQs on timers we expect to use */
		if (sc->irqs[i].idx < first_timer ||
		    sc->irqs[i].idx > last_timer)
			continue;
		error = bus_setup_intr(dev, sc->irqs[i].res, INTR_TYPE_CLK,
		    arm_tmr_intr, NULL, sc, &sc->irqs[i].ihl);
		if (error) {
			device_printf(dev, "Unable to alloc int resource.\n");
			for (int j = 0; j < i; j++)
				bus_teardown_intr(dev, sc->irqs[j].res,
				    &sc->irqs[j].ihl);
			return (ENXIO);
		}
	}

	/* Disable the timers until we are ready */
	arm_tmr_disable(false);
	if (HAS_PHYS)
		arm_tmr_disable(true);

	arm_tmr_timecount.tc_frequency = sc->clkfreq;
	tc_init(&arm_tmr_timecount);

	sc->et.et_name = "ARM MPCore Eventtimer";
	sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU;
	sc->et.et_quality = 1000;

	sc->et.et_frequency = sc->clkfreq;
	/* Periods as 32.32 fixed point, bounded by the 32-bit TVAL range. */
	sc->et.et_min_period = (0x00000010LLU << 32) / sc->et.et_frequency;
	sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency;
	sc->et.et_start = arm_tmr_start;
	sc->et.et_stop = arm_tmr_stop;
	sc->et.et_priv = sc;
	et_register(&sc->et);

#if defined(__arm__)
	arm_set_delay(arm_tmr_do_delay, sc);
#endif

	return (0);
}

#ifdef FDT
static device_method_t arm_tmr_fdt_methods[] = {
	DEVMETHOD(device_probe,		arm_tmr_fdt_probe),
	DEVMETHOD(device_attach,	arm_tmr_fdt_attach),
	{ 0, 0 }
};

static DEFINE_CLASS_0(generic_timer, arm_tmr_fdt_driver, arm_tmr_fdt_methods,
    sizeof(struct arm_tmr_softc));

EARLY_DRIVER_MODULE(timer, simplebus, arm_tmr_fdt_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
EARLY_DRIVER_MODULE(timer, ofwbus, arm_tmr_fdt_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
#endif

#ifdef DEV_ACPI
static device_method_t arm_tmr_acpi_methods[] = {
	DEVMETHOD(device_identify,	arm_tmr_acpi_identify),
	DEVMETHOD(device_probe,		arm_tmr_acpi_probe),
	DEVMETHOD(device_attach,	arm_tmr_acpi_attach),
	{ 0, 0 }
};

static DEFINE_CLASS_0(generic_timer, arm_tmr_acpi_driver, arm_tmr_acpi_methods,
    sizeof(struct arm_tmr_softc));

EARLY_DRIVER_MODULE(timer, acpi, arm_tmr_acpi_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
#endif

/* Busy-wait for usec microseconds by polling the counter. */
static void
arm_tmr_do_delay(int usec, void *arg)
{
	struct arm_tmr_softc *sc = arg;
	int32_t counts, counts_per_usec;
	uint32_t first, last;

	/* Get the number of times to count */
	counts_per_usec = ((arm_tmr_timecount.tc_frequency / 1000000) + 1);

	/*
	 * Clamp the timeout at a maximum value (about 32 seconds with
	 * a 66MHz clock). *Nobody* should be delay()ing for anywhere
	 * near that length of time and if they are, they should be hung
	 * out to dry.
	 */
	if (usec >= (0x80000000U / counts_per_usec))
		counts = (0x80000000U / counts_per_usec) - 1;
	else
		counts = usec * counts_per_usec;

	first = sc->get_cntxct(sc->physical_sys);

	while (counts > 0) {
		last = sc->get_cntxct(sc->physical_sys);
		/* 32-bit subtraction is rollover-safe. */
		counts -= (int32_t)(last - first);
		first = last;
	}
}

#if defined(__aarch64__)
void
DELAY(int usec)
{
	int32_t counts;

	TSENTER();
	/*
	 * Check the timers are setup, if not just
	 * use a for loop for the meantime
	 */
	if (arm_tmr_sc == NULL) {
		for (; usec > 0; usec--)
			for (counts = 200; counts > 0; counts--)
				/*
				 * Prevent the compiler from optimizing
				 * out the loop
				 */
				cpufunc_nullop();
	} else
		arm_tmr_do_delay(usec, arm_tmr_sc);
	TSEXIT();
}
#endif

/*
 * vDSO hook: tell userspace to time via the generic timer, and whether
 * to read the physical or virtual counter.
 */
static uint32_t
arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc)
{

	vdso_th->th_algo = VDSO_TH_ALGO_ARM_GENTIM;
	vdso_th->th_physical = arm_tmr_sc->physical_user;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (1);
}