/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/mpapic.c,v 1.37.2.7 2003/01/25 02:31:47 peter Exp $
 */

/*
 * Local APIC (LAPIC) support: mapping of the register window, BSP/AP
 * initialization, LAPIC-timer-based cputimer interrupt driver, and the
 * inter-processor interrupt (IPI) primitives.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/machintr.h>
#include <machine/globaldata.h>
#include <machine/smp.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine_base/apic/lapic.h>
#include <machine_base/apic/ioapic.h>
#include <machine_base/apic/ioapic_abi.h>
#include <machine_base/apic/apicvar.h>
#include <machine_base/icu/icu_var.h>
#include <machine/segments.h>
#include <sys/thread2.h>

#include <machine/cputypes.h>
#include <machine/intr_machdep.h>

/* Number of application processors (APs), discovered by the enumerator. */
extern int naps;

/*
 * Pointer to the memory-mapped local APIC register window; established by
 * lapic_map().  All cpus access their own local APIC through this mapping.
 */
volatile lapic_t *lapic;

static void	lapic_timer_calibrate(void);
static void	lapic_timer_set_divisor(int);
static void	lapic_timer_fixup_handler(void *);
static void	lapic_timer_restart_handler(void *);

void		lapic_timer_process(void);
void		lapic_timer_process_frame(struct intrframe *);
void		lapic_timer_always(struct intrframe *);

/*
 * Tunable hw.lapic_timer_enable: when non-zero (the default) the LAPIC
 * timer is registered and selected as a cputimer interrupt source on the
 * BSP during lapic_init().
 */
static int	lapic_timer_enable = 1;
TUNABLE_INT("hw.lapic_timer_enable", &lapic_timer_enable);

static void	lapic_timer_intr_reload(struct cputimer_intr *, sysclock_t);
static void	lapic_timer_intr_enable(struct cputimer_intr *);
static void	lapic_timer_intr_restart(struct cputimer_intr *);
static void	lapic_timer_intr_pmfixup(struct cputimer_intr *);

/*
 * cputimer_intr driver glue for the LAPIC timer.  .freq is filled in by
 * lapic_timer_calibrate(); the remaining callbacks implement reload,
 * enable, restart and power-management fixup for this timer source.
 */
static struct cputimer_intr lapic_cputimer_intr = {
	.freq = 0,
	.reload = lapic_timer_intr_reload,
	.enable = lapic_timer_intr_enable,
	.config = cputimer_intr_default_config,
	.restart = lapic_timer_intr_restart,
	.pmfixup = lapic_timer_intr_pmfixup,
	.initclock = cputimer_intr_default_initclock,
	.next = SLIST_ENTRY_INITIALIZER,
	.name = "lapic",
	.type = CPUTIMER_INTR_LAPIC,
	.prio = CPUTIMER_INTR_PRIO_LAPIC,
	.caps = CPUTIMER_INTR_CAP_NONE
};

/*
 * Index of the divisor chosen by lapic_timer_calibrate(); -1 until the
 * BSP has calibrated the timer.  APs reuse the BSP's divisor.
 */
static int	lapic_timer_divisor_idx = -1;
/*
 * Divide-configuration register values, ordered so that calibration tries
 * increasing divisors (2, 4, ... 128) before the undivided clock.
 */
static const uint32_t lapic_timer_divisors[] = {
	APIC_TDCR_2,	APIC_TDCR_4,	APIC_TDCR_8,	APIC_TDCR_16,
	APIC_TDCR_32,	APIC_TDCR_64,	APIC_TDCR_128,	APIC_TDCR_1
};
#define APIC_TIMER_NDIVISORS	(int)(NELEM(lapic_timer_divisors))

/*
 * APIC ID <-> CPU ID mapping structures.
 */
int	cpu_id_to_apic_id[NAPICID];
int	apic_id_to_cpu_id[NAPICID];
int	lapic_enable = 1;

/*
 * Enable LAPIC, configure interrupts.
 *
 * Called with bsp == TRUE exactly once on the boot processor and with
 * bsp == FALSE on each AP.  Programs LINT0/LINT1, masks the error and
 * performance-counter LVT entries, sets the timer vector (masked), clears
 * the TPR, enables the LAPIC via the SVR and, on the BSP, calibrates and
 * optionally registers the LAPIC timer as a cputimer interrupt source.
 */
void
lapic_init(boolean_t bsp)
{
	uint32_t timer;
	u_int   temp;

	/*
	 * Install vectors
	 *
	 * Since IDT is shared between BSP and APs, these vectors
	 * only need to be installed once; we do it on BSP.
	 */
	if (bsp) {
		/* Install a 'Spurious INTerrupt' vector */
		setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install a timer vector */
		setidt(XTIMER_OFFSET, Xtimer,
		    SDT_SYSIGT, SEL_KPL, 0);

#ifdef SMP
		/* Install an inter-CPU IPI for TLB invalidation */
		setidt(XINVLTLB_OFFSET, Xinvltlb,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install an inter-CPU IPI for IPIQ messaging */
		setidt(XIPIQ_OFFSET, Xipiq,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install an inter-CPU IPI for CPU stop/restart */
		setidt(XCPUSTOP_OFFSET, Xcpustop,
		    SDT_SYSIGT, SEL_KPL, 0);
#endif
	}

	/*
	 * Setup LINT0 as ExtINT on the BSP.  This is theoretically an
	 * aggregate interrupt input from the 8259.  The INTA cycle
	 * will be routed to the external controller (the 8259) which
	 * is expected to supply the vector.
	 *
	 * Must be setup edge triggered, active high.
	 *
	 * Disable LINT0 on BSP, if I/O APIC is enabled.
	 *
	 * Disable LINT0 on the APs.  It doesn't matter what delivery
	 * mode we use because we leave it masked.
	 */
	temp = lapic->lvt_lint0;
	temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
		  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
	if (bsp) {
		temp |= APIC_LVT_DM_EXTINT;
		if (ioapic_enable)
			temp |= APIC_LVT_MASKED;
	} else {
		temp |= APIC_LVT_DM_FIXED | APIC_LVT_MASKED;
	}
	lapic->lvt_lint0 = temp;

	/*
	 * Setup LINT1 as NMI.
	 *
	 * Must be setup edge trigger, active high.
	 *
	 * Enable LINT1 on BSP, if I/O APIC is enabled.
	 *
	 * Disable LINT1 on the APs.
	 */
	temp = lapic->lvt_lint1;
	temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
		  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
	temp |= APIC_LVT_MASKED | APIC_LVT_DM_NMI;
	if (bsp && ioapic_enable)
		temp &= ~APIC_LVT_MASKED;
	lapic->lvt_lint1 = temp;

	/*
	 * Mask the LAPIC error interrupt, LAPIC performance counter
	 * interrupt.
	 */
	lapic->lvt_error = lapic->lvt_error | APIC_LVT_MASKED;
	lapic->lvt_pcint = lapic->lvt_pcint | APIC_LVT_MASKED;

	/*
	 * Set LAPIC timer vector and mask the LAPIC timer interrupt.
	 * The timer is unmasked later by lapic_timer_intr_enable().
	 */
	timer = lapic->lvt_timer;
	timer &= ~APIC_LVTT_VECTOR;
	timer |= XTIMER_OFFSET;
	timer |= APIC_LVTT_MASKED;
	lapic->lvt_timer = timer;

	/*
	 * Set the Task Priority Register as needed.   At the moment allow
	 * interrupts on all cpus (the APs will remain CLId until they are
	 * ready to deal).
	 */
	temp = lapic->tpr;
	temp &= ~APIC_TPR_PRIO;		/* clear priority field */
	lapic->tpr = temp;

	/*
	 * Enable the LAPIC
	 */
	temp = lapic->svr;
	temp |= APIC_SVR_ENABLE;	/* enable the LAPIC */
	temp &= ~APIC_SVR_FOCUS_DISABLE; /* enable lopri focus processor */

	/*
	 * Set the spurious interrupt vector.  The low 4 bits of the vector
	 * must be 1111 (hardware requirement for the spurious vector).
	 */
	if ((XSPURIOUSINT_OFFSET & 0x0F) != 0x0F)
		panic("bad XSPURIOUSINT_OFFSET: 0x%08x", XSPURIOUSINT_OFFSET);
	temp &= ~APIC_SVR_VECTOR;
	temp |= XSPURIOUSINT_OFFSET;

	lapic->svr = temp;

	/*
	 * Pump out a few EOIs to clean out interrupts that got through
	 * before we were able to set the TPR.
	 */
	lapic->eoi = 0;
	lapic->eoi = 0;
	lapic->eoi = 0;

	if (bsp) {
		/* Calibrate once on the BSP; APs reuse the divisor. */
		lapic_timer_calibrate();
		if (lapic_timer_enable) {
			cputimer_intr_register(&lapic_cputimer_intr);
			cputimer_intr_select(&lapic_cputimer_intr, 0);
		}
	} else {
		lapic_timer_set_divisor(lapic_timer_divisor_idx);
	}

	if (bootverbose)
		apic_dump("apic_initialize()");
}

/*
 * Program the LAPIC timer divide-configuration register from the divisor
 * table.  divisor_idx must be a valid index (BSP must have calibrated).
 */
static void
lapic_timer_set_divisor(int divisor_idx)
{
	KKASSERT(divisor_idx >= 0 && divisor_idx < APIC_TIMER_NDIVISORS);
	lapic->dcr_timer = lapic_timer_divisors[divisor_idx];
}

/*
 * Arm a one-shot countdown of 'count' timer ticks, explicitly clearing
 * periodic mode in the timer LVT first.
 */
static void
lapic_timer_oneshot(u_int count)
{
	uint32_t value;

	value = lapic->lvt_timer;
	value &= ~APIC_LVTT_PERIODIC;
	lapic->lvt_timer = value;
	lapic->icr_timer = count;
}

/*
 * Re-arm the countdown without touching the timer LVT; assumes one-shot
 * mode was already established (by lapic_timer_oneshot()).
 */
static void
lapic_timer_oneshot_quick(u_int count)
{
	lapic->icr_timer = count;
}

/*
 * Calibrate the LAPIC timer against DELAY().  Walks the divisor table
 * until a 2-second countdown from APIC_TIMER_MAX_COUNT does not pin at
 * the maximum count, then records ticks-per-second in
 * lapic_cputimer_intr.freq (the elapsed count over 2s, divided by 2).
 * Leaves lapic_timer_divisor_idx set for the APs.  BSP only.
 */
static void
lapic_timer_calibrate(void)
{
	sysclock_t value;

	/* Try to calibrate the local APIC timer. */
	for (lapic_timer_divisor_idx = 0;
	     lapic_timer_divisor_idx < APIC_TIMER_NDIVISORS;
	     lapic_timer_divisor_idx++) {
		lapic_timer_set_divisor(lapic_timer_divisor_idx);
		lapic_timer_oneshot(APIC_TIMER_MAX_COUNT);
		DELAY(2000000);
		value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
		if (value != APIC_TIMER_MAX_COUNT)
			break;
	}
	if (lapic_timer_divisor_idx >= APIC_TIMER_NDIVISORS)
		panic("lapic: no proper timer divisor?!\n");
	lapic_cputimer_intr.freq = value / 2;

	kprintf("lapic: divisor index %d, frequency %u Hz\n",
		lapic_timer_divisor_idx, lapic_cputimer_intr.freq);
}

/*
 * Common LAPIC timer interrupt path: mark the one-shot as expired and run
 * any pending systimers on this cpu.  'frame' may be NULL when no trap
 * frame is available.
 */
static void
lapic_timer_process_oncpu(struct globaldata *gd, struct intrframe *frame)
{
	sysclock_t count;

	gd->gd_timer_running = 0;

	count = sys_cputimer->count();
	if (TAILQ_FIRST(&gd->gd_systimerq) != NULL)
		systimer_intr(&count, 0, frame);
}

/* Timer interrupt entry without a trap frame. */
void
lapic_timer_process(void)
{
	lapic_timer_process_oncpu(mycpu, NULL);
}

/* Timer interrupt entry with a trap frame. */
void
lapic_timer_process_frame(struct intrframe *frame)
{
	lapic_timer_process_oncpu(mycpu, frame);
}

/*
 * This manual debugging code is called unconditionally from Xtimer
 * (the lapic timer interrupt) whether the current thread is in a
 * critical section or not) and can be useful in tracking down lockups.
 *
 * NOTE: MANUAL DEBUG CODE
 */
#if 0
static int saveticks[SMP_MAXCPU];
static int savecounts[SMP_MAXCPU];
#endif

void
lapic_timer_always(struct intrframe *frame)
{
#if 0
	globaldata_t gd = mycpu;
	int cpu = gd->gd_cpuid;
	char buf[64];
	short *gptr;
	int i;

	/*
	 * Scribble per-cpu status directly into the VGA text buffer so a
	 * hard lockup still leaves a visible trace on screen.
	 */
	if (cpu <= 20) {
		gptr = (short *)0xFFFFFFFF800b8000 + 80 * cpu;
		*gptr = ((*gptr + 1) & 0x00FF) | 0x0700;
		++gptr;

		ksnprintf(buf, sizeof(buf), " %p %16s %d %16s ",
		    (void *)frame->if_rip, gd->gd_curthread->td_comm, ticks,
		    gd->gd_infomsg);
		for (i = 0; buf[i]; ++i) {
			gptr[i] = 0x0700 | (unsigned char)buf[i];
		}
	}
#if 0
	/* Cross-cpu ticks watchdog: panic if any cpu's ticks stalls. */
	if (saveticks[gd->gd_cpuid] != ticks) {
		saveticks[gd->gd_cpuid] = ticks;
		savecounts[gd->gd_cpuid] = 0;
	}
	++savecounts[gd->gd_cpuid];
	if (savecounts[gd->gd_cpuid] > 2000 && panicstr == NULL) {
		panic("cpud %d panicing on ticks failure",
			gd->gd_cpuid);
	}
	for (i = 0; i < ncpus; ++i) {
		int delta;
		if (saveticks[i] && panicstr == NULL) {
			delta = saveticks[i] - ticks;
			if (delta < -10 || delta > 10) {
				panic("cpu %d panicing on cpu %d watchdog",
					gd->gd_cpuid, i);
			}
		}
	}
#endif
#endif
}

/*
 * cputimer_intr reload callback.  Converts 'reload' from sys_cputimer
 * ticks to LAPIC timer ticks, clamps to a minimum of 2, and re-arms the
 * one-shot.  If a countdown is already running, only shorten it (never
 * push the pending expiry further out).
 */
static void
lapic_timer_intr_reload(struct cputimer_intr *cti, sysclock_t reload)
{
	struct globaldata *gd = mycpu;

	reload = (int64_t)reload * cti->freq / sys_cputimer->freq;
	if (reload < 2)
		reload = 2;

	if (gd->gd_timer_running) {
		if (reload < lapic->ccr_timer)
			lapic_timer_oneshot_quick(reload);
	} else {
		gd->gd_timer_running = 1;
		lapic_timer_oneshot_quick(reload);
	}
}

/*
 * cputimer_intr enable callback: unmask the timer LVT (one-shot mode)
 * and apply the AMD C1E fixup if needed.
 */
static void
lapic_timer_intr_enable(struct cputimer_intr *cti __unused)
{
	uint32_t timer;

	timer = lapic->lvt_timer;
	timer &= ~(APIC_LVTT_MASKED | APIC_LVTT_PERIODIC);
	lapic->lvt_timer = timer;

	lapic_timer_fixup_handler(NULL);
}

/*
 * Work around AMD C1E, which stops the LAPIC timer on CMP halt.  If
 * 'arg' is non-NULL it points to an int that is set to 1 when the fixup
 * was applied and the timer was kick-started, 0 otherwise.
 */
static void
lapic_timer_fixup_handler(void *arg)
{
	int *started = arg;

	if (started != NULL)
		*started = 0;

	if (cpu_vendor_id == CPU_VENDOR_AMD) {
		/*
		 * Detect the presence of C1E capability mostly on latest
		 * dual-cores (or future) k8 family.  This feature renders
		 * the local APIC timer dead, so we disable it by reading
		 * the Interrupt Pending Message register and clearing both
		 * C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
		 *
		 * Reference:
		 *   "BIOS and Kernel Developer's Guide for AMD NPT
		 *    Family 0Fh Processors"
		 *   #32559 revision 3.00
		 */
		if ((cpu_id & 0x00000f00) == 0x00000f00 &&
		    (cpu_id & 0x0fff0000) >= 0x00040000) {
			uint64_t msr;

			msr = rdmsr(0xc0010055);
			if (msr & 0x18000000) {
				struct globaldata *gd = mycpu;

				kprintf("cpu%d: AMD C1E detected\n",
					gd->gd_cpuid);
				wrmsr(0xc0010055, msr & ~0x18000000ULL);

				/*
				 * We are kinda stalled;
				 * kick start again.
				 */
				gd->gd_timer_running = 1;
				lapic_timer_oneshot_quick(2);

				if (started != NULL)
					*started = 1;
			}
		}
	}
}

/*
 * Per-cpu restart handler: apply the C1E fixup and, if that did not
 * already kick the timer, re-arm a minimal one-shot so systimer
 * processing resumes.
 */
static void
lapic_timer_restart_handler(void *dummy __unused)
{
	int started;

	lapic_timer_fixup_handler(&started);
	if (!started) {
		struct globaldata *gd = mycpu;

		gd->gd_timer_running = 1;
		lapic_timer_oneshot_quick(2);
	}
}

/*
 * This function is called only by ACPI-CA code currently:
 * - AMD C1E fixup.  AMD C1E only seems to happen after ACPI
 *   module controls PM.  So once ACPI-CA is attached, we try
 *   to apply the fixup to prevent LAPIC timer from hanging.
 */
static void
lapic_timer_intr_pmfixup(struct cputimer_intr *cti __unused)
{
#ifdef SMP
	lwkt_send_ipiq_mask(smp_active_mask,
			    lapic_timer_fixup_handler, NULL);
#else
	lapic_timer_fixup_handler(NULL);
#endif
}

/*
 * cputimer_intr restart callback: run the restart handler on every
 * active cpu (via IPI on SMP).
 */
static void
lapic_timer_intr_restart(struct cputimer_intr *cti __unused)
{
#ifdef SMP
	lwkt_send_ipiq_mask(smp_active_mask, lapic_timer_restart_handler, NULL);
#else
	lapic_timer_restart_handler(NULL);
#endif
}


/*
 * dump contents of local APIC registers
 */
void
apic_dump(char* str)
{
	kprintf("SMP: CPU%d %s:\n", mycpu->gd_cpuid, str);
	kprintf("     lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
		lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
}

#ifdef SMP

/*
 * Inter Processor Interrupt functions.
 */

/*
 * Send APIC IPI 'vector' to 'destType' via 'deliveryMode'.
 *
 *  destType is 1 of: APIC_DEST_SELF, APIC_DEST_ALLISELF, APIC_DEST_ALLESELF
 *  vector is any valid SYSTEM INT vector
 *  delivery_mode is 1 of: APIC_DELMODE_FIXED, APIC_DELMODE_LOWPRIO
 *
 * WARNINGS!
 *
 * We now implement a per-cpu interlock (gd->gd_npoll) to prevent more than
 * one IPI from being sent to any given cpu at a time.  Thus we no longer
 * have to process incoming IPIs while waiting for the status to clear.
 * No deadlock should be possible.
 *
 * We now physically disable interrupts for the lapic ICR operation.  If
 * we do not do this then it looks like an EOI sent to the lapic (which
 * occurs even with a critical section) can interfere with the command
 * register ready status and cause an IPI to be lost.
 *
 * e.g. an interrupt can occur, issue the EOI, IRET, and cause the command
 * register to busy just before we write to icr_lo, resulting in a lost
 * issuance.  This only appears to occur on Intel cpus and is not
 * documented.  It could simply be that cpus are so fast these days that
 * it was always an issue, but is only now rearing its ugly head.  This
 * is conjecture.
 */
int
apic_ipi(int dest_type, int vector, int delivery_mode)
{
	unsigned long rflags;
	u_long  icr_lo;

	rflags = read_rflags();
	cpu_disable_intr();
	/* Wait for any previous IPI to be accepted by the LAPIC. */
	while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
		cpu_pause();
	}
	icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) | dest_type |
		delivery_mode | vector;
	lapic->icr_lo = icr_lo;
	write_rflags(rflags);

	return 0;
}

/*
 * Send an IPI 'vector' to a single target 'cpu' via 'delivery_mode',
 * using the destination-field addressing mode.  Interrupts are physically
 * disabled around the ICR access (see the warnings above apic_ipi()).
 */
void
single_apic_ipi(int cpu, int vector, int delivery_mode)
{
	unsigned long rflags;
	u_long  icr_lo;
	u_long  icr_hi;

	rflags = read_rflags();
	cpu_disable_intr();
	while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
		cpu_pause();
	}
	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
	icr_hi |= (CPUID_TO_APICID(cpu) << 24);
	lapic->icr_hi = icr_hi;

	/* build ICR_LOW */
	icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) |
		 APIC_DEST_DESTFLD | delivery_mode | vector;

	/* write APIC ICR */
	lapic->icr_lo = icr_lo;
	write_rflags(rflags);
}

#if 0

/*
 * Returns 0 if the apic is busy, 1 if we were able to queue the request.
 *
 * NOT WORKING YET!  The code as-is may end up not queueing an IPI at all
 * to the target, and the scheduler does not 'poll' for IPI messages.
 */
int
single_apic_ipi_passive(int cpu, int vector, int delivery_mode)
{
	u_long  icr_lo;
	u_long  icr_hi;

	crit_enter();
	if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
	    crit_exit();
	    return(0);
	}
	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
	icr_hi |= (CPUID_TO_APICID(cpu) << 24);
	lapic->icr_hi = icr_hi;

	/*
	 * build ICR_LOW
	 *
	 * NOTE(review): uses APIC_RESV2_MASK here where single_apic_ipi()
	 * uses APIC_ICRLO_RESV_MASK -- verify before ever enabling this.
	 */
	icr_lo = (lapic->icr_lo & APIC_RESV2_MASK)
	    | APIC_DEST_DESTFLD | delivery_mode | vector;

	/* write APIC ICR */
	lapic->icr_lo = icr_lo;
	crit_exit();
	return(1);
}

#endif

/*
 * Send APIC IPI 'vector' to 'target's via 'delivery_mode'.
 *
 * target is a bitmask of destination cpus.  Vector is any
 * valid system INT vector.  Delivery mode may be either
 * APIC_DELMODE_FIXED or APIC_DELMODE_LOWPRIO.
 */
void
selected_apic_ipi(cpumask_t target, int vector, int delivery_mode)
{
	crit_enter();
	while (target) {
		int n = BSFCPUMASK(target);
		target &= ~CPUMASK(n);
		single_apic_ipi(n, vector, delivery_mode);
	}
	crit_exit();
}

#endif	/* SMP */

/*
 * Timer code, in development...
 *  - suggested by rgrimes@gndrsh.aac.dev.com
 */
int
get_apic_timer_frequency(void)
{
	return(lapic_cputimer_intr.freq);
}

/*
 * Load a 'downcount time' in uSeconds.
 */
void
set_apic_timer(int us)
{
	u_int count;

	/*
	 * When we reach here, lapic timer's frequency
	 * must have been calculated as well as the
	 * divisor (lapic->dcr_timer is setup during the
	 * divisor calculation).
	 */
	KKASSERT(lapic_cputimer_intr.freq != 0 &&
		 lapic_timer_divisor_idx >= 0);

	/* Convert microseconds to timer ticks, rounding up. */
	count = ((us * (int64_t)lapic_cputimer_intr.freq) + 999999) / 1000000;
	lapic_timer_oneshot(count);
}


/*
 * Read remaining time in timer, in timer ticks.
 */
int
read_apic_timer(void)
{
#if 0
	/** XXX FIXME: we need to return the actual remaining time,
	 *         for now we just return the remaining count.
	 */
#else
	return lapic->ccr_timer;
#endif
}


/*
 * Spin-style delay, set delay time in uS, spin till it drains.
 */
void
u_sleep(int count)
{
	set_apic_timer(count);
	while (read_apic_timer())
		 /* spin */ ;
}

/*
 * Return the first APIC ID >= 'start' that is not mapped to any cpu,
 * or NAPICID if every ID from 'start' up is in use.
 */
int
lapic_unused_apic_id(int start)
{
	int i;

	for (i = start; i < NAPICID; ++i) {
		if (APICID_TO_CPUID(i) == -1)
			return i;
	}
	return NAPICID;
}

/*
 * Map the local APIC register window (uncacheable) at physical address
 * 'lapic_addr' and point the global 'lapic' at it.
 */
void
lapic_map(vm_paddr_t lapic_addr)
{
	lapic = pmap_mapdev_uncacheable(lapic_addr, sizeof(struct LAPIC));
}

/* Registered LAPIC enumerators, kept sorted by descending priority. */
static TAILQ_HEAD(, lapic_enumerator) lapic_enumerators =
	TAILQ_HEAD_INITIALIZER(lapic_enumerators);

/*
 * Probe the registered enumerators in priority order and let the first
 * successful one enumerate the LAPICs.  Clamps the number of usable APs
 * to the hw.ap_max tunable (bounded by MAXCPU - 1).  Returns 0 on
 * success or ENXIO if no enumerator finds a LAPIC.
 */
int
lapic_config(void)
{
	struct lapic_enumerator *e;
	int error, i, ap_max;

	KKASSERT(lapic_enable);

	for (i = 0; i < NAPICID; ++i)
		APICID_TO_CPUID(i) = -1;

	TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
		error = e->lapic_probe(e);
		if (!error)
			break;
	}
	if (e == NULL) {
		kprintf("LAPIC: Can't find LAPIC\n");
		return ENXIO;
	}

	e->lapic_enumerate(e);

	ap_max = MAXCPU - 1;
	TUNABLE_INT_FETCH("hw.ap_max", &ap_max);
	if (ap_max > MAXCPU - 1)
		ap_max = MAXCPU - 1;

	if (naps > ap_max) {
		kprintf("LAPIC: Warning use only %d out of %d "
			"available APs\n",
			ap_max, naps);
		naps = ap_max;
	}

	return 0;
}

/*
 * Register a LAPIC enumerator, inserted before the first existing entry
 * with a lower priority value (i.e. the list stays sorted so higher
 * priority enumerators are probed first).
 */
void
lapic_enumerator_register(struct lapic_enumerator *ne)
{
	struct lapic_enumerator *e;

	TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
		if (e->lapic_prio < ne->lapic_prio) {
			TAILQ_INSERT_BEFORE(e, ne, lapic_link);
			return;
		}
	}
	TAILQ_INSERT_TAIL(&lapic_enumerators, ne, lapic_link);
}

/* Record the bidirectional CPU ID <-> APIC ID mapping for one cpu. */
void
lapic_set_cpuid(int cpu_id, int apic_id)
{
	CPUID_TO_APICID(cpu_id) = apic_id;
	APICID_TO_CPUID(apic_id) = cpu_id;
}

/*
 * Rewire LINT0/LINT1 on the BSP for operation without an I/O APIC:
 * unmask LINT0 (the ExtINT path from the 8259) and mask LINT1.
 */
void
lapic_fixup_noioapic(void)
{
	u_int   temp;

	/* Only allowed on BSP */
	KKASSERT(mycpuid == 0);
	KKASSERT(!ioapic_enable);

	temp = lapic->lvt_lint0;
	temp &= ~APIC_LVT_MASKED;
	lapic->lvt_lint0 = temp;

	temp = lapic->lvt_lint1;
	temp |= APIC_LVT_MASKED;
	lapic->lvt_lint1 = temp;
}

/*
 * Boot-time SYSINIT hook: configure the LAPIC if enabled (disabling it
 * on failure), then initialize the BSP's local APIC; otherwise fall back
 * to the ICU, turning the I/O APIC off as well.
 */
static void
lapic_sysinit(void *dummy __unused)
{
	if (lapic_enable) {
		int error;

		error = lapic_config();
		if (error)
			lapic_enable = 0;
	}

	if (lapic_enable) {
		/* Initialize BSP's local APIC */
		lapic_init(TRUE);
	} else if (ioapic_enable) {
		ioapic_enable = 0;
		icu_reinit_noioapic();
	}
}
SYSINIT(lapic, SI_BOOT2_LAPIC, SI_ORDER_FIRST, lapic_sysinit, NULL)