/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
 * $DragonFly: src/sys/kern/kern_intr.c,v 1.55 2008/09/01 12:49:00 sephe Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/serialize.h>
#include <sys/interrupt.h>
#include <sys/bus.h>
#include <sys/machintr.h>

#include <machine/frame.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

struct intr_info;

typedef struct intrec {
        struct intrec   *next;
        struct intr_info *info;
        inthand2_t      *handler;
        void            *argument;
        char            *name;
        int             intr;
        int             intr_flags;
        struct lwkt_serialize *serializer;
} *intrec_t;

struct intr_info {
        intrec_t        i_reclist;
        struct thread   i_thread;
        struct random_softc i_random;
        int             i_running;
        long            i_count;        /* interrupts dispatched */
        int             i_mplock_required;
        int             i_fast;
        int             i_slow;
        int             i_state;
        int             i_errorticks;
        unsigned long   i_straycount;
} intr_info_ary[MAX_INTS];

int max_installed_hard_intr;
int max_installed_soft_intr;

#define EMERGENCY_INTR_POLLING_FREQ_MAX 20000

/*
 * Assert that callers into interrupt handlers don't return with
 * dangling tokens, spinlocks, or mp locks.
 */
#ifdef INVARIANTS

# ifdef SMP
/* INVARIANTS & SMP */
# define SMP_INVARIANTS_DECLARE         \
        int mpcount;

# define SMP_INVARIANTS_GET(td)         \
        mpcount = (td)->td_mpcount

# define SMP_INVARIANTS_TEST(td, name)                          \
        KASSERT(mpcount == (td)->td_mpcount,                    \
                ("mpcount mismatch after interrupt handler %s", \
                name))

# define SMP_INVARIANTS_ADJMP(count)    \
        mpcount += (count)

# else
/* INVARIANTS & !SMP */
# define SMP_INVARIANTS_DECLARE
# define SMP_INVARIANTS_GET(td)
# define SMP_INVARIANTS_TEST(td, name)

# endif /* SMP */

#define TD_INVARIANTS_DECLARE   \
        SMP_INVARIANTS_DECLARE  \
        int spincount;          \
        lwkt_tokref_t curstop

#define TD_INVARIANTS_GET(td)                                   \
        do {                                                    \
                SMP_INVARIANTS_GET(td);                         \
                spincount = (td)->td_gd->gd_spinlocks_wr;       \
                curstop = (td)->td_toks_stop;                   \
        } while(0)

#define TD_INVARIANTS_TEST(td, name)                                    \
        do {                                                            \
                KASSERT(spincount == (td)->td_gd->gd_spinlocks_wr,      \
                        ("spincount mismatch after interrupt handler %s", \
                        name));                                         \
                KASSERT(curstop == (td)->td_toks_stop,                  \
                        ("token count mismatch after interrupt handler %s", \
                        name));                                         \
                SMP_INVARIANTS_TEST(td, name);                          \
        } while(0)

#else
/* !INVARIANTS */
# ifdef SMP
/* !INVARIANTS & SMP */
# define SMP_INVARIANTS_ADJMP(count)

# endif

#define TD_INVARIANTS_DECLARE
#define TD_INVARIANTS_GET(td)
#define TD_INVARIANTS_TEST(td, name)

#endif /* INVARIANTS */

static int sysctl_emergency_freq(SYSCTL_HANDLER_ARGS);
static int sysctl_emergency_enable(SYSCTL_HANDLER_ARGS);
static void emergency_intr_timer_callback(systimer_t, struct intrframe *);
static void ithread_handler(void *arg);
static void ithread_emergency(void *arg);
static void report_stray_interrupt(int intr, struct intr_info *info);
static void int_moveto_destcpu(int *, int *, int);
static void int_moveto_origcpu(int, int);

int intr_info_size = sizeof(intr_info_ary) / sizeof(intr_info_ary[0]);

static struct systimer emergency_intr_timer;
static struct thread emergency_intr_thread;

#define ISTATE_NOTHREAD         0
#define ISTATE_NORMAL           1
#define ISTATE_LIVELOCKED       2

static int livelock_limit = 40000;
static int livelock_lowater = 20000;
static int livelock_debug = -1;
SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
        CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
SYSCTL_INT(_kern, OID_AUTO, livelock_lowater,
        CTLFLAG_RW, &livelock_lowater, 0, "Livelock low-water mark restore");
SYSCTL_INT(_kern, OID_AUTO, livelock_debug,
        CTLFLAG_RW, &livelock_debug, 0, "Livelock debug intr#");

static int emergency_intr_enable = 0;   /* emergency interrupt polling */
TUNABLE_INT("kern.emergency_intr_enable", &emergency_intr_enable);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_enable, CTLTYPE_INT | CTLFLAG_RW,
        0, 0, sysctl_emergency_enable, "I", "Emergency Interrupt Poll Enable");

static int emergency_intr_freq = 10;    /* emergency polling frequency */
TUNABLE_INT("kern.emergency_intr_freq", &emergency_intr_freq);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_freq, CTLTYPE_INT | CTLFLAG_RW,
        0, 0, sysctl_emergency_freq, "I", "Emergency Interrupt Poll Frequency");
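
/*
 * Editorial note: a minimal usage sketch for the knobs above.  The tunable
 * and sysctl names come straight from the declarations above; the exact
 * loader.conf/sysctl(8) workflow is the usual BSD convention and is assumed
 * here rather than taken from this file:
 *
 *      # /boot/loader.conf - consumed at boot via the TUNABLE_INT hooks
 *      kern.emergency_intr_enable="1"
 *      kern.emergency_intr_freq="100"
 *
 *      # at runtime via the SYSCTL_PROC/SYSCTL_INT handlers
 *      sysctl kern.emergency_intr_freq=100
 *      sysctl kern.livelock_limit=40000
 */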
/*
 * Sysctl support routines
 */
static int
sysctl_emergency_enable(SYSCTL_HANDLER_ARGS)
{
        int error, enabled;

        enabled = emergency_intr_enable;
        error = sysctl_handle_int(oidp, &enabled, 0, req);
        if (error || req->newptr == NULL)
                return error;
        emergency_intr_enable = enabled;
        if (emergency_intr_enable) {
                systimer_adjust_periodic(&emergency_intr_timer,
                                         emergency_intr_freq);
        } else {
                systimer_adjust_periodic(&emergency_intr_timer, 1);
        }
        return 0;
}

static int
sysctl_emergency_freq(SYSCTL_HANDLER_ARGS)
{
        int error, phz;

        phz = emergency_intr_freq;
        error = sysctl_handle_int(oidp, &phz, 0, req);
        if (error || req->newptr == NULL)
                return error;
        if (phz <= 0)
                return EINVAL;
        else if (phz > EMERGENCY_INTR_POLLING_FREQ_MAX)
                phz = EMERGENCY_INTR_POLLING_FREQ_MAX;

        emergency_intr_freq = phz;
        if (emergency_intr_enable) {
                systimer_adjust_periodic(&emergency_intr_timer,
                                         emergency_intr_freq);
        } else {
                systimer_adjust_periodic(&emergency_intr_timer, 1);
        }
        return 0;
}

/*
 * Register an SWI or INTerrupt handler.
 */
void *
register_swi(int intr, inthand2_t *handler, void *arg, const char *name,
             struct lwkt_serialize *serializer)
{
        if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
                panic("register_swi: bad intr %d", intr);
        return(register_int(intr, handler, arg, name, serializer, 0));
}

void *
register_swi_mp(int intr, inthand2_t *handler, void *arg, const char *name,
                struct lwkt_serialize *serializer)
{
        if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
                panic("register_swi_mp: bad intr %d", intr);
        return(register_int(intr, handler, arg, name, serializer,
                            INTR_MPSAFE));
}

void *
register_int(int intr, inthand2_t *handler, void *arg, const char *name,
             struct lwkt_serialize *serializer, int intr_flags)
{
        struct intr_info *info;
        struct intrec **list;
        intrec_t rec;
        int orig_cpuid, cpuid;

        if (intr < 0 || intr >= MAX_INTS)
                panic("register_int: bad intr %d", intr);
        if (name == NULL)
                name = "???";
        info = &intr_info_ary[intr];

        /*
         * Construct an interrupt handler record
         */
        rec = kmalloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT);
        rec->name = kmalloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT);
        strcpy(rec->name, name);

        rec->info = info;
        rec->handler = handler;
        rec->argument = arg;
        rec->intr = intr;
        rec->intr_flags = intr_flags;
        rec->next = NULL;
        rec->serializer = serializer;

        /*
         * Create an emergency polling thread and set up a systimer to wake
         * it up.
         */
        if (emergency_intr_thread.td_kstack == NULL) {
                lwkt_create(ithread_emergency, NULL, NULL,
                            &emergency_intr_thread,
                            TDF_STOPREQ | TDF_INTTHREAD, -1, "ithread emerg");
                systimer_init_periodic_nq(&emergency_intr_timer,
                            emergency_intr_timer_callback,
                            &emergency_intr_thread,
                            (emergency_intr_enable ? emergency_intr_freq : 1));
        }

        int_moveto_destcpu(&orig_cpuid, &cpuid, intr);

        /*
         * Create an interrupt thread if necessary, leave it in an
         * unscheduled state.
         */
        if (info->i_state == ISTATE_NOTHREAD) {
                info->i_state = ISTATE_NORMAL;
                lwkt_create(ithread_handler, (void *)(intptr_t)intr, NULL,
                            &info->i_thread, TDF_STOPREQ | TDF_INTTHREAD, -1,
                            "ithread %d", intr);
                if (intr >= FIRST_SOFTINT)
                        lwkt_setpri(&info->i_thread, TDPRI_SOFT_NORM);
                else
                        lwkt_setpri(&info->i_thread, TDPRI_INT_MED);
                info->i_thread.td_preemptable = lwkt_preempt;
        }

        list = &info->i_reclist;

        /*
         * Keep track of how many fast and slow interrupts we have.
         * Set i_mplock_required if any handler in the chain requires
         * the MP lock to operate.
         */
        if ((intr_flags & INTR_MPSAFE) == 0)
                info->i_mplock_required = 1;
        if (intr_flags & INTR_CLOCK)
                ++info->i_fast;
        else
                ++info->i_slow;

        /*
         * Enable random number generation keying off of this interrupt.
         */
        if ((intr_flags & INTR_NOENTROPY) == 0 &&
            info->i_random.sc_enabled == 0) {
                info->i_random.sc_enabled = 1;
                info->i_random.sc_intr = intr;
        }

        /*
         * Add the record to the interrupt list.
         */
        crit_enter();
        while (*list != NULL)
                list = &(*list)->next;
        *list = rec;
        crit_exit();

        /*
         * Update max_installed_hard_intr to make the emergency intr poll
         * a bit more efficient.
         */
        if (intr < FIRST_SOFTINT) {
                if (max_installed_hard_intr <= intr)
                        max_installed_hard_intr = intr + 1;
        } else {
                if (max_installed_soft_intr <= intr)
                        max_installed_soft_intr = intr + 1;
        }

        /*
         * Setup the machine level interrupt vector.
         *
         * XXX temporary workaround for some ACPI brokenness.  ACPI installs
         * its interrupt too early, before the IOAPICs have been configured,
         * which means the IOAPIC is not enabled by the registration of the
         * ACPI interrupt.  Anything else sharing that IRQ will wind up not
         * being enabled.  Temporarily work around the problem by always
         * installing and enabling on every new interrupt handler, even
         * if one has already been setup on that irq.
         */
        if (intr < FIRST_SOFTINT /* && info->i_slow + info->i_fast == 1*/) {
                if (machintr_vector_setup(intr, intr_flags))
                        kprintf("machintr_vector_setup: failed on irq %d\n",
                                intr);
        }

        int_moveto_origcpu(orig_cpuid, cpuid);

        return(rec);
}
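
/*
 * Editorial note: a hedged sketch of how a driver might use the
 * registration API above.  "mydev_intr" and "sc" are hypothetical names;
 * the flag and the register_int()/unregister_int() signatures are taken
 * from this file, and the two-argument handler shape matches how
 * rec->handler(rec->argument, frame) is invoked below:
 *
 *      static void
 *      mydev_intr(void *arg, void *frame)      // inthand2_t-shaped handler
 *      {
 *              struct mydev_softc *sc = arg;   // hypothetical softc
 *              // ... service the device ...
 *      }
 *
 *      // register an MPSAFE hardware interrupt handler on irq, keeping
 *      // the returned cookie so the handler can be torn down on detach:
 *      sc->intr_cookie = register_int(irq, mydev_intr, sc, "mydev",
 *                                     NULL, INTR_MPSAFE);
 *      ...
 *      unregister_int(sc->intr_cookie);
 */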
void
unregister_swi(void *id)
{
        unregister_int(id);
}

void
unregister_int(void *id)
{
        struct intr_info *info;
        struct intrec **list;
        intrec_t rec;
        int intr, orig_cpuid, cpuid;

        intr = ((intrec_t)id)->intr;

        if (intr < 0 || intr >= MAX_INTS)
                panic("unregister_int: bad intr %d", intr);

        info = &intr_info_ary[intr];

        int_moveto_destcpu(&orig_cpuid, &cpuid, intr);

        /*
         * Remove the interrupt descriptor, adjust the descriptor count,
         * and teardown the machine level vector if this was the last
         * interrupt.
         */
        crit_enter();
        list = &info->i_reclist;
        while ((rec = *list) != NULL) {
                if (rec == id)
                        break;
                list = &rec->next;
        }
        if (rec) {
                intrec_t rec0;

                *list = rec->next;
                if (rec->intr_flags & INTR_CLOCK)
                        --info->i_fast;
                else
                        --info->i_slow;
                if (intr < FIRST_SOFTINT &&
                    info->i_fast + info->i_slow == 0)
                        machintr_vector_teardown(intr);

                /*
                 * Clear i_mplock_required if no handlers in the chain
                 * require the MP lock.
                 */
                for (rec0 = info->i_reclist; rec0; rec0 = rec0->next) {
                        if ((rec0->intr_flags & INTR_MPSAFE) == 0)
                                break;
                }
                if (rec0 == NULL)
                        info->i_mplock_required = 0;
        }

        crit_exit();

        int_moveto_origcpu(orig_cpuid, cpuid);

        /*
         * Free the record.
         */
        if (rec != NULL) {
                kfree(rec->name, M_DEVBUF);
                kfree(rec, M_DEVBUF);
        } else {
                kprintf("warning: unregister_int: int %d handler for %s "
                        "not found\n", intr, ((intrec_t)id)->name);
        }
}

const char *
get_registered_name(int intr)
{
        intrec_t rec;

        if (intr < 0 || intr >= MAX_INTS)
                panic("get_registered_name: bad intr %d", intr);

        if ((rec = intr_info_ary[intr].i_reclist) == NULL)
                return(NULL);
        else if (rec->next)
                return("mux");
        else
                return(rec->name);
}

int
count_registered_ints(int intr)
{
        struct intr_info *info;

        if (intr < 0 || intr >= MAX_INTS)
                panic("count_registered_ints: bad intr %d", intr);
        info = &intr_info_ary[intr];
        return(info->i_fast + info->i_slow);
}

long
get_interrupt_counter(int intr)
{
        struct intr_info *info;

        if (intr < 0 || intr >= MAX_INTS)
                panic("get_interrupt_counter: bad intr %d", intr);
        info = &intr_info_ary[intr];
        return(info->i_count);
}

void
swi_setpriority(int intr, int pri)
{
        struct intr_info *info;

        if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
                panic("swi_setpriority: bad intr %d", intr);
        info = &intr_info_ary[intr];
        if (info->i_state != ISTATE_NOTHREAD)
                lwkt_setpri(&info->i_thread, pri);
}

void
register_randintr(int intr)
{
        struct intr_info *info;

        if (intr < 0 || intr >= MAX_INTS)
                panic("register_randintr: bad intr %d", intr);
        info = &intr_info_ary[intr];
        info->i_random.sc_intr = intr;
        info->i_random.sc_enabled = 1;
}

void
unregister_randintr(int intr)
{
        struct intr_info *info;

        if (intr < 0 || intr >= MAX_INTS)
                panic("unregister_randintr: bad intr %d", intr);
        info = &intr_info_ary[intr];
        info->i_random.sc_enabled = -1;
}

int
next_registered_randintr(int intr)
{
        struct intr_info *info;

        if (intr < 0 || intr >= MAX_INTS)
                panic("next_registered_randintr: bad intr %d", intr);
        while (intr < MAX_INTS) {
                info = &intr_info_ary[intr];
                if (info->i_random.sc_enabled > 0)
                        break;
                ++intr;
        }
        return(intr);
}

/*
 * Dispatch an interrupt.  If there's nothing to do we have a stray
 * interrupt and can just return, leaving the interrupt masked.
 *
 * We need to schedule the interrupt and set its i_running bit.  If
 * we are not on the interrupt thread's cpu we have to send a message
 * to the correct cpu that will issue the desired action (interlocking
 * with the interrupt thread's critical section).  We do NOT attempt to
 * reschedule interrupts whose i_running bit is already set because
 * this would prematurely wakeup a livelock-limited interrupt thread.
 *
 * i_running is only tested/set on the same cpu as the interrupt thread.
 *
 * We are NOT in a critical section, which will allow the scheduled
 * interrupt to preempt us.  The MP lock might *NOT* be held here.
 */
#ifdef SMP

static void
sched_ithd_remote(void *arg)
{
        sched_ithd((int)(intptr_t)arg);
}

#endif

void
sched_ithd(int intr)
{
        struct intr_info *info;

        info = &intr_info_ary[intr];

        ++info->i_count;
        if (info->i_state != ISTATE_NOTHREAD) {
                if (info->i_reclist == NULL) {
                        report_stray_interrupt(intr, info);
                } else {
#ifdef SMP
                        if (info->i_thread.td_gd == mycpu) {
                                if (info->i_running == 0) {
                                        info->i_running = 1;
                                        if (info->i_state != ISTATE_LIVELOCKED)
                                                lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
                                }
                        } else {
                                lwkt_send_ipiq(info->i_thread.td_gd,
                                               sched_ithd_remote,
                                               (void *)(intptr_t)intr);
                        }
#else
                        if (info->i_running == 0) {
                                info->i_running = 1;
                                if (info->i_state != ISTATE_LIVELOCKED)
                                        lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
                        }
#endif
                }
        } else {
                report_stray_interrupt(intr, info);
        }
}

static void
report_stray_interrupt(int intr, struct intr_info *info)
{
        ++info->i_straycount;
        if (info->i_straycount < 10) {
                if (info->i_errorticks == ticks)
                        return;
                info->i_errorticks = ticks;
                kprintf("sched_ithd: stray interrupt %d on cpu %d\n",
                        intr, mycpuid);
        } else if (info->i_straycount == 10) {
                kprintf("sched_ithd: %ld stray interrupts %d on cpu %d - "
                        "there will be no further reports\n",
                        info->i_straycount, intr, mycpuid);
        }
}

/*
 * This is run from a periodic SYSTIMER (and thus must be MP safe; the BGL
 * might not be held).
 */
static void
ithread_livelock_wakeup(systimer_t st)
{
        struct intr_info *info;

        info = &intr_info_ary[(int)(intptr_t)st->data];
        if (info->i_state != ISTATE_NOTHREAD)
                lwkt_schedule(&info->i_thread);
}

/*
 * Schedule ithread within fast intr handler
 *
 * XXX Protect sched_ithd() call with gd_intr_nesting_level?
 * Interrupts aren't enabled, but still...
 */
static __inline void
ithread_fast_sched(int intr, thread_t td)
{
        ++td->td_nest_count;

        /*
         * We are already in a critical section; exit it now to
         * allow preemption.
         */
        crit_exit_quick(td);
        sched_ithd(intr);
        crit_enter_quick(td);

        --td->td_nest_count;
}
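
/*
 * Editorial summary (derived from the code below, not normative): the
 * dispatch path for a hardware interrupt is roughly
 *
 *      vector asm -> ithread_fast_handler()
 *          -> run INTR_CLOCK ("fast") handlers inline, at most once each
 *          -> anything slow, a serializer miss, or a failed try_mplock()
 *             -> ithread_fast_sched() -> sched_ithd()
 *                 -> lwkt_schedule() the per-irq ithread_handler() loop
 */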
/*
 * This function is called directly from the ICU or APIC vector code assembly
 * to process an interrupt.  The critical section and interrupt deferral
 * checks have already been done but the function is entered WITHOUT
 * a critical section held.  The BGL may or may not be held.
 *
 * Must return non-zero if we do not want the vector code to re-enable
 * the interrupt (which we don't if we have to schedule the interrupt).
 */
int ithread_fast_handler(struct intrframe *frame);

int
ithread_fast_handler(struct intrframe *frame)
{
        int intr;
        struct intr_info *info;
        struct intrec **list;
        int must_schedule;
#ifdef SMP
        int got_mplock;
#endif
        TD_INVARIANTS_DECLARE;
        intrec_t rec, next_rec;
        globaldata_t gd;
        thread_t td;

        intr = frame->if_vec;
        gd = mycpu;
        td = curthread;

        /* We must be in a critical section. */
        KKASSERT(td->td_critcount);

        info = &intr_info_ary[intr];

        /*
         * If we are not processing any FAST interrupts, just schedule the
         * thing.
         */
        if (info->i_fast == 0) {
                ++gd->gd_cnt.v_intr;
                ithread_fast_sched(intr, td);
                return(1);
        }

        /*
         * This should not normally occur since interrupts ought to be
         * masked if the ithread has been scheduled or is running.
         */
        if (info->i_running)
                return(1);

        /*
         * Bump the interrupt nesting level to process any FAST interrupts.
         * Obtain the MP lock as necessary.  If the MP lock cannot be
         * obtained, schedule the interrupt thread to deal with the issue
         * instead.
         *
         * To reduce overhead, just leave the MP lock held once it has been
         * obtained.
         */
        ++gd->gd_intr_nesting_level;
        ++gd->gd_cnt.v_intr;
        must_schedule = info->i_slow;
#ifdef SMP
        got_mplock = 0;
#endif

        TD_INVARIANTS_GET(td);
        list = &info->i_reclist;

        for (rec = *list; rec; rec = next_rec) {
                next_rec = rec->next;   /* rec may be invalid after call */

                if (rec->intr_flags & INTR_CLOCK) {
#ifdef SMP
                        if ((rec->intr_flags & INTR_MPSAFE) == 0 &&
                            got_mplock == 0) {
                                if (try_mplock() == 0) {
                                        /*
                                         * Couldn't get the MP lock; just
                                         * schedule it.
                                         */
                                        must_schedule = 1;
                                        break;
                                }
                                got_mplock = 1;
                                SMP_INVARIANTS_ADJMP(1);
                        }
#endif
                        if (rec->serializer) {
                                must_schedule += lwkt_serialize_handler_try(
                                                rec->serializer, rec->handler,
                                                rec->argument, frame);
                        } else {
                                rec->handler(rec->argument, frame);
                        }
                        TD_INVARIANTS_TEST(td, rec->name);
                }
        }

        /*
         * Cleanup
         */
        --gd->gd_intr_nesting_level;
#ifdef SMP
        if (got_mplock)
                rel_mplock();
#endif

        /*
         * If we had a problem, or mixed fast and slow interrupt handlers are
         * registered, schedule the ithread to catch the missed records (it
         * will just re-run all of them).  A return value of 0 indicates that
         * all handlers have been run and the interrupt can be re-enabled, and
         * a non-zero return indicates that the interrupt thread controls
         * re-enablement.
         */
        if (must_schedule > 0)
                ithread_fast_sched(intr, td);
        else if (must_schedule == 0)
                ++info->i_count;
        return(must_schedule);
}
/*
 * Interrupt threads run this as their main loop.
 *
 * The handler begins execution outside a critical section and with no
 * MP lock.
 *
 * The i_running state starts at 0.  When an interrupt occurs, the hardware
 * interrupt is disabled and sched_ithd() is called.  The HW interrupt
 * remains disabled until all routines have run.  We then call
 * ithread_done() to reenable the HW interrupt and deschedule us until the
 * next interrupt.
 *
 * We are responsible for atomically checking i_running and ithread_done()
 * is responsible for atomically checking for platform-specific delayed
 * interrupts.  i_running for our irq is only set in the context of our cpu,
 * so a critical section is a sufficient interlock.
 */
#define LIVELOCK_TIMEFRAME(freq)        ((freq) >> 2)   /* 1/4 second */

static void
ithread_handler(void *arg)
{
        struct intr_info *info;
        int use_limit;
        __uint32_t lseconds;
        int intr;
        int mpheld;
        struct intrec **list;
        intrec_t rec, nrec;
        globaldata_t gd;
        struct systimer ill_timer;      /* enforced freq. timer */
        u_int ill_count;                /* interrupt livelock counter */
        TD_INVARIANTS_DECLARE;

        ill_count = 0;
        intr = (int)(intptr_t)arg;
        info = &intr_info_ary[intr];
        list = &info->i_reclist;

        /*
         * The loop must be entered with one critical section held.  The
         * thread does not hold the mplock on startup.
         */
        gd = mycpu;
        lseconds = gd->gd_time_seconds;
        crit_enter_gd(gd);
        mpheld = 0;

        for (;;) {
                /*
                 * The chain is only considered MPSAFE if all its interrupt
                 * handlers are MPSAFE.  However, if intr_mpsafe has been
                 * turned off we always operate with the BGL.
                 */
#ifdef SMP
                if (info->i_mplock_required != mpheld) {
                        if (info->i_mplock_required) {
                                KKASSERT(mpheld == 0);
                                get_mplock();
                                mpheld = 1;
                        } else {
                                KKASSERT(mpheld != 0);
                                rel_mplock();
                                mpheld = 0;
                        }
                }
#endif

                TD_INVARIANTS_GET(gd->gd_curthread);

                /*
                 * If an interrupt is pending, clear i_running and execute
                 * the handlers.  Note that certain types of interrupts can
                 * re-trigger and set i_running again.
                 *
                 * Each handler is run in a critical section.  Note that we
                 * run both FAST and SLOW designated service routines.
                 */
                if (info->i_running) {
                        ++ill_count;
                        info->i_running = 0;

                        if (*list == NULL)
                                report_stray_interrupt(intr, info);

                        for (rec = *list; rec; rec = nrec) {
                                nrec = rec->next;
                                if (rec->serializer) {
                                        lwkt_serialize_handler_call(
                                                rec->serializer, rec->handler,
                                                rec->argument, NULL);
                                } else {
                                        rec->handler(rec->argument, NULL);
                                }
                                TD_INVARIANTS_TEST(gd->gd_curthread,
                                                   rec->name);
                        }
                }

                /*
                 * This is our interrupt hook to add rate randomness to the
                 * random number generator.
                 */
                if (info->i_random.sc_enabled > 0)
                        add_interrupt_randomness(intr);

                /*
                 * Unmask the interrupt to allow it to trigger again.  This
                 * only applies to certain types of interrupts (typically
                 * level-triggered interrupts).  This can result in the
                 * interrupt retriggering, but the retrigger will not be
                 * processed until we cycle our critical section.
                 *
                 * Only unmask interrupts while handlers are installed.  It
                 * is possible to hit a situation where no handlers are
                 * installed due to a device driver livelocking and then
                 * tearing down its interrupt on close (the parallel bus
                 * being a good example).
                 */
                if (*list)
                        machintr_intren(intr);

                /*
                 * Do a quick exit/enter to catch any higher-priority
                 * interrupt sources, such as the statclock, so thread time
                 * accounting will still work.  This may also cause an
                 * interrupt to re-trigger.
                 */
                crit_exit_gd(gd);
                crit_enter_gd(gd);

                /*
                 * LIVELOCK STATE MACHINE
                 */
                switch(info->i_state) {
                case ISTATE_NORMAL:
                        /*
                         * Reset the count each second.
                         */
                        if (lseconds != gd->gd_time_seconds) {
                                lseconds = gd->gd_time_seconds;
                                ill_count = 0;
                        }

                        /*
                         * If we did not exceed the frequency limit, we are
                         * done.  If the interrupt has not retriggered we
                         * deschedule ourselves.
                         */
                        if (ill_count <= livelock_limit) {
                                if (info->i_running == 0) {
                                        lwkt_deschedule_self(gd->gd_curthread);
                                        lwkt_switch();
                                }
                                break;
                        }

                        /*
                         * Otherwise we are livelocked.  Set up a periodic
                         * systimer to wake the thread up at the limit
                         * frequency.
                         */
                        kprintf("intr %d at %d/%d hz, livelocked limit engaged!\n",
                                intr, ill_count, livelock_limit);
                        info->i_state = ISTATE_LIVELOCKED;
                        if ((use_limit = livelock_limit) < 100)
                                use_limit = 100;
                        else if (use_limit > 500000)
                                use_limit = 500000;
                        systimer_init_periodic_nq(&ill_timer,
                                                  ithread_livelock_wakeup,
                                                  (void *)(intptr_t)intr,
                                                  use_limit);
                        /* fall through */
                case ISTATE_LIVELOCKED:
                        /*
                         * Wait for our periodic timer to go off.  Since the
                         * interrupt has re-armed it can still set i_running,
                         * but it will not reschedule us while we are in a
                         * livelocked state.
                         */
                        lwkt_deschedule_self(gd->gd_curthread);
                        lwkt_switch();

                        /*
                         * Check once a second to see if the livelock
                         * condition no longer applies.
                         */
                        if (lseconds != gd->gd_time_seconds) {
                                lseconds = gd->gd_time_seconds;
                                if (ill_count < livelock_lowater) {
                                        info->i_state = ISTATE_NORMAL;
                                        systimer_del(&ill_timer);
                                        kprintf("intr %d at %d/%d hz, "
                                                "livelock removed\n",
                                                intr, ill_count,
                                                livelock_lowater);
                                } else if (livelock_debug == intr ||
                                           (bootverbose && cold)) {
                                        kprintf("intr %d at %d/%d hz, "
                                                "in livelock\n",
                                                intr, ill_count,
                                                livelock_lowater);
                                }
                                ill_count = 0;
                        }
                        break;
                }
        }
        /* not reached */
}

/*
 * Emergency interrupt polling thread.  The thread begins execution
 * outside a critical section with the BGL held.
 *
 * If emergency interrupt polling is enabled, this thread will
 * execute all system interrupts not marked INTR_NOPOLL at the
 * specified polling frequency.
 *
 * WARNING! This thread runs *ALL* interrupt service routines that
 * are not marked INTR_NOPOLL, which basically means everything except
 * the 8254 clock interrupt and the ATA interrupt.  It has very high
 * overhead and should only be used in situations where the machine
 * cannot otherwise be made to work.  Due to the severe performance
 * degradation, it should not be enabled on production machines.
 */
static void
ithread_emergency(void *arg __unused)
{
        struct intr_info *info;
        intrec_t rec, nrec;
        int intr;
        thread_t td __debugvar = curthread;
        TD_INVARIANTS_DECLARE;

        get_mplock();
        TD_INVARIANTS_GET(td);

        for (;;) {
                for (intr = 0; intr < max_installed_hard_intr; ++intr) {
                        info = &intr_info_ary[intr];
                        for (rec = info->i_reclist; rec; rec = nrec) {
                                /* rec may be invalid after the call */
                                nrec = rec->next;
                                if ((rec->intr_flags & INTR_NOPOLL) == 0) {
                                        if (rec->serializer) {
                                                lwkt_serialize_handler_call(
                                                        rec->serializer,
                                                        rec->handler,
                                                        rec->argument, NULL);
                                        } else {
                                                rec->handler(rec->argument,
                                                             NULL);
                                        }
                                        TD_INVARIANTS_TEST(td, rec->name);
                                }
                        }
                }
                lwkt_deschedule_self(curthread);
                lwkt_switch();
        }
}

/*
 * Systimer callback - schedule the emergency interrupt poll thread
 * if emergency polling is enabled.
 */
static
void
emergency_intr_timer_callback(systimer_t info, struct intrframe *frame __unused)
{
        if (emergency_intr_enable)
                lwkt_schedule(info->data);
}

int
ithread_cpuid(int intr)
{
        const struct intr_info *info;

        KKASSERT(intr >= 0 && intr < MAX_INTS);
        info = &intr_info_ary[intr];

        if (info->i_state == ISTATE_NOTHREAD)
                return -1;
        return info->i_thread.td_gd->gd_cpuid;
}
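
/*
 * Editorial note: hw.intrnames and hw.intrcnt (exported below) are the
 * machine-independent way for userland to read per-interrupt names and
 * counts.  The mention of systat as a consumer comes from the comment
 * below; that vmstat -i reads the same sysctls is an assumption based on
 * common BSD practice.
 */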
/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however
 * is machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */

static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
        struct intr_info *info;
        intrec_t rec;
        int error = 0;
        int len;
        int intr;
        char buf[64];

        for (intr = 0; error == 0 && intr < MAX_INTS; ++intr) {
                info = &intr_info_ary[intr];

                len = 0;
                buf[0] = 0;
                for (rec = info->i_reclist; rec; rec = rec->next) {
                        ksnprintf(buf + len, sizeof(buf) - len, "%s%s",
                                  (len ? "/" : ""), rec->name);
                        len += strlen(buf + len);
                }
                if (len == 0) {
                        ksnprintf(buf, sizeof(buf), "irq%d", intr);
                        len = strlen(buf);
                }
                error = SYSCTL_OUT(req, buf, len + 1);
        }
        return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
        NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
        struct intr_info *info;
        int error = 0;
        int intr;

        for (intr = 0; intr < max_installed_hard_intr; ++intr) {
                info = &intr_info_ary[intr];

                error = SYSCTL_OUT(req, &info->i_count,
                                   sizeof(info->i_count));
                if (error)
                        goto failed;
        }
        for (intr = FIRST_SOFTINT; intr < max_installed_soft_intr; ++intr) {
                info = &intr_info_ary[intr];

                error = SYSCTL_OUT(req, &info->i_count,
                                   sizeof(info->i_count));
                if (error)
                        goto failed;
        }
failed:
        return(error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
        NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

static void
int_moveto_destcpu(int *orig_cpuid0, int *cpuid0, int intr)
{
        int orig_cpuid = mycpuid, cpuid;
        char envpath[32];

        cpuid = orig_cpuid;
        ksnprintf(envpath, sizeof(envpath), "hw.irq.%d.dest", intr);
        kgetenv_int(envpath, &cpuid);
        if (cpuid >= ncpus)
                cpuid = orig_cpuid;

        if (cpuid != orig_cpuid)
                lwkt_migratecpu(cpuid);

        *orig_cpuid0 = orig_cpuid;
        *cpuid0 = cpuid;
}

static void
int_moveto_origcpu(int orig_cpuid, int cpuid)
{
        if (cpuid != orig_cpuid)
                lwkt_migratecpu(orig_cpuid);
}
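
/*
 * Editorial note: int_moveto_destcpu() above consults the kernel
 * environment for a per-irq destination cpu.  A hedged example, assuming
 * irq 11 should be serviced on cpu 1 (the variable name comes from the
 * ksnprintf() format above; setting it in loader.conf is the usual BSD
 * convention for kenv variables):
 *
 *      # /boot/loader.conf
 *      hw.irq.11.dest="1"
 */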