/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
 * $DragonFly: src/sys/kern/kern_intr.c,v 1.40 2005/12/27 21:32:11 dillon Exp $
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/thread2.h>
#include <sys/random.h>
#include <sys/serialize.h>
#include <sys/bus.h>
#include <sys/machintr.h>

#include <machine/ipl.h>
#include <machine/frame.h>

#include <sys/interrupt.h>

struct intr_info;

typedef struct intrec {
    struct intrec *next;
    struct intr_info *info;
    inthand2_t *handler;
    void *argument;
    char *name;
    int intr;
    int intr_flags;
    struct lwkt_serialize *serializer;
} *intrec_t;

struct intr_info {
    intrec_t i_reclist;
    struct thread i_thread;
    struct random_softc i_random;
    int i_running;
    long i_count;		/* interrupts dispatched */
    int i_mplock_required;
    int i_fast;
    int i_slow;
    int i_state;
} intr_info_ary[MAX_INTS];

int max_installed_hard_intr;
int max_installed_soft_intr;

#define EMERGENCY_INTR_POLLING_FREQ_MAX 20000

static int sysctl_emergency_freq(SYSCTL_HANDLER_ARGS);
static int sysctl_emergency_enable(SYSCTL_HANDLER_ARGS);
static void emergency_intr_timer_callback(systimer_t, struct intrframe *);
static void ithread_handler(void *arg);
static void ithread_emergency(void *arg);

int intr_info_size = sizeof(intr_info_ary) / sizeof(intr_info_ary[0]);

static struct systimer emergency_intr_timer;
static struct thread emergency_intr_thread;

#define ISTATE_NOTHREAD		0
#define ISTATE_NORMAL		1
#define ISTATE_LIVELOCKED	2

#ifdef SMP
static int intr_mpsafe = 0;
TUNABLE_INT("kern.intr_mpsafe", &intr_mpsafe);
SYSCTL_INT(_kern, OID_AUTO, intr_mpsafe,
    CTLFLAG_RW, &intr_mpsafe, 0, "Run INTR_MPSAFE handlers without the BGL");
#endif
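
/*
 * Livelock protection tunables: an interrupt whose decaying per-second
 * dispatch rate exceeds livelock_limit has its thread throttled to the
 * limit frequency until the rate stays below livelock_lowater for a
 * full second (see the state machine in ithread_handler() below).
 */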
static int livelock_limit = 50000;
static int livelock_lowater = 20000;
SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
    CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
SYSCTL_INT(_kern, OID_AUTO, livelock_lowater,
    CTLFLAG_RW, &livelock_lowater, 0, "Livelock low-water mark restore");

static int emergency_intr_enable = 0;	/* emergency interrupt polling */
TUNABLE_INT("kern.emergency_intr_enable", &emergency_intr_enable);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_enable, CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_emergency_enable, "I", "Emergency Interrupt Poll Enable");

static int emergency_intr_freq = 10;	/* emergency polling frequency */
TUNABLE_INT("kern.emergency_intr_freq", &emergency_intr_freq);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_freq, CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_emergency_freq, "I", "Emergency Interrupt Poll Frequency");

/*
 * Sysctl support routines
 */
static int
sysctl_emergency_enable(SYSCTL_HANDLER_ARGS)
{
    int error, enabled;

    enabled = emergency_intr_enable;
    error = sysctl_handle_int(oidp, &enabled, 0, req);
    if (error || req->newptr == NULL)
        return error;
    emergency_intr_enable = enabled;
    if (emergency_intr_enable) {
        emergency_intr_timer.periodic =
            sys_cputimer->fromhz(emergency_intr_freq);
    } else {
        emergency_intr_timer.periodic = sys_cputimer->fromhz(1);
    }
    return 0;
}

static int
sysctl_emergency_freq(SYSCTL_HANDLER_ARGS)
{
    int error, phz;

    phz = emergency_intr_freq;
    error = sysctl_handle_int(oidp, &phz, 0, req);
    if (error || req->newptr == NULL)
        return error;
    if (phz <= 0)
        return EINVAL;
    else if (phz > EMERGENCY_INTR_POLLING_FREQ_MAX)
        phz = EMERGENCY_INTR_POLLING_FREQ_MAX;

    emergency_intr_freq = phz;
    if (emergency_intr_enable) {
        emergency_intr_timer.periodic =
            sys_cputimer->fromhz(emergency_intr_freq);
    } else {
        emergency_intr_timer.periodic = sys_cputimer->fromhz(1);
    }
    return 0;
}
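
/*
 * Illustrative (hypothetical) driver usage of the registration API
 * below; foo_intr, sc and sc->sc_serialize are made-up names:
 *
 *	void *cookie;
 *
 *	cookie = register_int(irq, foo_intr, sc, "foo0",
 *			      &sc->sc_serialize, INTR_MPSAFE);
 *	...
 *	unregister_int(cookie);
 *
 * The returned cookie is the interrupt handler record itself.
 */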

/*
 * Register an SWI or INTerrupt handler.
 */
void *
register_swi(int intr, inthand2_t *handler, void *arg, const char *name,
	     struct lwkt_serialize *serializer)
{
    if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
        panic("register_swi: bad intr %d", intr);
    return(register_int(intr, handler, arg, name, serializer, 0));
}

void *
register_int(int intr, inthand2_t *handler, void *arg, const char *name,
	     struct lwkt_serialize *serializer, int intr_flags)
{
    struct intr_info *info;
    struct intrec **list;
    intrec_t rec;

    if (intr < 0 || intr >= MAX_INTS)
        panic("register_int: bad intr %d", intr);
    if (name == NULL)
        name = "???";
    info = &intr_info_ary[intr];

    /*
     * Construct an interrupt handler record
     */
    rec = malloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT);
    rec->name = malloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT);
    strcpy(rec->name, name);

    rec->info = info;
    rec->handler = handler;
    rec->argument = arg;
    rec->intr = intr;
    rec->intr_flags = intr_flags;
    rec->next = NULL;
    rec->serializer = serializer;

    /*
     * Create an emergency polling thread and set up a systimer to wake
     * it up.
     */
    if (emergency_intr_thread.td_kstack == NULL) {
        lwkt_create(ithread_emergency, NULL, NULL,
                    &emergency_intr_thread, TDF_STOPREQ|TDF_INTTHREAD, -1,
                    "ithread emerg");
        systimer_init_periodic_nq(&emergency_intr_timer,
                    emergency_intr_timer_callback, &emergency_intr_thread,
                    (emergency_intr_enable ? emergency_intr_freq : 1));
    }

    /*
     * Create an interrupt thread if necessary, leave it in an unscheduled
     * state.
     */
    if (info->i_state == ISTATE_NOTHREAD) {
        info->i_state = ISTATE_NORMAL;
        lwkt_create((void *)ithread_handler, (void *)intr, NULL,
                    &info->i_thread, TDF_STOPREQ|TDF_INTTHREAD|TDF_MPSAFE, -1,
                    "ithread %d", intr);
        if (intr >= FIRST_SOFTINT)
            lwkt_setpri(&info->i_thread, TDPRI_SOFT_NORM);
        else
            lwkt_setpri(&info->i_thread, TDPRI_INT_MED);
        info->i_thread.td_preemptable = lwkt_preempt;
    }

    list = &info->i_reclist;

    /*
     * Keep track of how many fast and slow interrupts we have.
     * Set i_mplock_required if any handler in the chain requires
     * the MP lock to operate.
     */
    if ((intr_flags & INTR_MPSAFE) == 0)
        info->i_mplock_required = 1;
    if (intr_flags & INTR_FAST)
        ++info->i_fast;
    else
        ++info->i_slow;

    /*
     * Add the record to the interrupt list.
     */
    crit_enter();
    while (*list != NULL)
        list = &(*list)->next;
    *list = rec;
    crit_exit();

    /*
     * Update max_installed_hard_intr to make the emergency intr poll
     * a bit more efficient.
     */
    if (intr < FIRST_SOFTINT) {
        if (max_installed_hard_intr <= intr)
            max_installed_hard_intr = intr + 1;
    } else {
        if (max_installed_soft_intr <= intr)
            max_installed_soft_intr = intr + 1;
    }

    /*
     * Setup the machine level interrupt vector
     *
     * XXX temporary workaround for some ACPI brokenness.  ACPI installs
     * its interrupt too early, before the IOAPICs have been configured,
     * which means the IOAPIC is not enabled by the registration of the
     * ACPI interrupt.  Anything else sharing that IRQ will wind up not
     * being enabled.  Temporarily work around the problem by always
     * installing and enabling on every new interrupt handler, even
     * if one has already been setup on that irq.
     */
    if (intr < FIRST_SOFTINT /* && info->i_slow + info->i_fast == 1*/) {
        if (machintr_vector_setup(intr, intr_flags))
            printf("machintr_vector_setup: failed on irq %d\n", intr);
    }

    return(rec);
}

void
unregister_swi(void *id)
{
    unregister_int(id);
}

void
unregister_int(void *id)
{
    struct intr_info *info;
    struct intrec **list;
    intrec_t rec;
    int intr;

    intr = ((intrec_t)id)->intr;

    if (intr < 0 || intr >= MAX_INTS)
        panic("unregister_int: bad intr %d", intr);

    info = &intr_info_ary[intr];

    /*
     * Remove the interrupt descriptor, adjust the descriptor count,
     * and teardown the machine level vector if this was the last interrupt.
     */
    crit_enter();
    list = &info->i_reclist;
    while ((rec = *list) != NULL) {
        if (rec == id)
            break;
        list = &rec->next;
    }
    if (rec) {
        intrec_t rec0;

        *list = rec->next;
        if (rec->intr_flags & INTR_FAST)
            --info->i_fast;
        else
            --info->i_slow;
        if (intr < FIRST_SOFTINT && info->i_fast + info->i_slow == 0)
            machintr_vector_teardown(intr);

        /*
         * Clear i_mplock_required if no handlers in the chain require the
         * MP lock.
         */
        for (rec0 = info->i_reclist; rec0; rec0 = rec0->next) {
            if ((rec0->intr_flags & INTR_MPSAFE) == 0)
                break;
        }
        if (rec0 == NULL)
            info->i_mplock_required = 0;
    }

    crit_exit();

    /*
     * Free the record.
     */
    if (rec != NULL) {
        free(rec->name, M_DEVBUF);
        free(rec, M_DEVBUF);
    } else {
        printf("warning: unregister_int: int %d handler for %s not found\n",
               intr, ((intrec_t)id)->name);
    }
}
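
/*
 * Informational accessors.  Note that get_registered_name() reports
 * "mux" when more than one handler is chained to the same vector.
 */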
const char *
get_registered_name(int intr)
{
    intrec_t rec;

    if (intr < 0 || intr >= MAX_INTS)
        panic("get_registered_name: bad intr %d", intr);

    if ((rec = intr_info_ary[intr].i_reclist) == NULL)
        return(NULL);
    else if (rec->next)
        return("mux");
    else
        return(rec->name);
}

int
count_registered_ints(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
        panic("count_registered_ints: bad intr %d", intr);
    info = &intr_info_ary[intr];
    return(info->i_fast + info->i_slow);
}

long
get_interrupt_counter(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
        panic("get_interrupt_counter: bad intr %d", intr);
    info = &intr_info_ary[intr];
    return(info->i_count);
}

void
swi_setpriority(int intr, int pri)
{
    struct intr_info *info;

    if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
        panic("swi_setpriority: bad intr %d", intr);
    info = &intr_info_ary[intr];
    if (info->i_state != ISTATE_NOTHREAD)
        lwkt_setpri(&info->i_thread, pri);
}
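
/*
 * Interrupt randomness hooks: register_randintr() marks an interrupt
 * as an entropy source; ithread_handler() then calls
 * add_interrupt_randomness() each time that interrupt's thread runs.
 */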
void
register_randintr(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
        panic("register_randintr: bad intr %d", intr);
    info = &intr_info_ary[intr];
    info->i_random.sc_intr = intr;
    info->i_random.sc_enabled = 1;
}

void
unregister_randintr(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
        panic("unregister_randintr: bad intr %d", intr);
    info = &intr_info_ary[intr];
    info->i_random.sc_enabled = 0;
}

int
next_registered_randintr(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
        panic("next_registered_randintr: bad intr %d", intr);
    while (intr < MAX_INTS) {
        info = &intr_info_ary[intr];
        if (info->i_random.sc_enabled)
            break;
        ++intr;
    }
    return(intr);
}

/*
 * Dispatch an interrupt.  If there's nothing to do we have a stray
 * interrupt and can just return, leaving the interrupt masked.
 *
 * We need to schedule the interrupt and set its i_running bit.  If
 * we are not on the interrupt thread's cpu we have to send a message
 * to the correct cpu that will issue the desired action (interlocking
 * with the interrupt thread's critical section).  We do NOT attempt to
 * reschedule interrupts whose i_running bit is already set because
 * this would prematurely wakeup a livelock-limited interrupt thread.
 *
 * i_running is only tested/set on the same cpu as the interrupt thread.
 *
 * We are NOT in a critical section, which will allow the scheduled
 * interrupt to preempt us.  The MP lock might *NOT* be held here.
 */
#ifdef SMP

static void
sched_ithd_remote(void *arg)
{
    sched_ithd((int)arg);
}

#endif

void
sched_ithd(int intr)
{
    struct intr_info *info;

    info = &intr_info_ary[intr];

    ++info->i_count;
    if (info->i_state != ISTATE_NOTHREAD) {
        if (info->i_reclist == NULL) {
            printf("sched_ithd: stray interrupt %d\n", intr);
        } else {
#ifdef SMP
            if (info->i_thread.td_gd == mycpu) {
                if (info->i_running == 0) {
                    info->i_running = 1;
                    if (info->i_state != ISTATE_LIVELOCKED)
                        lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
                }
            } else {
                lwkt_send_ipiq(info->i_thread.td_gd,
                               sched_ithd_remote, (void *)intr);
            }
#else
            if (info->i_running == 0) {
                info->i_running = 1;
                if (info->i_state != ISTATE_LIVELOCKED)
                    lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
            }
#endif
        }
    } else {
        printf("sched_ithd: stray interrupt %d\n", intr);
    }
}

/*
 * This is run from a periodic SYSTIMER (and thus must be MP safe, the BGL
 * might not be held).
 */
static void
ithread_livelock_wakeup(systimer_t st)
{
    struct intr_info *info;

    info = &intr_info_ary[(int)st->data];
    if (info->i_state != ISTATE_NOTHREAD)
        lwkt_schedule(&info->i_thread);
}
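
/*
 * Dispatch note: INTR_FAST handlers may be run directly from the low
 * level vector code via ithread_fast_handler() below; SLOW handlers
 * are always deferred to the per-interrupt thread, which re-runs the
 * entire chain (see ithread_handler()).
 */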

/*
 * This function is called directly from the ICU or APIC vector code assembly
 * to process an interrupt.  The critical section and interrupt deferral
 * checks have already been done but the function is entered WITHOUT
 * a critical section held.  The BGL may or may not be held.
 *
 * Must return non-zero if we do not want the vector code to re-enable
 * the interrupt (which we don't if we have to schedule the interrupt)
 */
int ithread_fast_handler(struct intrframe frame);

int
ithread_fast_handler(struct intrframe frame)
{
    int intr;
    struct intr_info *info;
    struct intrec **list;
    int must_schedule;
#ifdef SMP
    int got_mplock;
#endif
    intrec_t rec, next_rec;
    globaldata_t gd;

    intr = frame.if_vec;
    gd = mycpu;

    info = &intr_info_ary[intr];

    /*
     * If we are not processing any FAST interrupts, just schedule the thing.
     * (since we aren't in a critical section, this can result in a
     * preemption)
     */
    if (info->i_fast == 0) {
        sched_ithd(intr);
        return(1);
    }

    /*
     * This should not normally occur since interrupts ought to be
     * masked if the ithread has been scheduled or is running.
     */
    if (info->i_running)
        return(1);

    /*
     * Bump the interrupt nesting level to process any FAST interrupts.
     * Obtain the MP lock as necessary.  If the MP lock cannot be obtained,
     * schedule the interrupt thread to deal with the issue instead.
     *
     * To reduce overhead, just leave the MP lock held once it has been
     * obtained.
     */
    crit_enter_gd(gd);
    ++gd->gd_intr_nesting_level;
    ++gd->gd_cnt.v_intr;
    must_schedule = info->i_slow;
#ifdef SMP
    got_mplock = 0;
#endif

    list = &info->i_reclist;
    for (rec = *list; rec; rec = next_rec) {
        next_rec = rec->next;	/* rec may be invalid after call */

        if (rec->intr_flags & INTR_FAST) {
#ifdef SMP
            if ((rec->intr_flags & INTR_MPSAFE) == 0 && got_mplock == 0) {
                if (try_mplock() == 0) {
                    int owner;

                    /*
                     * If we couldn't get the MP lock try to forward it
                     * to the cpu holding the MP lock, setting must_schedule
                     * to -1 so we do not schedule and also do not unmask
                     * the interrupt.  Otherwise just schedule it.
                     */
                    owner = owner_mplock();
                    if (owner >= 0 && owner != gd->gd_cpuid) {
                        lwkt_send_ipiq_bycpu(owner, forward_fastint_remote,
                                             (void *)intr);
                        must_schedule = -1;
                        ++gd->gd_cnt.v_forwarded_ints;
                    } else {
                        must_schedule = 1;
                    }
                    break;
                }
                got_mplock = 1;
            }
#endif
            if (rec->serializer) {
                must_schedule += lwkt_serialize_handler_try(
                                        rec->serializer, rec->handler,
                                        rec->argument, &frame);
            } else {
                rec->handler(rec->argument, &frame);
            }
        }
    }

    /*
     * Cleanup
     */
    --gd->gd_intr_nesting_level;
#ifdef SMP
    if (got_mplock)
        rel_mplock();
#endif
    crit_exit_gd(gd);

    /*
     * If we had a problem, schedule the thread to catch the missed
     * records (it will just re-run all of them).  A return value of 0
     * indicates that all handlers have been run and the interrupt can
     * be re-enabled, and a non-zero return indicates that the interrupt
     * thread controls re-enablement.
     */
    if (must_schedule > 0)
        sched_ithd(intr);
    else if (must_schedule == 0)
        ++info->i_count;
    return(must_schedule);
}
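
/*
 * Summary of the must_schedule return contract above: 0 means there
 * were no SLOW handlers and every FAST handler ran, so the vector code
 * may unmask the interrupt; a positive value means the ithread has
 * been scheduled and now owns re-enabling; -1 means the interrupt was
 * forwarded to the cpu holding the BGL and is neither scheduled nor
 * unmasked here.
 */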

#if 0

6:	;							\
	/* could not get the MP lock, forward the interrupt */	\
	movl	mp_lock, %eax ;		/* check race */	\
	cmpl	$MP_FREE_LOCK,%eax ;				\
	je	2b ;						\
	incl	PCPU(cnt)+V_FORWARDED_INTS ;			\
	subl	$12,%esp ;					\
	movl	$irq_num,8(%esp) ;				\
	movl	$forward_fastint_remote,4(%esp) ;		\
	movl	%eax,(%esp) ;					\
	call	lwkt_send_ipiq_bycpu ;				\
	addl	$12,%esp ;					\
	jmp	5f ;

#endif

/*
 * Interrupt threads run this as their main loop.
 *
 * The handler begins execution outside a critical section and with the BGL
 * held.
 *
 * The i_running state starts at 0.  When an interrupt occurs, the hardware
 * interrupt is disabled and sched_ithd() is called.  The HW interrupt remains
 * disabled until all routines have run.  We then call ithread_done() to
 * reenable the HW interrupt and deschedule us until the next interrupt.
 *
 * We are responsible for atomically checking i_running and ithread_done()
 * is responsible for atomically checking for platform-specific delayed
 * interrupts.  i_running for our irq is only set in the context of our cpu,
 * so a critical section is a sufficient interlock.
 */
#define LIVELOCK_TIMEFRAME(freq)	((freq) >> 2)	/* 1/4 second */

static void
ithread_handler(void *arg)
{
    struct intr_info *info;
    int use_limit;
    int lticks;
    int lcount;
    int intr;
    int mpheld;
    struct intrec **list;
    intrec_t rec, nrec;
    globaldata_t gd;
    struct systimer ill_timer;	/* enforced freq. timer */
    u_int ill_count;		/* interrupt livelock counter */

    ill_count = 0;
    lticks = ticks;
    lcount = 0;
    intr = (int)arg;
    info = &intr_info_ary[intr];
    list = &info->i_reclist;
    gd = mycpu;

    /*
     * The loop must be entered with one critical section held.  The thread
     * is created with TDF_MPSAFE so the MP lock is not held on start.
     */
    crit_enter_gd(gd);
    mpheld = 0;

    for (;;) {
        /*
         * The chain is only considered MPSAFE if all its interrupt handlers
         * are MPSAFE.  However, if intr_mpsafe has been turned off we
         * always operate with the BGL.
         */
#ifdef SMP
        if (intr_mpsafe == 0) {
            if (mpheld == 0) {
                get_mplock();
                mpheld = 1;
            }
        } else if (info->i_mplock_required != mpheld) {
            if (info->i_mplock_required) {
                KKASSERT(mpheld == 0);
                get_mplock();
                mpheld = 1;
            } else {
                KKASSERT(mpheld != 0);
                rel_mplock();
                mpheld = 0;
            }
        }
#endif

        /*
         * If an interrupt is pending, clear i_running and execute the
         * handlers.  Note that certain types of interrupts can re-trigger
         * and set i_running again.
         *
         * Each handler is run in a critical section.  Note that we run both
         * FAST and SLOW designated service routines.
         */
        if (info->i_running) {
            ++ill_count;
            info->i_running = 0;

            for (rec = *list; rec; rec = nrec) {
                nrec = rec->next;
                if (rec->serializer) {
                    lwkt_serialize_handler_call(rec->serializer, rec->handler,
                                                rec->argument, NULL);
                } else {
                    rec->handler(rec->argument, NULL);
                }
            }
        }

        /*
         * This is our interrupt hook to add rate randomness to the random
         * number generator.
         */
        if (info->i_random.sc_enabled)
            add_interrupt_randomness(intr);

        /*
         * Unmask the interrupt to allow it to trigger again.  This only
         * applies to certain types of interrupts (typically level
         * interrupts).  This can result in the interrupt retriggering,
         * but the retrigger will not be processed until we cycle our
         * critical section.
         *
         * Only unmask interrupts while handlers are installed.  It is
         * possible to hit a situation where no handlers are installed
         * due to a device driver livelocking and then tearing down its
         * interrupt on close (the parallel bus being a good example).
         */
        if (*list)
            machintr_intren(intr);

        /*
         * Do a quick exit/enter to catch any higher-priority interrupt
         * sources, such as the statclock, so thread time accounting
         * will still work.  This may also cause an interrupt to re-trigger.
         */
        crit_exit_gd(gd);
        crit_enter_gd(gd);
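
        /*
         * State machine summary (sketch of the code below):
         * NORMAL -> LIVELOCKED when the decaying per-second count
         * ill_count exceeds livelock_limit; LIVELOCKED -> NORMAL once
         * the count stays below livelock_lowater for hz consecutive
         * ticks.  While livelocked the thread is woken only by
         * ill_timer, never directly by sched_ithd().
         */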
        /*
         * LIVELOCK STATE MACHINE
         */
        switch(info->i_state) {
        case ISTATE_NORMAL:
            /*
             * Calculate a running average every tick.
             */
            if (lticks != ticks) {
                lticks = ticks;
                ill_count -= ill_count / hz;
            }

            /*
             * If we did not exceed the frequency limit, we are done.
             * If the interrupt has not retriggered we deschedule ourselves.
             */
            if (ill_count <= livelock_limit) {
                if (info->i_running == 0) {
                    lwkt_deschedule_self(gd->gd_curthread);
                    lwkt_switch();
                }
                break;
            }

            /*
             * Otherwise we are livelocked.  Set up a periodic systimer
             * to wake the thread up at the limit frequency.
             */
            printf("intr %d at %d > %d hz, livelocked limit engaged!\n",
                   intr, ill_count, livelock_limit);
            info->i_state = ISTATE_LIVELOCKED;
            if ((use_limit = livelock_limit) < 100)
                use_limit = 100;
            else if (use_limit > 500000)
                use_limit = 500000;
            systimer_init_periodic(&ill_timer, ithread_livelock_wakeup,
                                   (void *)intr, use_limit);
            lcount = 0;
            /* fall through */
        case ISTATE_LIVELOCKED:
            /*
             * Wait for our periodic timer to go off.  Since the interrupt
             * has re-armed it can still set i_running, but it will not
             * reschedule us while we are in a livelocked state.
             */
            lwkt_deschedule_self(gd->gd_curthread);
            lwkt_switch();

            /*
             * Check to see if the livelock condition no longer applies.
             * The interrupt must be able to operate normally for one
             * full second before we restore normal operation.
             */
            if (lticks != ticks) {
                lticks = ticks;
                if (ill_count < livelock_lowater) {
                    if (++lcount >= hz) {
                        info->i_state = ISTATE_NORMAL;
                        systimer_del(&ill_timer);
                        printf("intr %d at %d < %d hz, livelock removed\n",
                               intr, ill_count, livelock_lowater);
                    }
                } else {
                    lcount = 0;
                }
                ill_count -= ill_count / hz;
            }
            break;
        }
    }
    /* not reached */
}

/*
 * Emergency interrupt polling thread.  The thread begins execution
 * outside a critical section with the BGL held.
 *
 * If emergency interrupt polling is enabled, this thread will
 * execute all system interrupts not marked INTR_NOPOLL at the
 * specified polling frequency.
 *
 * WARNING! This thread runs *ALL* interrupt service routines that
 * are not marked INTR_NOPOLL, which basically means everything except
 * the 8254 clock interrupt and the ATA interrupt.  It has very high
 * overhead and should only be used in situations where the machine
 * cannot otherwise be made to work.  Due to the severe performance
 * degradation, it should not be enabled on production machines.
 */
static void
ithread_emergency(void *arg __unused)
{
    struct intr_info *info;
    intrec_t rec, nrec;
    int intr;

    for (;;) {
        for (intr = 0; intr < max_installed_hard_intr; ++intr) {
            info = &intr_info_ary[intr];
            for (rec = info->i_reclist; rec; rec = nrec) {
                if ((rec->intr_flags & INTR_NOPOLL) == 0) {
                    if (rec->serializer) {
                        lwkt_serialize_handler_call(rec->serializer,
                                rec->handler, rec->argument, NULL);
                    } else {
                        rec->handler(rec->argument, NULL);
                    }
                }
                nrec = rec->next;
            }
        }
        lwkt_deschedule_self(curthread);
        lwkt_switch();
    }
}

/*
 * Systimer callback - schedule the emergency interrupt poll thread
 * if emergency polling is enabled.
 */
static
void
emergency_intr_timer_callback(systimer_t info, struct intrframe *frame __unused)
{
    if (emergency_intr_enable)
        lwkt_schedule(info->data);
}
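
/*
 * Userland note: the two sysctls below export intrnames as a sequence
 * of NUL-terminated strings and intrcnt as an array of longs; systat
 * and friends read them (e.g. via sysctlbyname(3)) to display
 * per-interrupt dispatch counts.
 */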

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however
 * is machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */

static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
    struct intr_info *info;
    intrec_t rec;
    int error = 0;
    int len;
    int intr;
    char buf[64];

    for (intr = 0; error == 0 && intr < MAX_INTS; ++intr) {
        info = &intr_info_ary[intr];

        len = 0;
        buf[0] = 0;
        for (rec = info->i_reclist; rec; rec = rec->next) {
            snprintf(buf + len, sizeof(buf) - len, "%s%s",
                     (len ? "/" : ""), rec->name);
            len += strlen(buf + len);
        }
        if (len == 0) {
            snprintf(buf, sizeof(buf), "irq%d", intr);
            len = strlen(buf);
        }
        error = SYSCTL_OUT(req, buf, len + 1);
    }
    return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
    struct intr_info *info;
    int error = 0;
    int intr;

    for (intr = 0; intr < max_installed_hard_intr; ++intr) {
        info = &intr_info_ary[intr];

        error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));
        if (error)
            goto failed;
    }
    for (intr = FIRST_SOFTINT; intr < max_installed_soft_intr; ++intr) {
        info = &intr_info_ary[intr];

        error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));
        if (error)
            goto failed;
    }
failed:
    return(error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");