1 /* 2 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved. 3 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $ 27 * 28 */ 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/malloc.h> 33 #include <sys/kernel.h> 34 #include <sys/sysctl.h> 35 #include <sys/thread.h> 36 #include <sys/proc.h> 37 #include <sys/random.h> 38 #include <sys/serialize.h> 39 #include <sys/interrupt.h> 40 #include <sys/bus.h> 41 #include <sys/machintr.h> 42 43 #include <machine/frame.h> 44 45 #include <sys/thread2.h> 46 #include <sys/mplock2.h> 47 48 struct intr_info; 49 50 typedef struct intrec { 51 struct intrec *next; 52 struct intr_info *info; 53 inthand2_t *handler; 54 void *argument; 55 char *name; 56 int intr; 57 int intr_flags; 58 struct lwkt_serialize *serializer; 59 } *intrec_t; 60 61 struct intr_info { 62 intrec_t i_reclist; 63 struct thread *i_thread; /* don't embed struct thread */ 64 struct random_softc i_random; 65 long i_count; /* interrupts dispatched */ 66 int i_running; 67 int i_mplock_required; 68 int i_fast; 69 int i_slow; 70 int i_state; 71 int i_errorticks; 72 unsigned long i_straycount; 73 int i_cpuid; 74 int i_intr; 75 }; 76 77 struct intr_info_block { 78 struct intr_info ary[MAXCPU][MAX_INTS]; 79 }; 80 81 static struct intr_info_block *intr_block; 82 static struct intr_info *swi_info_ary[MAX_SOFTINTS]; 83 84 static int max_installed_hard_intr[MAXCPU]; 85 86 MALLOC_DEFINE(M_INTRMNG, "intrmng", "interrupt management"); 87 88 89 #define EMERGENCY_INTR_POLLING_FREQ_MAX 20000 90 91 /* 92 * Assert that callers into interrupt handlers don't return with 93 * dangling tokens, spinlocks, or mp locks. 94 */ 95 #ifdef INVARIANTS 96 97 #define TD_INVARIANTS_DECLARE \ 98 int spincount; \ 99 lwkt_tokref_t curstop 100 101 #define TD_INVARIANTS_GET(td) \ 102 do { \ 103 spincount = (td)->td_gd->gd_spinlocks; \ 104 curstop = (td)->td_toks_stop; \ 105 } while(0) 106 107 #define TD_INVARIANTS_TEST(td, name) \ 108 do { \ 109 KASSERT(spincount == (td)->td_gd->gd_spinlocks, \ 110 ("spincount mismatch after interrupt handler %s", \ 111 name)); \ 112 KASSERT(curstop == (td)->td_toks_stop, \ 113 ("token count mismatch after interrupt handler %s", \ 114 name)); \ 115 } while(0) 116 117 #else 118 119 /* !INVARIANTS */ 120 121 #define TD_INVARIANTS_DECLARE 122 #define TD_INVARIANTS_GET(td) 123 #define TD_INVARIANTS_TEST(td, name) 124 125 #endif /* ndef INVARIANTS */ 126 127 static int sysctl_emergency_freq(SYSCTL_HANDLER_ARGS); 128 static int sysctl_emergency_enable(SYSCTL_HANDLER_ARGS); 129 static void emergency_intr_timer_callback(systimer_t, int, struct intrframe *); 130 static void ithread_handler(void *arg); 131 static void ithread_emergency(void *arg); 132 static void report_stray_interrupt(struct intr_info *info, const char *func); 133 static void int_moveto_destcpu(int *, int); 134 static void int_moveto_origcpu(int, int); 135 static void sched_ithd_intern(struct intr_info *info); 136 137 static struct systimer emergency_intr_timer[MAXCPU]; 138 static struct thread *emergency_intr_thread[MAXCPU]; 139 140 #define ISTATE_NOTHREAD 0 141 #define ISTATE_NORMAL 1 142 #define ISTATE_LIVELOCKED 2 143 144 static int livelock_limit = 40000; 145 static int livelock_lowater = 20000; 146 static int livelock_debug = -1; 147 SYSCTL_INT(_kern, OID_AUTO, livelock_limit, 148 CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit"); 149 SYSCTL_INT(_kern, OID_AUTO, livelock_lowater, 150 CTLFLAG_RW, &livelock_lowater, 0, "Livelock low-water mark restore"); 151 SYSCTL_INT(_kern, OID_AUTO, livelock_debug, 152 CTLFLAG_RW, &livelock_debug, 0, "Livelock debug intr#"); 153 154 static int emergency_intr_enable = 0; /* emergency interrupt polling */ 155 TUNABLE_INT("kern.emergency_intr_enable", &emergency_intr_enable); 156 SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_enable, CTLTYPE_INT | CTLFLAG_RW, 157 0, 0, sysctl_emergency_enable, "I", "Emergency Interrupt Poll Enable"); 158 159 static int emergency_intr_freq = 10; /* emergency polling frequency */ 160 TUNABLE_INT("kern.emergency_intr_freq", &emergency_intr_freq); 161 SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_freq, CTLTYPE_INT | CTLFLAG_RW, 162 0, 0, sysctl_emergency_freq, "I", "Emergency Interrupt Poll Frequency"); 163 164 /* 165 * Sysctl support routines 166 */ 167 static int 168 sysctl_emergency_enable(SYSCTL_HANDLER_ARGS) 169 { 170 int error, enabled, cpuid, freq; 171 172 enabled = emergency_intr_enable; 173 error = sysctl_handle_int(oidp, &enabled, 0, req); 174 if (error || req->newptr == NULL) 175 return error; 176 emergency_intr_enable = enabled; 177 if (emergency_intr_enable) 178 freq = emergency_intr_freq; 179 else 180 freq = 1; 181 182 for (cpuid = 0; cpuid < ncpus; ++cpuid) 183 systimer_adjust_periodic(&emergency_intr_timer[cpuid], freq); 184 return 0; 185 } 186 187 static int 188 sysctl_emergency_freq(SYSCTL_HANDLER_ARGS) 189 { 190 int error, phz, cpuid, freq; 191 192 phz = emergency_intr_freq; 193 error = sysctl_handle_int(oidp, &phz, 0, req); 194 if (error || req->newptr == NULL) 195 return error; 196 if (phz <= 0) 197 return EINVAL; 198 else if (phz > EMERGENCY_INTR_POLLING_FREQ_MAX) 199 phz = EMERGENCY_INTR_POLLING_FREQ_MAX; 200 201 emergency_intr_freq = phz; 202 if (emergency_intr_enable) 203 freq = emergency_intr_freq; 204 else 205 freq = 1; 206 207 for (cpuid = 0; cpuid < ncpus; ++cpuid) 208 systimer_adjust_periodic(&emergency_intr_timer[cpuid], freq); 209 return 0; 210 } 211 212 /* 213 * Register an SWI or INTerrupt handler. 214 */ 215 void * 216 register_swi(int intr, inthand2_t *handler, void *arg, const char *name, 217 struct lwkt_serialize *serializer, int cpuid) 218 { 219 if (intr < FIRST_SOFTINT || intr >= MAX_INTS) 220 panic("register_swi: bad intr %d", intr); 221 222 if (cpuid < 0) 223 cpuid = intr % ncpus; 224 return(register_int(intr, handler, arg, name, serializer, 0, cpuid)); 225 } 226 227 void * 228 register_swi_mp(int intr, inthand2_t *handler, void *arg, const char *name, 229 struct lwkt_serialize *serializer, int cpuid) 230 { 231 if (intr < FIRST_SOFTINT || intr >= MAX_INTS) 232 panic("register_swi: bad intr %d", intr); 233 234 if (cpuid < 0) 235 cpuid = intr % ncpus; 236 return(register_int(intr, handler, arg, name, serializer, 237 INTR_MPSAFE, cpuid)); 238 } 239 240 void * 241 register_int(int intr, inthand2_t *handler, void *arg, const char *name, 242 struct lwkt_serialize *serializer, int intr_flags, int cpuid) 243 { 244 struct intr_info *info; 245 struct intrec **list; 246 intrec_t rec; 247 int orig_cpuid; 248 249 KKASSERT(cpuid >= 0 && cpuid < ncpus); 250 251 if (intr < 0 || intr >= MAX_INTS) 252 panic("register_int: bad intr %d", intr); 253 if (name == NULL) 254 name = "???"; 255 info = &intr_block->ary[cpuid][intr]; 256 257 /* 258 * Construct an interrupt handler record 259 */ 260 rec = kmalloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT); 261 rec->name = kmalloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT); 262 strcpy(rec->name, name); 263 264 rec->info = info; 265 rec->handler = handler; 266 rec->argument = arg; 267 rec->intr = intr; 268 rec->intr_flags = intr_flags; 269 rec->next = NULL; 270 rec->serializer = serializer; 271 272 int_moveto_destcpu(&orig_cpuid, cpuid); 273 274 /* 275 * Create an emergency polling thread and set up a systimer to wake 276 * it up. objcache isn't operational yet so use kmalloc. 277 * 278 * objcache may not be operational yet, use kmalloc(). 279 */ 280 if (emergency_intr_thread[cpuid] == NULL) { 281 emergency_intr_thread[cpuid] = kmalloc(sizeof(struct thread), M_DEVBUF, 282 M_INTWAIT | M_ZERO); 283 lwkt_create(ithread_emergency, NULL, NULL, 284 emergency_intr_thread[cpuid], 285 TDF_NOSTART | TDF_INTTHREAD, cpuid, "ithreadE %d", 286 cpuid); 287 systimer_init_periodic_nq(&emergency_intr_timer[cpuid], 288 emergency_intr_timer_callback, 289 emergency_intr_thread[cpuid], 290 (emergency_intr_enable ? emergency_intr_freq : 1)); 291 } 292 293 /* 294 * Create an interrupt thread if necessary, leave it in an unscheduled 295 * state. 296 */ 297 if (info->i_state == ISTATE_NOTHREAD) { 298 info->i_state = ISTATE_NORMAL; 299 info->i_thread = kmalloc(sizeof(struct thread), M_DEVBUF, 300 M_INTWAIT | M_ZERO); 301 lwkt_create(ithread_handler, (void *)(intptr_t)intr, NULL, 302 info->i_thread, TDF_NOSTART | TDF_INTTHREAD, cpuid, 303 "ithread%d %d", intr, cpuid); 304 if (intr >= FIRST_SOFTINT) 305 lwkt_setpri(info->i_thread, TDPRI_SOFT_NORM); 306 else 307 lwkt_setpri(info->i_thread, TDPRI_INT_MED); 308 info->i_thread->td_preemptable = lwkt_preempt; 309 } 310 311 list = &info->i_reclist; 312 313 /* 314 * Keep track of how many fast and slow interrupts we have. 315 * Set i_mplock_required if any handler in the chain requires 316 * the MP lock to operate. 317 */ 318 if ((intr_flags & INTR_MPSAFE) == 0) 319 info->i_mplock_required = 1; 320 if (intr_flags & INTR_CLOCK) 321 ++info->i_fast; 322 else 323 ++info->i_slow; 324 325 /* 326 * Enable random number generation keying off of this interrupt. 327 */ 328 if ((intr_flags & INTR_NOENTROPY) == 0 && info->i_random.sc_enabled == 0) { 329 info->i_random.sc_enabled = 1; 330 info->i_random.sc_intr = intr; 331 } 332 333 /* 334 * Add the record to the interrupt list. 335 */ 336 crit_enter(); 337 while (*list != NULL) 338 list = &(*list)->next; 339 *list = rec; 340 crit_exit(); 341 342 /* 343 * Update max_installed_hard_intr to make the emergency intr poll 344 * a bit more efficient. 345 */ 346 if (intr < FIRST_SOFTINT) { 347 if (max_installed_hard_intr[cpuid] <= intr) 348 max_installed_hard_intr[cpuid] = intr + 1; 349 } 350 351 if (intr >= FIRST_SOFTINT) 352 swi_info_ary[intr - FIRST_SOFTINT] = info; 353 354 /* 355 * Setup the machine level interrupt vector 356 */ 357 if (intr < FIRST_SOFTINT && info->i_slow + info->i_fast == 1) 358 machintr_intr_setup(intr, intr_flags); 359 360 int_moveto_origcpu(orig_cpuid, cpuid); 361 362 return(rec); 363 } 364 365 void 366 unregister_swi(void *id, int intr, int cpuid) 367 { 368 if (cpuid < 0) 369 cpuid = intr % ncpus; 370 371 unregister_int(id, cpuid); 372 } 373 374 void 375 unregister_int(void *id, int cpuid) 376 { 377 struct intr_info *info; 378 struct intrec **list; 379 intrec_t rec; 380 int intr, orig_cpuid; 381 382 KKASSERT(cpuid >= 0 && cpuid < ncpus); 383 384 intr = ((intrec_t)id)->intr; 385 386 if (intr < 0 || intr >= MAX_INTS) 387 panic("register_int: bad intr %d", intr); 388 389 info = &intr_block->ary[cpuid][intr]; 390 391 int_moveto_destcpu(&orig_cpuid, cpuid); 392 393 /* 394 * Remove the interrupt descriptor, adjust the descriptor count, 395 * and teardown the machine level vector if this was the last interrupt. 396 */ 397 crit_enter(); 398 list = &info->i_reclist; 399 while ((rec = *list) != NULL) { 400 if (rec == id) 401 break; 402 list = &rec->next; 403 } 404 if (rec) { 405 intrec_t rec0; 406 407 *list = rec->next; 408 if (rec->intr_flags & INTR_CLOCK) 409 --info->i_fast; 410 else 411 --info->i_slow; 412 if (intr < FIRST_SOFTINT && info->i_fast + info->i_slow == 0) 413 machintr_intr_teardown(intr); 414 415 /* 416 * Clear i_mplock_required if no handlers in the chain require the 417 * MP lock. 418 */ 419 for (rec0 = info->i_reclist; rec0; rec0 = rec0->next) { 420 if ((rec0->intr_flags & INTR_MPSAFE) == 0) 421 break; 422 } 423 if (rec0 == NULL) 424 info->i_mplock_required = 0; 425 } 426 427 if (intr >= FIRST_SOFTINT && info->i_reclist == NULL) 428 swi_info_ary[intr - FIRST_SOFTINT] = NULL; 429 430 crit_exit(); 431 432 int_moveto_origcpu(orig_cpuid, cpuid); 433 434 /* 435 * Free the record. 436 */ 437 if (rec != NULL) { 438 kfree(rec->name, M_DEVBUF); 439 kfree(rec, M_DEVBUF); 440 } else { 441 kprintf("warning: unregister_int: int %d handler for %s not found\n", 442 intr, ((intrec_t)id)->name); 443 } 444 } 445 446 long 447 get_interrupt_counter(int intr, int cpuid) 448 { 449 struct intr_info *info; 450 451 KKASSERT(cpuid >= 0 && cpuid < ncpus); 452 453 if (intr < 0 || intr >= MAX_INTS) 454 panic("register_int: bad intr %d", intr); 455 info = &intr_block->ary[cpuid][intr]; 456 return(info->i_count); 457 } 458 459 void 460 register_randintr(int intr) 461 { 462 struct intr_info *info; 463 int cpuid; 464 465 if (intr < 0 || intr >= MAX_INTS) 466 panic("register_randintr: bad intr %d", intr); 467 468 for (cpuid = 0; cpuid < ncpus; ++cpuid) { 469 info = &intr_block->ary[cpuid][intr]; 470 info->i_random.sc_intr = intr; 471 info->i_random.sc_enabled = 1; 472 } 473 } 474 475 void 476 unregister_randintr(int intr) 477 { 478 struct intr_info *info; 479 int cpuid; 480 481 if (intr < 0 || intr >= MAX_INTS) 482 panic("register_swi: bad intr %d", intr); 483 484 for (cpuid = 0; cpuid < ncpus; ++cpuid) { 485 info = &intr_block->ary[cpuid][intr]; 486 info->i_random.sc_enabled = -1; 487 } 488 } 489 490 int 491 next_registered_randintr(int intr) 492 { 493 struct intr_info *info; 494 495 if (intr < 0 || intr >= MAX_INTS) 496 panic("register_swi: bad intr %d", intr); 497 498 while (intr < MAX_INTS) { 499 int cpuid; 500 501 for (cpuid = 0; cpuid < ncpus; ++cpuid) { 502 info = &intr_block->ary[cpuid][intr]; 503 if (info->i_random.sc_enabled > 0) 504 return intr; 505 } 506 ++intr; 507 } 508 return intr; 509 } 510 511 /* 512 * Dispatch an interrupt. If there's nothing to do we have a stray 513 * interrupt and can just return, leaving the interrupt masked. 514 * 515 * We need to schedule the interrupt and set its i_running bit. If 516 * we are not on the interrupt thread's cpu we have to send a message 517 * to the correct cpu that will issue the desired action (interlocking 518 * with the interrupt thread's critical section). We do NOT attempt to 519 * reschedule interrupts whos i_running bit is already set because 520 * this would prematurely wakeup a livelock-limited interrupt thread. 521 * 522 * i_running is only tested/set on the same cpu as the interrupt thread. 523 * 524 * We are NOT in a critical section, which will allow the scheduled 525 * interrupt to preempt us. The MP lock might *NOT* be held here. 526 */ 527 static void 528 sched_ithd_remote(void *arg) 529 { 530 sched_ithd_intern(arg); 531 } 532 533 static void 534 sched_ithd_intern(struct intr_info *info) 535 { 536 ++info->i_count; 537 if (info->i_state != ISTATE_NOTHREAD) { 538 if (info->i_reclist == NULL) { 539 report_stray_interrupt(info, "sched_ithd"); 540 } else { 541 if (info->i_thread->td_gd == mycpu) { 542 if (info->i_running == 0) { 543 info->i_running = 1; 544 if (info->i_state != ISTATE_LIVELOCKED) 545 lwkt_schedule(info->i_thread); /* MIGHT PREEMPT */ 546 } 547 } else { 548 lwkt_send_ipiq(info->i_thread->td_gd, sched_ithd_remote, info); 549 } 550 } 551 } else { 552 report_stray_interrupt(info, "sched_ithd"); 553 } 554 } 555 556 void 557 sched_ithd_soft(int intr) 558 { 559 struct intr_info *info; 560 561 KKASSERT(intr >= FIRST_SOFTINT && intr < MAX_INTS); 562 563 info = swi_info_ary[intr - FIRST_SOFTINT]; 564 if (info != NULL) { 565 sched_ithd_intern(info); 566 } else { 567 kprintf("unregistered softint %d got scheduled on cpu%d\n", 568 intr, mycpuid); 569 } 570 } 571 572 void 573 sched_ithd_hard(int intr) 574 { 575 KKASSERT(intr >= 0 && intr < MAX_HARDINTS); 576 sched_ithd_intern(&intr_block->ary[mycpuid][intr]); 577 } 578 579 #ifdef _KERNEL_VIRTUAL 580 581 void 582 sched_ithd_hard_virtual(int intr) 583 { 584 KKASSERT(intr >= 0 && intr < MAX_HARDINTS); 585 sched_ithd_intern(&intr_block->ary[0][intr]); 586 } 587 588 void * 589 register_int_virtual(int intr, inthand2_t *handler, void *arg, const char *name, 590 struct lwkt_serialize *serializer, int intr_flags) 591 { 592 return register_int(intr, handler, arg, name, serializer, intr_flags, 0); 593 } 594 595 void 596 unregister_int_virtual(void *id) 597 { 598 unregister_int(id, 0); 599 } 600 601 #endif /* _KERN_VIRTUAL */ 602 603 static void 604 report_stray_interrupt(struct intr_info *info, const char *func) 605 { 606 ++info->i_straycount; 607 if (info->i_straycount < 10) { 608 if (info->i_errorticks == ticks) 609 return; 610 info->i_errorticks = ticks; 611 kprintf("%s: stray interrupt %d on cpu%d\n", 612 func, info->i_intr, mycpuid); 613 } else if (info->i_straycount == 10) { 614 kprintf("%s: %ld stray interrupts %d on cpu%d - " 615 "there will be no further reports\n", func, 616 info->i_straycount, info->i_intr, mycpuid); 617 } 618 } 619 620 /* 621 * This is run from a periodic SYSTIMER (and thus must be MP safe, the BGL 622 * might not be held). 623 */ 624 static void 625 ithread_livelock_wakeup(systimer_t st, int in_ipi __unused, 626 struct intrframe *frame __unused) 627 { 628 struct intr_info *info; 629 630 info = &intr_block->ary[mycpuid][(int)(intptr_t)st->data]; 631 if (info->i_state != ISTATE_NOTHREAD) 632 lwkt_schedule(info->i_thread); 633 } 634 635 /* 636 * Schedule ithread within fast intr handler 637 * 638 * XXX Protect sched_ithd_hard() call with gd_intr_nesting_level? 639 * Interrupts aren't enabled, but still... 640 */ 641 static __inline void 642 ithread_fast_sched(int intr, thread_t td) 643 { 644 ++td->td_nest_count; 645 646 /* 647 * We are already in critical section, exit it now to 648 * allow preemption. 649 */ 650 crit_exit_quick(td); 651 sched_ithd_hard(intr); 652 crit_enter_quick(td); 653 654 --td->td_nest_count; 655 } 656 657 /* 658 * This function is called directly from the ICU or APIC vector code assembly 659 * to process an interrupt. The critical section and interrupt deferral 660 * checks have already been done but the function is entered WITHOUT 661 * a critical section held. The BGL may or may not be held. 662 * 663 * Must return non-zero if we do not want the vector code to re-enable 664 * the interrupt (which we don't if we have to schedule the interrupt) 665 */ 666 int ithread_fast_handler(struct intrframe *frame); 667 668 int 669 ithread_fast_handler(struct intrframe *frame) 670 { 671 int intr; 672 struct intr_info *info; 673 struct intrec **list; 674 int must_schedule; 675 int got_mplock; 676 TD_INVARIANTS_DECLARE; 677 intrec_t rec, nrec; 678 globaldata_t gd; 679 thread_t td; 680 681 intr = frame->if_vec; 682 gd = mycpu; 683 td = curthread; 684 685 /* We must be in critical section. */ 686 KKASSERT(td->td_critcount); 687 688 info = &intr_block->ary[mycpuid][intr]; 689 690 /* 691 * If we are not processing any FAST interrupts, just schedule the thing. 692 */ 693 if (info->i_fast == 0) { 694 ++gd->gd_cnt.v_intr; 695 ithread_fast_sched(intr, td); 696 return(1); 697 } 698 699 /* 700 * This should not normally occur since interrupts ought to be 701 * masked if the ithread has been scheduled or is running. 702 */ 703 if (info->i_running) 704 return(1); 705 706 /* 707 * Bump the interrupt nesting level to process any FAST interrupts. 708 * Obtain the MP lock as necessary. If the MP lock cannot be obtained, 709 * schedule the interrupt thread to deal with the issue instead. 710 * 711 * To reduce overhead, just leave the MP lock held once it has been 712 * obtained. 713 */ 714 ++gd->gd_intr_nesting_level; 715 ++gd->gd_cnt.v_intr; 716 must_schedule = info->i_slow; 717 got_mplock = 0; 718 719 TD_INVARIANTS_GET(td); 720 list = &info->i_reclist; 721 722 for (rec = *list; rec; rec = nrec) { 723 /* rec may be invalid after call */ 724 nrec = rec->next; 725 726 if (rec->intr_flags & INTR_CLOCK) { 727 if ((rec->intr_flags & INTR_MPSAFE) == 0 && got_mplock == 0) { 728 if (try_mplock() == 0) { 729 /* Couldn't get the MP lock; just schedule it. */ 730 must_schedule = 1; 731 break; 732 } 733 got_mplock = 1; 734 } 735 if (rec->serializer) { 736 must_schedule += lwkt_serialize_handler_try( 737 rec->serializer, rec->handler, 738 rec->argument, frame); 739 } else { 740 rec->handler(rec->argument, frame); 741 } 742 TD_INVARIANTS_TEST(td, rec->name); 743 } 744 } 745 746 /* 747 * Cleanup 748 */ 749 --gd->gd_intr_nesting_level; 750 if (got_mplock) 751 rel_mplock(); 752 753 /* 754 * If we had a problem, or mixed fast and slow interrupt handlers are 755 * registered, schedule the ithread to catch the missed records (it 756 * will just re-run all of them). A return value of 0 indicates that 757 * all handlers have been run and the interrupt can be re-enabled, and 758 * a non-zero return indicates that the interrupt thread controls 759 * re-enablement. 760 */ 761 if (must_schedule > 0) 762 ithread_fast_sched(intr, td); 763 else if (must_schedule == 0) 764 ++info->i_count; 765 return(must_schedule); 766 } 767 768 /* 769 * Interrupt threads run this as their main loop. 770 * 771 * The handler begins execution outside a critical section and no MP lock. 772 * 773 * The i_running state starts at 0. When an interrupt occurs, the hardware 774 * interrupt is disabled and sched_ithd_hard(). The HW interrupt remains 775 * disabled until all routines have run. We then call machintr_intr_enable() 776 * to reenable the HW interrupt and deschedule us until the next interrupt. 777 * 778 * We are responsible for atomically checking i_running. i_running for our 779 * irq is only set in the context of our cpu, so a critical section is a 780 * sufficient interlock. 781 */ 782 #define LIVELOCK_TIMEFRAME(freq) ((freq) >> 2) /* 1/4 second */ 783 784 static void 785 ithread_handler(void *arg) 786 { 787 struct intr_info *info; 788 int use_limit; 789 uint32_t lseconds; 790 int intr, cpuid = mycpuid; 791 int mpheld; 792 struct intrec **list; 793 intrec_t rec, nrec; 794 globaldata_t gd; 795 struct systimer ill_timer; /* enforced freq. timer */ 796 u_int ill_count; /* interrupt livelock counter */ 797 TD_INVARIANTS_DECLARE; 798 799 ill_count = 0; 800 intr = (int)(intptr_t)arg; 801 info = &intr_block->ary[cpuid][intr]; 802 list = &info->i_reclist; 803 804 /* 805 * The loop must be entered with one critical section held. The thread 806 * does not hold the mplock on startup. 807 */ 808 gd = mycpu; 809 lseconds = gd->gd_time_seconds; 810 crit_enter_gd(gd); 811 mpheld = 0; 812 813 for (;;) { 814 /* 815 * The chain is only considered MPSAFE if all its interrupt handlers 816 * are MPSAFE. However, if intr_mpsafe has been turned off we 817 * always operate with the BGL. 818 */ 819 if (info->i_mplock_required != mpheld) { 820 if (info->i_mplock_required) { 821 KKASSERT(mpheld == 0); 822 get_mplock(); 823 mpheld = 1; 824 } else { 825 KKASSERT(mpheld != 0); 826 rel_mplock(); 827 mpheld = 0; 828 } 829 } 830 831 TD_INVARIANTS_GET(gd->gd_curthread); 832 833 /* 834 * If an interrupt is pending, clear i_running and execute the 835 * handlers. Note that certain types of interrupts can re-trigger 836 * and set i_running again. 837 * 838 * Each handler is run in a critical section. Note that we run both 839 * FAST and SLOW designated service routines. 840 */ 841 if (info->i_running) { 842 ++ill_count; 843 info->i_running = 0; 844 845 if (*list == NULL) 846 report_stray_interrupt(info, "ithread_handler"); 847 848 for (rec = *list; rec; rec = nrec) { 849 /* rec may be invalid after call */ 850 nrec = rec->next; 851 if (rec->serializer) { 852 lwkt_serialize_handler_call(rec->serializer, rec->handler, 853 rec->argument, NULL); 854 } else { 855 rec->handler(rec->argument, NULL); 856 } 857 TD_INVARIANTS_TEST(gd->gd_curthread, rec->name); 858 } 859 } 860 861 /* 862 * This is our interrupt hook to add rate randomness to the random 863 * number generator. 864 */ 865 if (info->i_random.sc_enabled > 0) 866 add_interrupt_randomness(intr); 867 868 /* 869 * Unmask the interrupt to allow it to trigger again. This only 870 * applies to certain types of interrupts (typ level interrupts). 871 * This can result in the interrupt retriggering, but the retrigger 872 * will not be processed until we cycle our critical section. 873 * 874 * Only unmask interrupts while handlers are installed. It is 875 * possible to hit a situation where no handlers are installed 876 * due to a device driver livelocking and then tearing down its 877 * interrupt on close (the parallel bus being a good example). 878 */ 879 if (intr < FIRST_SOFTINT && *list) 880 machintr_intr_enable(intr); 881 882 /* 883 * Do a quick exit/enter to catch any higher-priority interrupt 884 * sources, such as the statclock, so thread time accounting 885 * will still work. This may also cause an interrupt to re-trigger. 886 */ 887 crit_exit_gd(gd); 888 crit_enter_gd(gd); 889 890 /* 891 * LIVELOCK STATE MACHINE 892 */ 893 switch(info->i_state) { 894 case ISTATE_NORMAL: 895 /* 896 * Reset the count each second. 897 */ 898 if (lseconds != gd->gd_time_seconds) { 899 lseconds = gd->gd_time_seconds; 900 ill_count = 0; 901 } 902 903 /* 904 * If we did not exceed the frequency limit, we are done. 905 * If the interrupt has not retriggered we deschedule ourselves. 906 */ 907 if (ill_count <= livelock_limit) { 908 if (info->i_running == 0) { 909 lwkt_deschedule_self(gd->gd_curthread); 910 lwkt_switch(); 911 } 912 break; 913 } 914 915 /* 916 * Otherwise we are livelocked. Set up a periodic systimer 917 * to wake the thread up at the limit frequency. 918 */ 919 kprintf("intr %d on cpu%d at %d/%d hz, livelocked limit engaged!\n", 920 intr, cpuid, ill_count, livelock_limit); 921 info->i_state = ISTATE_LIVELOCKED; 922 if ((use_limit = livelock_limit) < 100) 923 use_limit = 100; 924 else if (use_limit > 500000) 925 use_limit = 500000; 926 systimer_init_periodic_nq(&ill_timer, ithread_livelock_wakeup, 927 (void *)(intptr_t)intr, use_limit); 928 /* fall through */ 929 case ISTATE_LIVELOCKED: 930 /* 931 * Wait for our periodic timer to go off. Since the interrupt 932 * has re-armed it can still set i_running, but it will not 933 * reschedule us while we are in a livelocked state. 934 */ 935 lwkt_deschedule_self(gd->gd_curthread); 936 lwkt_switch(); 937 938 /* 939 * Check once a second to see if the livelock condition no 940 * longer applies. 941 */ 942 if (lseconds != gd->gd_time_seconds) { 943 lseconds = gd->gd_time_seconds; 944 if (ill_count < livelock_lowater) { 945 info->i_state = ISTATE_NORMAL; 946 systimer_del(&ill_timer); 947 kprintf("intr %d on cpu%d at %d/%d hz, livelock removed\n", 948 intr, cpuid, ill_count, livelock_lowater); 949 } else if (livelock_debug == intr || 950 (bootverbose && cold)) { 951 kprintf("intr %d on cpu%d at %d/%d hz, in livelock\n", 952 intr, cpuid, ill_count, livelock_lowater); 953 } 954 ill_count = 0; 955 } 956 break; 957 } 958 } 959 /* NOT REACHED */ 960 } 961 962 /* 963 * Emergency interrupt polling thread. The thread begins execution 964 * outside a critical section with the BGL held. 965 * 966 * If emergency interrupt polling is enabled, this thread will 967 * execute all system interrupts not marked INTR_NOPOLL at the 968 * specified polling frequency. 969 * 970 * WARNING! This thread runs *ALL* interrupt service routines that 971 * are not marked INTR_NOPOLL, which basically means everything except 972 * the 8254 clock interrupt and the ATA interrupt. It has very high 973 * overhead and should only be used in situations where the machine 974 * cannot otherwise be made to work. Due to the severe performance 975 * degredation, it should not be enabled on production machines. 976 */ 977 static void 978 ithread_emergency(void *arg __unused) 979 { 980 globaldata_t gd = mycpu; 981 struct intr_info *info; 982 intrec_t rec, nrec; 983 int intr, cpuid = mycpuid; 984 TD_INVARIANTS_DECLARE; 985 986 get_mplock(); 987 crit_enter_gd(gd); 988 TD_INVARIANTS_GET(gd->gd_curthread); 989 990 for (;;) { 991 for (intr = 0; intr < max_installed_hard_intr[cpuid]; ++intr) { 992 info = &intr_block->ary[cpuid][intr]; 993 for (rec = info->i_reclist; rec; rec = nrec) { 994 /* rec may be invalid after call */ 995 nrec = rec->next; 996 if ((rec->intr_flags & INTR_NOPOLL) == 0) { 997 if (rec->serializer) { 998 lwkt_serialize_handler_try(rec->serializer, 999 rec->handler, rec->argument, NULL); 1000 } else { 1001 rec->handler(rec->argument, NULL); 1002 } 1003 TD_INVARIANTS_TEST(gd->gd_curthread, rec->name); 1004 } 1005 } 1006 } 1007 lwkt_deschedule_self(gd->gd_curthread); 1008 lwkt_switch(); 1009 } 1010 /* NOT REACHED */ 1011 } 1012 1013 /* 1014 * Systimer callback - schedule the emergency interrupt poll thread 1015 * if emergency polling is enabled. 1016 */ 1017 static 1018 void 1019 emergency_intr_timer_callback(systimer_t info, int in_ipi __unused, 1020 struct intrframe *frame __unused) 1021 { 1022 if (emergency_intr_enable) 1023 lwkt_schedule(info->data); 1024 } 1025 1026 /* 1027 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. 1028 * The data for this machine dependent, and the declarations are in machine 1029 * dependent code. The layout of intrnames and intrcnt however is machine 1030 * independent. 1031 * 1032 * We do not know the length of intrcnt and intrnames at compile time, so 1033 * calculate things at run time. 1034 */ 1035 1036 static int 1037 sysctl_intrnames(SYSCTL_HANDLER_ARGS) 1038 { 1039 struct intr_info *info; 1040 intrec_t rec; 1041 int error = 0; 1042 int len; 1043 int intr, cpuid; 1044 char buf[64]; 1045 1046 for (cpuid = 0; cpuid < ncpus; ++cpuid) { 1047 for (intr = 0; error == 0 && intr < MAX_INTS; ++intr) { 1048 info = &intr_block->ary[cpuid][intr]; 1049 1050 len = 0; 1051 buf[0] = 0; 1052 for (rec = info->i_reclist; rec; rec = rec->next) { 1053 ksnprintf(buf + len, sizeof(buf) - len, "%s%s", 1054 (len ? "/" : ""), rec->name); 1055 len += strlen(buf + len); 1056 } 1057 if (len == 0) { 1058 ksnprintf(buf, sizeof(buf), "irq%d", intr); 1059 len = strlen(buf); 1060 } 1061 error = SYSCTL_OUT(req, buf, len + 1); 1062 } 1063 } 1064 return (error); 1065 } 1066 1067 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, 1068 NULL, 0, sysctl_intrnames, "", "Interrupt Names"); 1069 1070 static int 1071 sysctl_intrcnt_all(SYSCTL_HANDLER_ARGS) 1072 { 1073 struct intr_info *info; 1074 int error = 0; 1075 int intr, cpuid; 1076 1077 for (cpuid = 0; cpuid < ncpus; ++cpuid) { 1078 for (intr = 0; intr < MAX_INTS; ++intr) { 1079 info = &intr_block->ary[cpuid][intr]; 1080 1081 error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count)); 1082 if (error) 1083 goto failed; 1084 } 1085 } 1086 failed: 1087 return(error); 1088 } 1089 1090 SYSCTL_PROC(_hw, OID_AUTO, intrcnt_all, CTLTYPE_OPAQUE | CTLFLAG_RD, 1091 NULL, 0, sysctl_intrcnt_all, "", "Interrupt Counts"); 1092 1093 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, 1094 NULL, 0, sysctl_intrcnt_all, "", "Interrupt Counts"); 1095 1096 static void 1097 int_moveto_destcpu(int *orig_cpuid0, int cpuid) 1098 { 1099 int orig_cpuid = mycpuid; 1100 1101 if (cpuid != orig_cpuid) 1102 lwkt_migratecpu(cpuid); 1103 1104 *orig_cpuid0 = orig_cpuid; 1105 } 1106 1107 static void 1108 int_moveto_origcpu(int orig_cpuid, int cpuid) 1109 { 1110 if (cpuid != orig_cpuid) 1111 lwkt_migratecpu(orig_cpuid); 1112 } 1113 1114 static void 1115 intr_init(void *dummy __unused) 1116 { 1117 int cpuid; 1118 1119 kprintf("Initialize MI interrupts\n"); 1120 1121 intr_block = kmalloc(sizeof(*intr_block), M_INTRMNG, 1122 M_INTWAIT | M_ZERO); 1123 1124 for (cpuid = 0; cpuid < ncpus; ++cpuid) { 1125 int intr; 1126 1127 for (intr = 0; intr < MAX_INTS; ++intr) { 1128 struct intr_info *info = &intr_block->ary[cpuid][intr]; 1129 1130 info->i_cpuid = cpuid; 1131 info->i_intr = intr; 1132 } 1133 } 1134 } 1135 SYSINIT(intr_init, SI_BOOT2_FINISH_PIC, SI_ORDER_ANY, intr_init, NULL); 1136