/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/serialize.h>
#include <sys/interrupt.h>
#include <sys/bus.h>
#include <sys/machintr.h>

#include <machine/frame.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

struct intr_info;

typedef struct intrec {
    struct intrec *next;
    struct intr_info *info;
    inthand2_t *handler;
    void *argument;
    char *name;
    int intr;
    int intr_flags;
    struct lwkt_serialize *serializer;
} *intrec_t;

struct intr_info {
    intrec_t i_reclist;
    struct thread *i_thread;        /* don't embed struct thread */
    struct random_softc i_random;
    long i_count;                   /* interrupts dispatched */
    int i_running;
    short i_mplock_required;
    short i_flags;
    int i_fast;
    int i_slow;
    int i_state;
    int i_errorticks;
    unsigned long i_straycount;
    int i_cpuid;
    int i_intr;
};

struct intr_info_block {
    struct intr_info ary[MAXCPU][MAX_INTS];
};

static struct intr_info_block *intr_block;
static struct intr_info *swi_info_ary[MAX_SOFTINTS];

static int max_installed_hard_intr[MAXCPU];

MALLOC_DEFINE(M_INTRMNG, "intrmng", "interrupt management");

#define EMERGENCY_INTR_POLLING_FREQ_MAX 20000

/*
 * Assert that callers into interrupt handlers don't return with
 * dangling tokens, spinlocks, or mp locks.
 */
#ifdef INVARIANTS

#define TD_INVARIANTS_DECLARE   \
    int spincount;              \
    lwkt_tokref_t curstop

#define TD_INVARIANTS_GET(td)                                       \
    do {                                                            \
        spincount = (td)->td_gd->gd_spinlocks;                      \
        curstop = (td)->td_toks_stop;                               \
    } while(0)

#define TD_INVARIANTS_TEST(td, name)                                \
    do {                                                            \
        KASSERT(spincount == (td)->td_gd->gd_spinlocks,             \
            ("spincount mismatch after interrupt handler %s",       \
            name));                                                 \
        KASSERT(curstop == (td)->td_toks_stop,                      \
            ("token count mismatch after interrupt handler %s",     \
            name));                                                 \
    } while(0)

#else

/* !INVARIANTS */

#define TD_INVARIANTS_DECLARE
#define TD_INVARIANTS_GET(td)
#define TD_INVARIANTS_TEST(td, name)

#endif /* ndef INVARIANTS */

static int sysctl_emergency_freq(SYSCTL_HANDLER_ARGS);
static int sysctl_emergency_enable(SYSCTL_HANDLER_ARGS);
static void emergency_intr_timer_callback(systimer_t, int, struct intrframe *);
static void ithread_handler(void *arg);
static void ithread_emergency(void *arg);
static void report_stray_interrupt(struct intr_info *info, const char *func);
static void int_moveto_destcpu(int *, int);
static void int_moveto_origcpu(int, int);
static void sched_ithd_intern(struct intr_info *info);

static struct systimer emergency_intr_timer[MAXCPU];
static struct thread *emergency_intr_thread[MAXCPU];

#define ISTATE_NOTHREAD     0
#define ISTATE_NORMAL       1
#define ISTATE_LIVELOCKED   2

static int livelock_limit = 40000;
static int livelock_limit_hi = 120000;
static int livelock_lowater = 20000;
static int livelock_debug = -1;
SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
    CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
SYSCTL_INT(_kern, OID_AUTO, livelock_limit_hi,
    CTLFLAG_RW, &livelock_limit_hi, 0,
    "Livelock interrupt rate limit (high frequency)");
SYSCTL_INT(_kern, OID_AUTO, livelock_lowater,
    CTLFLAG_RW, &livelock_lowater, 0, "Livelock low-water mark restore");
SYSCTL_INT(_kern, OID_AUTO, livelock_debug,
    CTLFLAG_RW, &livelock_debug, 0, "Livelock debug intr#");

static int emergency_intr_enable = 0;   /* emergency interrupt polling */
TUNABLE_INT("kern.emergency_intr_enable", &emergency_intr_enable);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_enable, CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_emergency_enable, "I", "Emergency Interrupt Poll Enable");

static int emergency_intr_freq = 10;    /* emergency polling frequency */
TUNABLE_INT("kern.emergency_intr_freq", &emergency_intr_freq);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_freq, CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_emergency_freq, "I", "Emergency Interrupt Poll Frequency");

/*
 * Sysctl support routines
 */
static int
sysctl_emergency_enable(SYSCTL_HANDLER_ARGS)
{
    int error, enabled, cpuid, freq;

    enabled = emergency_intr_enable;
    error = sysctl_handle_int(oidp, &enabled, 0, req);
    if (error || req->newptr == NULL)
        return error;
    emergency_intr_enable = enabled;
    if (emergency_intr_enable)
        freq = emergency_intr_freq;
    else
        freq = 1;

    for (cpuid = 0; cpuid < ncpus; ++cpuid)
        systimer_adjust_periodic(&emergency_intr_timer[cpuid], freq);
    return 0;
}

static int
sysctl_emergency_freq(SYSCTL_HANDLER_ARGS)
{
    int error, phz, cpuid, freq;

    phz = emergency_intr_freq;
    error = sysctl_handle_int(oidp, &phz, 0, req);
    if (error ||
        req->newptr == NULL)
        return error;
    if (phz <= 0)
        return EINVAL;
    else if (phz > EMERGENCY_INTR_POLLING_FREQ_MAX)
        phz = EMERGENCY_INTR_POLLING_FREQ_MAX;

    emergency_intr_freq = phz;
    if (emergency_intr_enable)
        freq = emergency_intr_freq;
    else
        freq = 1;

    for (cpuid = 0; cpuid < ncpus; ++cpuid)
        systimer_adjust_periodic(&emergency_intr_timer[cpuid], freq);
    return 0;
}

/*
 * Register an SWI or INTerrupt handler.
 */
void *
register_swi(int intr, inthand2_t *handler, void *arg, const char *name,
    struct lwkt_serialize *serializer, int cpuid)
{
    if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
        panic("register_swi: bad intr %d", intr);

    if (cpuid < 0)
        cpuid = intr % ncpus;
    return(register_int(intr, handler, arg, name, serializer, 0, cpuid));
}

void *
register_swi_mp(int intr, inthand2_t *handler, void *arg, const char *name,
    struct lwkt_serialize *serializer, int cpuid)
{
    if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
        panic("register_swi_mp: bad intr %d", intr);

    if (cpuid < 0)
        cpuid = intr % ncpus;
    return(register_int(intr, handler, arg, name, serializer,
        INTR_MPSAFE, cpuid));
}

void *
register_int(int intr, inthand2_t *handler, void *arg, const char *name,
    struct lwkt_serialize *serializer, int intr_flags, int cpuid)
{
    struct intr_info *info;
    struct intrec **list;
    intrec_t rec = NULL;
    int orig_cpuid;

    KKASSERT(cpuid >= 0 && cpuid < ncpus);

    if (intr < 0 || intr >= MAX_INTS)
        panic("register_int: bad intr %d", intr);
    if (name == NULL)
        name = "???";
    info = &intr_block->ary[cpuid][intr];

    int_moveto_destcpu(&orig_cpuid, cpuid);

    /*
     * This intr has been registered as an exclusive one, so
     * it can't be shared.
     */
    if (info->i_flags & INTR_EXCL)
        goto done;

    /*
     * This intr has been registered as a shared one, so it
     * can't be used for an exclusive handler.
     */
    list = &info->i_reclist;
    if ((intr_flags & INTR_EXCL) && *list != NULL)
        goto done;

    /*
     * Construct an interrupt handler record
     */
    rec = kmalloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT);
    rec->name = kmalloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT);
    strcpy(rec->name, name);

    rec->info = info;
    rec->handler = handler;
    rec->argument = arg;
    rec->intr = intr;
    rec->intr_flags = intr_flags;
    rec->next = NULL;
    rec->serializer = serializer;

    /*
     * Create an emergency polling thread and set up a systimer to wake
     * it up.  objcache may not be operational yet, so use kmalloc().
     */
    if (emergency_intr_thread[cpuid] == NULL) {
        emergency_intr_thread[cpuid] = kmalloc(sizeof(struct thread), M_DEVBUF,
            M_INTWAIT | M_ZERO);
        lwkt_create(ithread_emergency, NULL, NULL,
            emergency_intr_thread[cpuid],
            TDF_NOSTART | TDF_INTTHREAD, cpuid, "ithreadE %d",
            cpuid);
        systimer_init_periodic_nq(&emergency_intr_timer[cpuid],
            emergency_intr_timer_callback,
            emergency_intr_thread[cpuid],
            (emergency_intr_enable ? emergency_intr_freq : 1));
    }

    /*
     * Create an interrupt thread if necessary, leave it in an unscheduled
     * state.
     */
    if (info->i_state == ISTATE_NOTHREAD) {
        info->i_state = ISTATE_NORMAL;
        info->i_thread = kmalloc(sizeof(struct thread), M_DEVBUF,
            M_INTWAIT | M_ZERO);
        lwkt_create(ithread_handler, (void *)(intptr_t)intr, NULL,
            info->i_thread, TDF_NOSTART | TDF_INTTHREAD, cpuid,
            "ithread%d %d", intr, cpuid);
        if (intr >= FIRST_SOFTINT)
            lwkt_setpri(info->i_thread, TDPRI_SOFT_NORM);
        else
            lwkt_setpri(info->i_thread, TDPRI_INT_MED);
        info->i_thread->td_preemptable = lwkt_preempt;
    }

    /*
     * Keep track of how many fast and slow interrupts we have.
     * Set i_mplock_required if any handler in the chain requires
     * the MP lock to operate.
     */
    if ((intr_flags & INTR_MPSAFE) == 0) {
        info->i_mplock_required = 1;
        kprintf("interrupt uses mplock: %s\n", name);
    }
    if (intr_flags & INTR_CLOCK)
        ++info->i_fast;
    else
        ++info->i_slow;

    info->i_flags |= (intr_flags & INTR_EXCL);
    if (info->i_slow + info->i_fast == 1 && (intr_flags & INTR_HIFREQ)) {
        /*
         * Allow a high frequency interrupt if this intr is not
         * shared yet.
         */
        info->i_flags |= INTR_HIFREQ;
    } else {
        info->i_flags &= ~INTR_HIFREQ;
    }

    /*
     * Enable random number generation keying off of this interrupt.
     */
    if ((intr_flags & INTR_NOENTROPY) == 0 && info->i_random.sc_enabled == 0) {
        info->i_random.sc_enabled = 1;
        info->i_random.sc_intr = intr;
    }

    /*
     * Add the record to the interrupt list.
     */
    crit_enter();
    while (*list != NULL)
        list = &(*list)->next;
    *list = rec;
    crit_exit();

    /*
     * Update max_installed_hard_intr to make the emergency intr poll
     * a bit more efficient.
     */
    if (intr < FIRST_SOFTINT) {
        if (max_installed_hard_intr[cpuid] <= intr)
            max_installed_hard_intr[cpuid] = intr + 1;
    }

    if (intr >= FIRST_SOFTINT)
        swi_info_ary[intr - FIRST_SOFTINT] = info;

    /*
     * Set up the machine level interrupt vector.
     */
    if (intr < FIRST_SOFTINT && info->i_slow + info->i_fast == 1)
        machintr_intr_setup(intr, intr_flags);

done:
    int_moveto_origcpu(orig_cpuid, cpuid);
    return(rec);
}

void
unregister_swi(void *id, int intr, int cpuid)
{
    if (cpuid < 0)
        cpuid = intr % ncpus;

    unregister_int(id, cpuid);
}

void
unregister_int(void *id, int cpuid)
{
    struct intr_info *info;
    struct intrec **list;
    intrec_t rec;
    int intr, orig_cpuid;

    KKASSERT(cpuid >= 0 && cpuid < ncpus);

    intr = ((intrec_t)id)->intr;

    if (intr < 0 || intr >= MAX_INTS)
        panic("unregister_int: bad intr %d", intr);

    info = &intr_block->ary[cpuid][intr];

    int_moveto_destcpu(&orig_cpuid, cpuid);

    /*
     * Remove the interrupt descriptor, adjust the descriptor count,
     * and tear down the machine level vector if this was the last
     * interrupt.
     */
    crit_enter();
    list = &info->i_reclist;
    while ((rec = *list) != NULL) {
        if (rec == id)
            break;
        list = &rec->next;
    }
    if (rec) {
        intrec_t rec0;

        *list = rec->next;
        if (rec->intr_flags & INTR_CLOCK)
            --info->i_fast;
        else
            --info->i_slow;
        if (intr < FIRST_SOFTINT && info->i_fast + info->i_slow == 0)
            machintr_intr_teardown(intr);

        /*
         * Clear i_mplock_required if no handlers in the chain require
         * the MP lock.
         */
        for (rec0 = info->i_reclist; rec0; rec0 = rec0->next) {
            if ((rec0->intr_flags & INTR_MPSAFE) == 0)
                break;
        }
        if (rec0 == NULL)
            info->i_mplock_required = 0;
    }

    if (info->i_reclist == NULL) {
        info->i_flags = 0;
        if (intr >= FIRST_SOFTINT)
            swi_info_ary[intr - FIRST_SOFTINT] = NULL;
    } else if (info->i_fast + info->i_slow == 1 &&
        (info->i_reclist->intr_flags & INTR_HIFREQ)) {
        /* Unshared high frequency interrupt. */
        info->i_flags |= INTR_HIFREQ;
    }

    crit_exit();

    int_moveto_origcpu(orig_cpuid, cpuid);

    /*
     * Free the record.
     */
    if (rec != NULL) {
        kfree(rec->name, M_DEVBUF);
        kfree(rec, M_DEVBUF);
    } else {
        kprintf("warning: unregister_int: int %d handler for %s not found\n",
            intr, ((intrec_t)id)->name);
    }
}

long
get_interrupt_counter(int intr, int cpuid)
{
    struct intr_info *info;

    KKASSERT(cpuid >= 0 && cpuid < ncpus);

    if (intr < 0 || intr >= MAX_INTS)
        panic("get_interrupt_counter: bad intr %d", intr);
    info = &intr_block->ary[cpuid][intr];
    return(info->i_count);
}

void
register_randintr(int intr)
{
    struct intr_info *info;
    int cpuid;

    if (intr < 0 || intr >= MAX_INTS)
        panic("register_randintr: bad intr %d", intr);

    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
        info = &intr_block->ary[cpuid][intr];
        info->i_random.sc_intr = intr;
        info->i_random.sc_enabled = 1;
    }
}

void
unregister_randintr(int intr)
{
    struct intr_info *info;
    int cpuid;

    if (intr < 0 || intr >= MAX_INTS)
        panic("unregister_randintr: bad intr %d", intr);

    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
        info = &intr_block->ary[cpuid][intr];
        info->i_random.sc_enabled = -1;
    }
}

int
next_registered_randintr(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
        panic("next_registered_randintr: bad intr %d", intr);

    while (intr < MAX_INTS) {
        int cpuid;

        for (cpuid = 0; cpuid < ncpus; ++cpuid) {
            info = &intr_block->ary[cpuid][intr];
            if (info->i_random.sc_enabled > 0)
                return intr;
        }
        ++intr;
    }
    return intr;
}

/*
 * Dispatch an interrupt.  If there's nothing to do we have a stray
 * interrupt and can just return, leaving the interrupt masked.
 *
 * We need to schedule the interrupt and set its i_running bit.  If
 * we are not on the interrupt thread's cpu we have to send a message
 * to the correct cpu that will issue the desired action (interlocking
 * with the interrupt thread's critical section).  We do NOT attempt to
 * reschedule interrupts whose i_running bit is already set because
 * this would prematurely wake up a livelock-limited interrupt thread.
 *
 * i_running is only tested/set on the same cpu as the interrupt thread.
 *
 * We are NOT in a critical section, which will allow the scheduled
 * interrupt to preempt us.  The MP lock might *NOT* be held here.
 */
static void
sched_ithd_remote(void *arg)
{
    sched_ithd_intern(arg);
}

static void
sched_ithd_intern(struct intr_info *info)
{
    ++info->i_count;
    if (info->i_state != ISTATE_NOTHREAD) {
        if (info->i_reclist == NULL) {
            report_stray_interrupt(info, "sched_ithd");
        } else {
            if (info->i_thread->td_gd == mycpu) {
                if (info->i_running == 0) {
                    info->i_running = 1;
                    if (info->i_state != ISTATE_LIVELOCKED)
                        lwkt_schedule(info->i_thread); /* MIGHT PREEMPT */
                }
            } else {
                lwkt_send_ipiq(info->i_thread->td_gd, sched_ithd_remote, info);
            }
        }
    } else {
        report_stray_interrupt(info, "sched_ithd");
    }
}

void
sched_ithd_soft(int intr)
{
    struct intr_info *info;

    KKASSERT(intr >= FIRST_SOFTINT && intr < MAX_INTS);

    info = swi_info_ary[intr - FIRST_SOFTINT];
    if (info != NULL) {
        sched_ithd_intern(info);
    } else {
        kprintf("unregistered softint %d got scheduled on cpu%d\n",
            intr, mycpuid);
    }
}

void
sched_ithd_hard(int intr)
{
    KKASSERT(intr >= 0 && intr < MAX_HARDINTS);
    sched_ithd_intern(&intr_block->ary[mycpuid][intr]);
}

#ifdef _KERNEL_VIRTUAL

void
sched_ithd_hard_virtual(int intr)
{
    KKASSERT(intr >= 0 && intr < MAX_HARDINTS);
    sched_ithd_intern(&intr_block->ary[0][intr]);
}

void *
register_int_virtual(int intr, inthand2_t *handler, void *arg, const char *name,
    struct lwkt_serialize *serializer, int intr_flags)
{
    return register_int(intr, handler, arg, name, serializer, intr_flags, 0);
}

void
unregister_int_virtual(void *id)
{
    unregister_int(id, 0);
}

#endif /* _KERNEL_VIRTUAL */

static void
report_stray_interrupt(struct intr_info *info, const char *func)
{
    ++info->i_straycount;
    if (info->i_straycount < 10) {
        if (info->i_errorticks == ticks)
            return;
        info->i_errorticks = ticks;
        kprintf("%s: stray interrupt %d on cpu%d\n",
            func, info->i_intr, mycpuid);
    } else if (info->i_straycount == 10) {
        kprintf("%s: %ld stray interrupts %d on cpu%d - "
            "there will be no further reports\n", func,
            info->i_straycount, info->i_intr, mycpuid);
    }
}

/*
 * This is run from a periodic SYSTIMER (and thus must be MP safe; the BGL
 * might not be held).
 */
static void
ithread_livelock_wakeup(systimer_t st, int in_ipi __unused,
    struct intrframe *frame __unused)
{
    struct intr_info *info;

    info = &intr_block->ary[mycpuid][(int)(intptr_t)st->data];
    if (info->i_state != ISTATE_NOTHREAD)
        lwkt_schedule(info->i_thread);
}

/*
 * Schedule the ithread from within a fast intr handler.
 *
 * XXX Protect the sched_ithd_hard() call with gd_intr_nesting_level?
 * Interrupts aren't enabled, but still...
 */
static __inline void
ithread_fast_sched(int intr, thread_t td)
{
    ++td->td_nest_count;

    /*
     * We are already in a critical section; exit it now to
     * allow preemption.
     */
    crit_exit_quick(td);
    sched_ithd_hard(intr);
    crit_enter_quick(td);

    --td->td_nest_count;
}

/*
 * This function is called directly from the ICU or APIC vector code assembly
 * to process an interrupt.  The critical section and interrupt deferral
 * checks have already been done and the function is entered WITH
 * a critical section held.  The BGL may or may not be held.
 *
 * Must return non-zero if we do not want the vector code to re-enable
 * the interrupt (which we don't if we have to schedule the interrupt).
 */
int ithread_fast_handler(struct intrframe *frame);

int
ithread_fast_handler(struct intrframe *frame)
{
    int intr;
    struct intr_info *info;
    struct intrec **list;
    int must_schedule;
    int got_mplock;
    TD_INVARIANTS_DECLARE;
    intrec_t rec, nrec;
    globaldata_t gd;
    thread_t td;

    intr = frame->if_vec;
    gd = mycpu;
    td = curthread;

    /* We must be in a critical section. */
    KKASSERT(td->td_critcount);

    info = &intr_block->ary[mycpuid][intr];

    /*
     * If we are not processing any FAST interrupts, just schedule the thing.
     */
    if (info->i_fast == 0) {
        ++gd->gd_cnt.v_intr;
        ithread_fast_sched(intr, td);
        return(1);
    }

    /*
     * This should not normally occur since interrupts ought to be
     * masked if the ithread has been scheduled or is running.
     */
    if (info->i_running)
        return(1);

    /*
     * Bump the interrupt nesting level to process any FAST interrupts.
     * Obtain the MP lock as necessary.  If the MP lock cannot be obtained,
     * schedule the interrupt thread to deal with the issue instead.
     *
     * To reduce overhead, just leave the MP lock held once it has been
     * obtained.
     */
    ++gd->gd_intr_nesting_level;
    ++gd->gd_cnt.v_intr;
    must_schedule = info->i_slow;
    got_mplock = 0;

    TD_INVARIANTS_GET(td);
    list = &info->i_reclist;

    for (rec = *list; rec; rec = nrec) {
        /* rec may be invalid after call */
        nrec = rec->next;

        if (rec->intr_flags & INTR_CLOCK) {
            if ((rec->intr_flags & INTR_MPSAFE) == 0 && got_mplock == 0) {
                if (try_mplock() == 0) {
                    /* Couldn't get the MP lock; just schedule it. */
                    must_schedule = 1;
                    break;
                }
                got_mplock = 1;
            }
            if (rec->serializer) {
                must_schedule += lwkt_serialize_handler_try(
                    rec->serializer, rec->handler,
                    rec->argument, frame);
            } else {
                rec->handler(rec->argument, frame);
            }
            TD_INVARIANTS_TEST(td, rec->name);
        }
    }

    /*
     * Cleanup
     */
    --gd->gd_intr_nesting_level;
    if (got_mplock)
        rel_mplock();

    /*
     * If we had a problem, or mixed fast and slow interrupt handlers are
     * registered, schedule the ithread to catch the missed records (it
     * will just re-run all of them).  A return value of 0 indicates that
     * all handlers have been run and the interrupt can be re-enabled, and
     * a non-zero return indicates that the interrupt thread controls
     * re-enablement.
     */
    if (must_schedule > 0)
        ithread_fast_sched(intr, td);
    else if (must_schedule == 0)
        ++info->i_count;
    return(must_schedule);
}

/*
 * Interrupt threads run this as their main loop.
 *
 * The handler begins execution outside a critical section and with no
 * MP lock held.
 *
 * The i_running state starts at 0.  When an interrupt occurs, the hardware
 * interrupt is disabled and sched_ithd_hard() is called.  The HW interrupt
 * remains disabled until all routines have run.  We then call
 * machintr_intr_enable() to reenable the HW interrupt and deschedule us
 * until the next interrupt.
 *
 * We are responsible for atomically checking i_running.  i_running for our
 * irq is only set in the context of our cpu, so a critical section is a
 * sufficient interlock.
 */
#define LIVELOCK_TIMEFRAME(freq)    ((freq) >> 2)   /* 1/4 second */

static void
ithread_handler(void *arg)
{
    struct intr_info *info;
    int use_limit;
    uint32_t lseconds;
    int intr, cpuid = mycpuid;
    int mpheld;
    struct intrec **list;
    intrec_t rec, nrec;
    globaldata_t gd;
    struct systimer ill_timer;      /* enforced freq. timer */
    u_int ill_count;                /* interrupt livelock counter */
    int upper_limit;                /* interrupt livelock upper limit */
    TD_INVARIANTS_DECLARE;

    ill_count = 0;
    intr = (int)(intptr_t)arg;
    info = &intr_block->ary[cpuid][intr];
    list = &info->i_reclist;

    /*
     * The loop must be entered with one critical section held.  The thread
     * does not hold the mplock on startup.
     */
    gd = mycpu;
    lseconds = gd->gd_time_seconds;
    crit_enter_gd(gd);
    mpheld = 0;

    for (;;) {
        /*
         * The chain is only considered MPSAFE if all its interrupt handlers
         * are MPSAFE.  However, if intr_mpsafe has been turned off we
         * always operate with the BGL.
         */
        if (info->i_mplock_required != mpheld) {
            if (info->i_mplock_required) {
                KKASSERT(mpheld == 0);
                get_mplock();
                mpheld = 1;
            } else {
                KKASSERT(mpheld != 0);
                rel_mplock();
                mpheld = 0;
            }
        }

        TD_INVARIANTS_GET(gd->gd_curthread);

        /*
         * If an interrupt is pending, clear i_running and execute the
         * handlers.  Note that certain types of interrupts can re-trigger
         * and set i_running again.
         *
         * Each handler is run in a critical section.  Note that we run both
         * FAST and SLOW designated service routines.
         */
        if (info->i_running) {
            ++ill_count;
            info->i_running = 0;

            if (*list == NULL)
                report_stray_interrupt(info, "ithread_handler");

            for (rec = *list; rec; rec = nrec) {
                /* rec may be invalid after call */
                nrec = rec->next;
                if (rec->handler == NULL) {
                    kprintf("NULL HANDLER %s\n", rec->name);
                } else if (rec->serializer) {
                    lwkt_serialize_handler_call(rec->serializer, rec->handler,
                        rec->argument, NULL);
                } else {
                    rec->handler(rec->argument, NULL);
                }
                TD_INVARIANTS_TEST(gd->gd_curthread, rec->name);
            }
        }

        /*
         * This is our interrupt hook to add rate randomness to the random
         * number generator.
         */
        if (info->i_random.sc_enabled > 0)
            add_interrupt_randomness(intr);

        /*
         * Unmask the interrupt to allow it to trigger again.  This only
         * applies to certain types of interrupts (typ level interrupts).
         * This can result in the interrupt retriggering, but the retrigger
         * will not be processed until we cycle our critical section.
         *
         * Only unmask interrupts while handlers are installed.  It is
         * possible to hit a situation where no handlers are installed
         * due to a device driver livelocking and then tearing down its
         * interrupt on close (the parallel bus being a good example).
         */
        if (intr < FIRST_SOFTINT && *list)
            machintr_intr_enable(intr);

        /*
         * Do a quick exit/enter to catch any higher-priority interrupt
         * sources, such as the statclock, so thread time accounting
         * will still work.  This may also cause an interrupt to re-trigger.
         */
        crit_exit_gd(gd);
        crit_enter_gd(gd);

        /*
         * LIVELOCK STATE MACHINE
         */
        switch(info->i_state) {
        case ISTATE_NORMAL:
            /*
             * Reset the count each second.
             */
            if (lseconds != gd->gd_time_seconds) {
                lseconds = gd->gd_time_seconds;
                ill_count = 0;
            }

            /*
             * If we did not exceed the frequency limit, we are done.
             * If the interrupt has not retriggered we deschedule ourselves.
             */
            if (info->i_flags & INTR_HIFREQ)
                upper_limit = livelock_limit_hi;
            else
                upper_limit = livelock_limit;
            if (ill_count <= upper_limit) {
                if (info->i_running == 0) {
                    lwkt_deschedule_self(gd->gd_curthread);
                    lwkt_switch();
                }
                break;
            }

            /*
             * Otherwise we are livelocked.  Set up a periodic systimer
             * to wake the thread up at the limit frequency.
             */
            kprintf("intr %d on cpu%d at %d/%d hz, livelocked limit engaged!\n",
                intr, cpuid, ill_count, upper_limit);
            info->i_state = ISTATE_LIVELOCKED;
            if ((use_limit = upper_limit) < 100)
                use_limit = 100;
            else if (use_limit > 500000)
                use_limit = 500000;
            systimer_init_periodic_nq(&ill_timer, ithread_livelock_wakeup,
                (void *)(intptr_t)intr, use_limit);
            /* fall through */
        case ISTATE_LIVELOCKED:
            /*
             * Wait for our periodic timer to go off.  Since the interrupt
             * has re-armed it can still set i_running, but it will not
             * reschedule us while we are in a livelocked state.
             */
            lwkt_deschedule_self(gd->gd_curthread);
            lwkt_switch();

            /*
             * Check once a second to see if the livelock condition no
             * longer applies.
             */
            if (lseconds != gd->gd_time_seconds) {
                lseconds = gd->gd_time_seconds;
                if (ill_count < livelock_lowater) {
                    info->i_state = ISTATE_NORMAL;
                    systimer_del(&ill_timer);
                    kprintf("intr %d on cpu%d at %d/%d hz, livelock removed\n",
                        intr, cpuid, ill_count, livelock_lowater);
                } else if (livelock_debug == intr ||
                    (bootverbose && cold)) {
                    kprintf("intr %d on cpu%d at %d/%d hz, in livelock\n",
                        intr, cpuid, ill_count, livelock_lowater);
                }
                ill_count = 0;
            }
            break;
        }
    }
    /* NOT REACHED */
}

/*
 * Emergency interrupt polling thread.  The thread begins execution
 * outside a critical section with the BGL held.
 *
 * If emergency interrupt polling is enabled, this thread will
 * execute all system interrupts not marked INTR_NOPOLL at the
 * specified polling frequency.
 *
 * WARNING! This thread runs *ALL* interrupt service routines that
 * are not marked INTR_NOPOLL, which basically means everything except
 * the 8254 clock interrupt and the ATA interrupt.  It has very high
 * overhead and should only be used in situations where the machine
 * cannot otherwise be made to work.  Due to the severe performance
 * degradation, it should not be enabled on production machines.
 */
static void
ithread_emergency(void *arg __unused)
{
    globaldata_t gd = mycpu;
    struct intr_info *info;
    intrec_t rec, nrec;
    int intr, cpuid = mycpuid;
    TD_INVARIANTS_DECLARE;

    get_mplock();
    crit_enter_gd(gd);
    TD_INVARIANTS_GET(gd->gd_curthread);

    for (;;) {
        for (intr = 0; intr < max_installed_hard_intr[cpuid]; ++intr) {
            info = &intr_block->ary[cpuid][intr];
            for (rec = info->i_reclist; rec; rec = nrec) {
                /* rec may be invalid after call */
                nrec = rec->next;
                if ((rec->intr_flags & INTR_NOPOLL) == 0) {
                    if (rec->serializer) {
                        lwkt_serialize_handler_try(rec->serializer,
                            rec->handler, rec->argument, NULL);
                    } else {
                        rec->handler(rec->argument, NULL);
                    }
                    TD_INVARIANTS_TEST(gd->gd_curthread, rec->name);
                }
            }
        }
        lwkt_deschedule_self(gd->gd_curthread);
        lwkt_switch();
    }
    /* NOT REACHED */
}

/*
 * Systimer callback - schedule the emergency interrupt poll thread
 * if emergency polling is enabled.
 */
static void
emergency_intr_timer_callback(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
    if (emergency_intr_enable)
        lwkt_schedule(info->data);
}

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt, however,
 * is machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */

static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
    struct intr_info *info;
    intrec_t rec;
    int error = 0;
    int len;
    int intr, cpuid;
    char buf[64];

    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
        for (intr = 0; error == 0 && intr < MAX_INTS; ++intr) {
            info = &intr_block->ary[cpuid][intr];

            len = 0;
            buf[0] = 0;
            for (rec = info->i_reclist; rec; rec = rec->next) {
                ksnprintf(buf + len, sizeof(buf) - len, "%s%s",
                    (len ? "/" : ""), rec->name);
                len += strlen(buf + len);
            }
            if (len == 0) {
                ksnprintf(buf, sizeof(buf), "irq%d", intr);
                len = strlen(buf);
            }
            error = SYSCTL_OUT(req, buf, len + 1);
        }
    }
    return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt_all(SYSCTL_HANDLER_ARGS)
{
    struct intr_info *info;
    int error = 0;
    int intr, cpuid;

    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
        for (intr = 0; intr < MAX_INTS; ++intr) {
            info = &intr_block->ary[cpuid][intr];

            error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));
            if (error)
                goto failed;
        }
    }
failed:
    return(error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt_all, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt_all, "", "Interrupt Counts");

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt_all, "", "Interrupt Counts");

static void
int_moveto_destcpu(int *orig_cpuid0, int cpuid)
{
    int orig_cpuid = mycpuid;

    if (cpuid != orig_cpuid)
        lwkt_migratecpu(cpuid);

    *orig_cpuid0 = orig_cpuid;
}

static void
int_moveto_origcpu(int orig_cpuid, int cpuid)
{
    if (cpuid != orig_cpuid)
        lwkt_migratecpu(orig_cpuid);
}

static void
intr_init(void *dummy __unused)
{
    int cpuid;

    kprintf("Initialize MI interrupts\n");

    intr_block = kmalloc(sizeof(*intr_block), M_INTRMNG,
        M_INTWAIT | M_ZERO);

    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
        int intr;

        for (intr = 0; intr < MAX_INTS; ++intr) {
            struct intr_info *info = &intr_block->ary[cpuid][intr];

            info->i_cpuid = cpuid;
            info->i_intr = intr;
        }
    }
}
SYSINIT(intr_init, SI_BOOT2_FINISH_PIC, SI_ORDER_ANY, intr_init, NULL);