/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/serialize.h>
#include <sys/interrupt.h>
#include <sys/bus.h>
#include <sys/machintr.h>

#include <machine/frame.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

struct intr_info;

typedef struct intrec {
	struct intrec	*next;
	struct intr_info *info;
	inthand2_t	*handler;
	void		*argument;
	char		*name;
	int		intr;
	int		intr_flags;
	struct lwkt_serialize *serializer;
} *intrec_t;

struct intr_info {
	intrec_t	i_reclist;
	struct thread	*i_thread;	/* don't embed struct thread */
	struct random_softc i_random;
	long		i_count;	/* interrupts dispatched */
	int		i_running;
	short		i_mplock_required;
	short		i_flags;
	int		i_fast;
	int		i_slow;
	int		i_state;
	int		i_errorticks;
	unsigned long	i_straycount;
	int		i_cpuid;
	int		i_intr;
};

struct intr_info_block {
	struct intr_info ary[MAXCPU][MAX_INTS];
};
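
/*
 * Although intr_info_block declares room for MAXCPU cpus, the backing
 * storage is allocated at boot in intr_init() and is sized for the
 * actual ncpus (note the offsetof() based kmalloc() there).
 */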
static struct intr_info_block *intr_block;
static struct intr_info *swi_info_ary[MAX_SOFTINTS];

static int max_installed_hard_intr[MAXCPU];

MALLOC_DEFINE(M_INTRMNG, "intrmng", "interrupt management");


#define EMERGENCY_INTR_POLLING_FREQ_MAX 20000

/*
 * Assert that callers into interrupt handlers don't return with
 * dangling tokens, spinlocks, or mp locks.
 */
#ifdef INVARIANTS

#define TD_INVARIANTS_DECLARE	\
	int spincount;		\
	lwkt_tokref_t curstop

#define TD_INVARIANTS_GET(td)					\
	do {							\
		spincount = (td)->td_gd->gd_spinlocks;		\
		curstop = (td)->td_toks_stop;			\
	} while(0)

#define TD_INVARIANTS_TEST(td, name)					\
	do {								\
		KASSERT(spincount == (td)->td_gd->gd_spinlocks,		\
			("spincount mismatch after interrupt handler %s", \
			name));						\
		KASSERT(curstop == (td)->td_toks_stop,			\
			("token count mismatch after interrupt handler %s", \
			name));						\
	} while(0)

#else

/* !INVARIANTS */

#define TD_INVARIANTS_DECLARE
#define TD_INVARIANTS_GET(td)
#define TD_INVARIANTS_TEST(td, name)

#endif /* ndef INVARIANTS */
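
/*
 * The TD_INVARIANTS_* macros bracket handler invocation, as in
 * ithread_fast_handler() and ithread_handler() below:
 *
 *	TD_INVARIANTS_DECLARE;
 *	...
 *	TD_INVARIANTS_GET(td);
 *	rec->handler(rec->argument, frame);
 *	TD_INVARIANTS_TEST(td, rec->name);
 */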

static int sysctl_emergency_freq(SYSCTL_HANDLER_ARGS);
static int sysctl_emergency_enable(SYSCTL_HANDLER_ARGS);
static void emergency_intr_timer_callback(systimer_t, int, struct intrframe *);
static void ithread_handler(void *arg);
static void ithread_emergency(void *arg);
static void report_stray_interrupt(struct intr_info *info, const char *func);
static void int_moveto_destcpu(int *, int);
static void int_moveto_origcpu(int, int);
static void sched_ithd_intern(struct intr_info *info);

static struct systimer emergency_intr_timer[MAXCPU];
static struct thread *emergency_intr_thread[MAXCPU];

#define ISTATE_NOTHREAD		0
#define ISTATE_NORMAL		1
#define ISTATE_LIVELOCKED	2

static int livelock_limit = 40000;
static int livelock_limit_hi = 120000;
static int livelock_lowater = 20000;
static int livelock_debug = -1;
SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
	CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
SYSCTL_INT(_kern, OID_AUTO, livelock_limit_hi,
	CTLFLAG_RW, &livelock_limit_hi, 0,
	"Livelock interrupt rate limit (high frequency)");
SYSCTL_INT(_kern, OID_AUTO, livelock_lowater,
	CTLFLAG_RW, &livelock_lowater, 0, "Livelock low-water mark restore");
SYSCTL_INT(_kern, OID_AUTO, livelock_debug,
	CTLFLAG_RW, &livelock_debug, 0, "Livelock debug intr#");

static int emergency_intr_enable = 0;	/* emergency interrupt polling */
TUNABLE_INT("kern.emergency_intr_enable", &emergency_intr_enable);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_enable, CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_emergency_enable, "I", "Emergency Interrupt Poll Enable");

static int emergency_intr_freq = 10;	/* emergency polling frequency */
TUNABLE_INT("kern.emergency_intr_freq", &emergency_intr_freq);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_freq, CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_emergency_freq, "I", "Emergency Interrupt Poll Frequency");

/*
 * Sysctl support routines
 */
static int
sysctl_emergency_enable(SYSCTL_HANDLER_ARGS)
{
	int error, enabled, cpuid, freq, origcpu;

	enabled = emergency_intr_enable;
	error = sysctl_handle_int(oidp, &enabled, 0, req);
	if (error || req->newptr == NULL)
		return error;
	emergency_intr_enable = enabled;
	if (emergency_intr_enable)
		freq = emergency_intr_freq;
	else
		freq = 1;

	origcpu = mycpuid;
	for (cpuid = 0; cpuid < ncpus; ++cpuid) {
		lwkt_migratecpu(cpuid);
		systimer_adjust_periodic(&emergency_intr_timer[cpuid], freq);
	}
	lwkt_migratecpu(origcpu);
	return 0;
}

static int
sysctl_emergency_freq(SYSCTL_HANDLER_ARGS)
{
	int error, phz, cpuid, freq, origcpu;

	phz = emergency_intr_freq;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > EMERGENCY_INTR_POLLING_FREQ_MAX)
		phz = EMERGENCY_INTR_POLLING_FREQ_MAX;

	emergency_intr_freq = phz;
	if (emergency_intr_enable)
		freq = emergency_intr_freq;
	else
		freq = 1;

	origcpu = mycpuid;
	for (cpuid = 0; cpuid < ncpus; ++cpuid) {
		lwkt_migratecpu(cpuid);
		systimer_adjust_periodic(&emergency_intr_timer[cpuid], freq);
	}
	lwkt_migratecpu(origcpu);
	return 0;
}

/*
 * Register an SWI or INTerrupt handler.
 */
void *
register_swi(int intr, inthand2_t *handler, void *arg, const char *name,
	struct lwkt_serialize *serializer, int cpuid)
{
	if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
		panic("register_swi: bad intr %d", intr);

	if (cpuid < 0)
		cpuid = intr % ncpus;
	return(register_int(intr, handler, arg, name, serializer, 0, cpuid));
}

void *
register_swi_mp(int intr, inthand2_t *handler, void *arg, const char *name,
	struct lwkt_serialize *serializer, int cpuid)
{
	if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
		panic("register_swi_mp: bad intr %d", intr);

	if (cpuid < 0)
		cpuid = intr % ncpus;
	return(register_int(intr, handler, arg, name, serializer,
		INTR_MPSAFE, cpuid));
}

void *
register_int(int intr, inthand2_t *handler, void *arg, const char *name,
	struct lwkt_serialize *serializer, int intr_flags, int cpuid)
{
	struct intr_info *info;
	struct intrec **list;
	intrec_t rec = NULL;
	int orig_cpuid;

	KKASSERT(cpuid >= 0 && cpuid < ncpus);

	if (intr < 0 || intr >= MAX_INTS)
		panic("register_int: bad intr %d", intr);
	if (name == NULL)
		name = "???";
	info = &intr_block->ary[cpuid][intr];

	int_moveto_destcpu(&orig_cpuid, cpuid);

	/*
	 * This intr has been registered as an exclusive one, so
	 * it can't be shared.
	 */
	if (info->i_flags & INTR_EXCL)
		goto done;

	/*
	 * This intr has been registered as a shared one, so it
	 * can't be used by an exclusive handler.
	 */
	list = &info->i_reclist;
	if ((intr_flags & INTR_EXCL) && *list != NULL)
		goto done;

	/*
	 * Construct an interrupt handler record
	 */
	rec = kmalloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT);
	rec->name = kmalloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT);
	strcpy(rec->name, name);

	rec->info = info;
	rec->handler = handler;
	rec->argument = arg;
	rec->intr = intr;
	rec->intr_flags = intr_flags;
	rec->next = NULL;
	rec->serializer = serializer;

	/*
	 * Create an emergency polling thread and set up a systimer to wake
	 * it up.  objcache may not be operational yet, so use kmalloc().
	 */
	if (emergency_intr_thread[cpuid] == NULL) {
		emergency_intr_thread[cpuid] = kmalloc(sizeof(struct thread),
		    M_DEVBUF, M_INTWAIT | M_ZERO);
		lwkt_create(ithread_emergency, NULL, NULL,
		    emergency_intr_thread[cpuid],
		    TDF_NOSTART | TDF_INTTHREAD, cpuid, "ithreadE %d",
		    cpuid);
		systimer_init_periodic_nq(&emergency_intr_timer[cpuid],
		    emergency_intr_timer_callback,
		    emergency_intr_thread[cpuid],
		    (emergency_intr_enable ? emergency_intr_freq : 1));
	}

	/*
	 * Create an interrupt thread if necessary, leave it in an unscheduled
	 * state.
	 */
	if (info->i_state == ISTATE_NOTHREAD) {
		info->i_state = ISTATE_NORMAL;
		info->i_thread = kmalloc(sizeof(struct thread), M_DEVBUF,
		    M_INTWAIT | M_ZERO);
		lwkt_create(ithread_handler, (void *)(intptr_t)intr, NULL,
		    info->i_thread, TDF_NOSTART | TDF_INTTHREAD, cpuid,
		    "ithread%d %d", intr, cpuid);
		if (intr >= FIRST_SOFTINT)
			lwkt_setpri(info->i_thread, TDPRI_SOFT_NORM);
		else
			lwkt_setpri(info->i_thread, TDPRI_INT_MED);
		info->i_thread->td_preemptable = lwkt_preempt;
	}

	/*
	 * Keep track of how many fast and slow interrupts we have.
	 * Set i_mplock_required if any handler in the chain requires
	 * the MP lock to operate.
	 */
	if ((intr_flags & INTR_MPSAFE) == 0) {
		info->i_mplock_required = 1;
		kprintf("interrupt uses mplock: %s\n", name);
	}
	if (intr_flags & INTR_CLOCK)
		++info->i_fast;
	else
		++info->i_slow;

	info->i_flags |= (intr_flags & INTR_EXCL);
	if (info->i_slow + info->i_fast == 1 && (intr_flags & INTR_HIFREQ)) {
		/*
		 * Allow a high frequency interrupt, if this intr is not
		 * shared yet.
		 */
		info->i_flags |= INTR_HIFREQ;
	} else {
		info->i_flags &= ~INTR_HIFREQ;
	}

	/*
	 * Enable random number generation keying off of this interrupt.
	 */
	if ((intr_flags & INTR_NOENTROPY) == 0 &&
	    info->i_random.sc_enabled == 0) {
		info->i_random.sc_enabled = 1;
		info->i_random.sc_intr = intr;
	}

	/*
	 * Add the record to the interrupt list.
	 */
	crit_enter();
	while (*list != NULL)
		list = &(*list)->next;
	*list = rec;
	crit_exit();

	/*
	 * Update max_installed_hard_intr to make the emergency intr poll
	 * a bit more efficient.
	 */
	if (intr < FIRST_SOFTINT) {
		if (max_installed_hard_intr[cpuid] <= intr)
			max_installed_hard_intr[cpuid] = intr + 1;
	}

	if (intr >= FIRST_SOFTINT)
		swi_info_ary[intr - FIRST_SOFTINT] = info;

	/*
	 * Setup the machine level interrupt vector
	 */
	if (intr < FIRST_SOFTINT && info->i_slow + info->i_fast == 1)
		machintr_intr_setup(intr, intr_flags);

done:
	int_moveto_origcpu(orig_cpuid, cpuid);
	return(rec);
}
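
/*
 * Typical driver-side usage of the registration API (a sketch; the
 * mydev_* names, sc, irq, and MYDEV_CPUID are hypothetical and not part
 * of this file).  register_int() returns an opaque cookie which is
 * later passed to unregister_int() along with the cpu the handler was
 * registered on.  mydev_intr matches the inthand2_t handler shape used
 * by the dispatchers in this file:
 *
 *	static void mydev_intr(void *arg, void *frame);
 *
 *	sc->sc_cookie = register_int(irq, mydev_intr, sc, "mydev",
 *				     &sc->sc_serializer, INTR_MPSAFE,
 *				     MYDEV_CPUID);
 *	...
 *	unregister_int(sc->sc_cookie, MYDEV_CPUID);
 */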

void
unregister_swi(void *id, int intr, int cpuid)
{
	if (cpuid < 0)
		cpuid = intr % ncpus;

	unregister_int(id, cpuid);
}

void
unregister_int(void *id, int cpuid)
{
	struct intr_info *info;
	struct intrec **list;
	intrec_t rec;
	int intr, orig_cpuid;

	KKASSERT(cpuid >= 0 && cpuid < ncpus);

	intr = ((intrec_t)id)->intr;

	if (intr < 0 || intr >= MAX_INTS)
		panic("unregister_int: bad intr %d", intr);

	info = &intr_block->ary[cpuid][intr];

	int_moveto_destcpu(&orig_cpuid, cpuid);

	/*
	 * Remove the interrupt descriptor, adjust the descriptor count,
	 * and tear down the machine level vector if this was the last
	 * interrupt.
	 */
	crit_enter();
	list = &info->i_reclist;
	while ((rec = *list) != NULL) {
		if (rec == id)
			break;
		list = &rec->next;
	}
	if (rec) {
		intrec_t rec0;

		*list = rec->next;
		if (rec->intr_flags & INTR_CLOCK)
			--info->i_fast;
		else
			--info->i_slow;
		if (intr < FIRST_SOFTINT && info->i_fast + info->i_slow == 0)
			machintr_intr_teardown(intr);

		/*
		 * Clear i_mplock_required if no handlers in the chain
		 * require the MP lock.
		 */
		for (rec0 = info->i_reclist; rec0; rec0 = rec0->next) {
			if ((rec0->intr_flags & INTR_MPSAFE) == 0)
				break;
		}
		if (rec0 == NULL)
			info->i_mplock_required = 0;
	}

	if (info->i_reclist == NULL) {
		info->i_flags = 0;
		if (intr >= FIRST_SOFTINT)
			swi_info_ary[intr - FIRST_SOFTINT] = NULL;
	} else if (info->i_fast + info->i_slow == 1 &&
	    (info->i_reclist->intr_flags & INTR_HIFREQ)) {
		/* Unshared high frequency interrupt. */
		info->i_flags |= INTR_HIFREQ;
	}

	crit_exit();

	int_moveto_origcpu(orig_cpuid, cpuid);

	/*
	 * Free the record.
	 */
	if (rec != NULL) {
		kfree(rec->name, M_DEVBUF);
		kfree(rec, M_DEVBUF);
	} else {
		kprintf("warning: unregister_int: int %d handler for %s not found\n",
		    intr, ((intrec_t)id)->name);
	}
}

long
get_interrupt_counter(int intr, int cpuid)
{
	struct intr_info *info;

	KKASSERT(cpuid >= 0 && cpuid < ncpus);

	if (intr < 0 || intr >= MAX_INTS)
		panic("get_interrupt_counter: bad intr %d", intr);
	info = &intr_block->ary[cpuid][intr];
	return(info->i_count);
}

void
register_randintr(int intr)
{
	struct intr_info *info;
	int cpuid;

	if (intr < 0 || intr >= MAX_INTS)
		panic("register_randintr: bad intr %d", intr);

	for (cpuid = 0; cpuid < ncpus; ++cpuid) {
		info = &intr_block->ary[cpuid][intr];
		info->i_random.sc_intr = intr;
		info->i_random.sc_enabled = 1;
	}
}

void
unregister_randintr(int intr)
{
	struct intr_info *info;
	int cpuid;

	if (intr < 0 || intr >= MAX_INTS)
		panic("unregister_randintr: bad intr %d", intr);

	for (cpuid = 0; cpuid < ncpus; ++cpuid) {
		info = &intr_block->ary[cpuid][intr];
		info->i_random.sc_enabled = -1;
	}
}

int
next_registered_randintr(int intr)
{
	struct intr_info *info;

	if (intr < 0 || intr >= MAX_INTS)
		panic("next_registered_randintr: bad intr %d", intr);

	while (intr < MAX_INTS) {
		int cpuid;

		for (cpuid = 0; cpuid < ncpus; ++cpuid) {
			info = &intr_block->ary[cpuid][intr];
			if (info->i_random.sc_enabled > 0)
				return intr;
		}
		++intr;
	}
	return intr;
}

/*
 * Dispatch an interrupt.  If there's nothing to do we have a stray
 * interrupt and can just return, leaving the interrupt masked.
 *
 * We need to schedule the interrupt and set its i_running bit.  If
 * we are not on the interrupt thread's cpu we have to send a message
 * to the correct cpu that will issue the desired action (interlocking
 * with the interrupt thread's critical section).  We do NOT attempt to
 * reschedule interrupts whose i_running bit is already set because
 * this would prematurely wake up a livelock-limited interrupt thread.
 *
 * i_running is only tested/set on the same cpu as the interrupt thread.
 *
 * We are NOT in a critical section, which will allow the scheduled
 * interrupt to preempt us.  The MP lock might *NOT* be held here.
 */
static void
sched_ithd_remote(void *arg)
{
	sched_ithd_intern(arg);
}

static void
sched_ithd_intern(struct intr_info *info)
{
	++info->i_count;
	if (info->i_state != ISTATE_NOTHREAD) {
		if (info->i_reclist == NULL) {
			report_stray_interrupt(info, "sched_ithd");
		} else {
			if (info->i_thread->td_gd == mycpu) {
				if (info->i_running == 0) {
					info->i_running = 1;
					if (info->i_state != ISTATE_LIVELOCKED)
						lwkt_schedule(info->i_thread); /* MIGHT PREEMPT */
				}
			} else {
				lwkt_send_ipiq(info->i_thread->td_gd,
				    sched_ithd_remote, info);
			}
		}
	} else {
		report_stray_interrupt(info, "sched_ithd");
	}
}

void
sched_ithd_soft(int intr)
{
	struct intr_info *info;

	KKASSERT(intr >= FIRST_SOFTINT && intr < MAX_INTS);

	info = swi_info_ary[intr - FIRST_SOFTINT];
	if (info != NULL) {
		sched_ithd_intern(info);
	} else {
		kprintf("unregistered softint %d got scheduled on cpu%d\n",
		    intr, mycpuid);
	}
}
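
/*
 * A registered SWI is triggered by calling sched_ithd_soft() with the
 * same softint number used at registration time, e.g. (a sketch;
 * MYDEV_SWI is a hypothetical offset):
 *
 *	sched_ithd_soft(FIRST_SOFTINT + MYDEV_SWI);
 */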

void
sched_ithd_hard(int intr)
{
	KKASSERT(intr >= 0 && intr < MAX_HARDINTS);
	sched_ithd_intern(&intr_block->ary[mycpuid][intr]);
}

#ifdef _KERNEL_VIRTUAL

void
sched_ithd_hard_virtual(int intr)
{
	KKASSERT(intr >= 0 && intr < MAX_HARDINTS);
	sched_ithd_intern(&intr_block->ary[0][intr]);
}

void *
register_int_virtual(int intr, inthand2_t *handler, void *arg,
	const char *name, struct lwkt_serialize *serializer, int intr_flags)
{
	return register_int(intr, handler, arg, name, serializer, intr_flags, 0);
}

void
unregister_int_virtual(void *id)
{
	unregister_int(id, 0);
}

#endif /* _KERNEL_VIRTUAL */

static void
report_stray_interrupt(struct intr_info *info, const char *func)
{
	++info->i_straycount;
	if (info->i_straycount < 10) {
		if (info->i_errorticks == ticks)
			return;
		info->i_errorticks = ticks;
		kprintf("%s: stray interrupt %d on cpu%d\n",
		    func, info->i_intr, mycpuid);
	} else if (info->i_straycount == 10) {
		kprintf("%s: %ld stray interrupts %d on cpu%d - "
			"there will be no further reports\n", func,
			info->i_straycount, info->i_intr, mycpuid);
	}
}

/*
 * This is run from a periodic SYSTIMER (and thus must be MP safe, the BGL
 * might not be held).
 */
static void
ithread_livelock_wakeup(systimer_t st, int in_ipi __unused,
	struct intrframe *frame __unused)
{
	struct intr_info *info;

	info = &intr_block->ary[mycpuid][(int)(intptr_t)st->data];
	if (info->i_state != ISTATE_NOTHREAD)
		lwkt_schedule(info->i_thread);
}

/*
 * Schedule ithread within fast intr handler
 *
 * Temporarily bump the current thread's td_nest_count to prevent deep
 * preemptions and splz/doreti stacks.
 */
static __inline void
ithread_fast_sched(int intr, thread_t td)
{
	++td->td_nest_count;
	crit_exit_quick(td);
	sched_ithd_hard(intr);
	crit_enter_quick(td);
	--td->td_nest_count;
}

/*
 * This function is called directly from the ICU or APIC vector code assembly
 * to process an interrupt.  The critical section and interrupt deferral
 * checks have already been done but the function is entered WITHOUT
 * a critical section held.  The BGL may or may not be held.
 *
 * Must return non-zero if we do not want the vector code to re-enable
 * the interrupt (which we don't if we have to schedule the interrupt).
 */
int ithread_fast_handler(struct intrframe *frame);

int
ithread_fast_handler(struct intrframe *frame)
{
	int intr;
	struct intr_info *info;
	struct intrec **list;
	int must_schedule;
	int got_mplock;
	TD_INVARIANTS_DECLARE;
	intrec_t rec, nrec;
	globaldata_t gd;
	thread_t td;

	intr = frame->if_vec;
	gd = mycpu;
	td = curthread;

	/* We must be in a critical section. */
	KKASSERT(td->td_critcount);

	/* Race condition during early boot */
	if (intr_block == NULL)
		return 0;

	info = &intr_block->ary[mycpuid][intr];

	/*
	 * If we are not processing any FAST interrupts, just schedule the
	 * thing.
	 */
	if (info->i_fast == 0) {
		++gd->gd_cnt.v_intr;
		ithread_fast_sched(intr, td);
		return(1);
	}

	/*
	 * This should not normally occur since interrupts ought to be
	 * masked if the ithread has been scheduled or is running.
	 */
	if (info->i_running)
		return(1);

	/*
	 * Bump the interrupt nesting level to process any FAST interrupts.
	 * Obtain the MP lock as necessary.  If the MP lock cannot be obtained,
	 * schedule the interrupt thread to deal with the issue instead.
	 *
	 * To reduce overhead, just leave the MP lock held once it has been
	 * obtained.
	 */
	++gd->gd_intr_nesting_level;
	++gd->gd_cnt.v_intr;
	must_schedule = info->i_slow;
	got_mplock = 0;

	TD_INVARIANTS_GET(td);
	list = &info->i_reclist;

	for (rec = *list; rec; rec = nrec) {
		/* rec may be invalid after call */
		nrec = rec->next;

		if (rec->intr_flags & INTR_CLOCK) {
			if ((rec->intr_flags & INTR_MPSAFE) == 0 &&
			    got_mplock == 0) {
				if (try_mplock() == 0) {
					/* Couldn't get the MP lock; just schedule it. */
					must_schedule = 1;
					break;
				}
				got_mplock = 1;
			}
			if (rec->serializer) {
				must_schedule += lwkt_serialize_handler_try(
				    rec->serializer, rec->handler,
				    rec->argument, frame);
			} else {
				rec->handler(rec->argument, frame);
			}
			TD_INVARIANTS_TEST(td, rec->name);
		}
	}

	/*
	 * Cleanup
	 */
	--gd->gd_intr_nesting_level;
	if (got_mplock)
		rel_mplock();

	/*
	 * If we had a problem, or mixed fast and slow interrupt handlers are
	 * registered, schedule the ithread to catch the missed records (it
	 * will just re-run all of them).  A return value of 0 indicates that
	 * all handlers have been run and the interrupt can be re-enabled, and
	 * a non-zero return indicates that the interrupt thread controls
	 * re-enablement.
	 */
	if (must_schedule > 0)
		ithread_fast_sched(intr, td);
	else if (must_schedule == 0)
		++info->i_count;
	return(must_schedule);
}

/*
 * Interrupt threads run this as their main loop.
 *
 * The handler begins execution outside a critical section and with no
 * MP lock held.
 *
 * The i_running state starts at 0.  When an interrupt occurs, the hardware
 * interrupt is disabled and sched_ithd_hard() is called.  The HW interrupt
 * remains disabled until all routines have run.  We then call
 * machintr_intr_enable() to reenable the HW interrupt and deschedule us
 * until the next interrupt.
 *
 * We are responsible for atomically checking i_running.  i_running for our
 * irq is only set in the context of our cpu, so a critical section is a
 * sufficient interlock.
 */
#define LIVELOCK_TIMEFRAME(freq)	((freq) >> 2)	/* 1/4 second */

static void
ithread_handler(void *arg)
{
	struct intr_info *info;
	int use_limit;
	uint32_t lseconds;
	int intr, cpuid = mycpuid;
	int mpheld;
	struct intrec **list;
	intrec_t rec, nrec;
	globaldata_t gd;
	struct systimer ill_timer;	/* enforced freq. timer */
	u_int ill_count;		/* interrupt livelock counter */
	int upper_limit;		/* interrupt livelock upper limit */
	TD_INVARIANTS_DECLARE;

	ill_count = 0;
	intr = (int)(intptr_t)arg;
	info = &intr_block->ary[cpuid][intr];
	list = &info->i_reclist;

	/*
	 * The loop must be entered with one critical section held.  The
	 * thread does not hold the mplock on startup.
	 */
	gd = mycpu;
	lseconds = gd->gd_time_seconds;
	crit_enter_gd(gd);
	mpheld = 0;

	for (;;) {
		/*
		 * The chain is only considered MPSAFE if all its interrupt
		 * handlers are MPSAFE.  However, if intr_mpsafe has been
		 * turned off we always operate with the BGL.
		 */
		if (info->i_mplock_required != mpheld) {
			if (info->i_mplock_required) {
				KKASSERT(mpheld == 0);
				get_mplock();
				mpheld = 1;
			} else {
				KKASSERT(mpheld != 0);
				rel_mplock();
				mpheld = 0;
			}
		}

		TD_INVARIANTS_GET(gd->gd_curthread);

		/*
		 * If an interrupt is pending, clear i_running and execute the
		 * handlers.  Note that certain types of interrupts can
		 * re-trigger and set i_running again.
		 *
		 * Each handler is run in a critical section.  Note that we
		 * run both FAST and SLOW designated service routines.
		 */
		if (info->i_running) {
			++ill_count;
			info->i_running = 0;

			if (*list == NULL)
				report_stray_interrupt(info, "ithread_handler");

			for (rec = *list; rec; rec = nrec) {
				/* rec may be invalid after call */
				nrec = rec->next;
				if (rec->handler == NULL) {
					kprintf("NULL HANDLER %s\n", rec->name);
				} else if (rec->serializer) {
					lwkt_serialize_handler_call(
					    rec->serializer, rec->handler,
					    rec->argument, NULL);
				} else {
					rec->handler(rec->argument, NULL);
				}
				TD_INVARIANTS_TEST(gd->gd_curthread, rec->name);
			}
		}

		/*
		 * This is our interrupt hook to add rate randomness to the
		 * random number generator.
		 */
		if (info->i_random.sc_enabled > 0)
			add_interrupt_randomness(intr);

		/*
		 * Unmask the interrupt to allow it to trigger again.  This
		 * only applies to certain types of interrupts (typ level
		 * interrupts).  This can result in the interrupt
		 * retriggering, but the retrigger will not be processed
		 * until we cycle our critical section.
		 *
		 * Only unmask interrupts while handlers are installed.  It is
		 * possible to hit a situation where no handlers are installed
		 * due to a device driver livelocking and then tearing down
		 * its interrupt on close (the parallel bus being a good
		 * example).
		 */
		if (intr < FIRST_SOFTINT && *list)
			machintr_intr_enable(intr);

		/*
		 * Do a quick exit/enter to catch any higher-priority
		 * interrupt sources, such as the statclock, so thread time
		 * accounting will still work.  This may also cause an
		 * interrupt to re-trigger.
		 */
		crit_exit_gd(gd);
		crit_enter_gd(gd);
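
		/*
		 * Worked example of the limiter below, using the sysctl
		 * defaults from the top of this file: ill_count is reset
		 * once per wall-clock second, so a non-HIFREQ interrupt
		 * must be dispatched more than livelock_limit (40000)
		 * times within one second before the limiter engages; an
		 * INTR_HIFREQ chain is allowed livelock_limit_hi (120000).
		 * Once livelocked, servicing is paced by the systimer at
		 * the limit frequency (clamped to the range [100, 500000]
		 * hz) until the observed rate drops below livelock_lowater
		 * (20000).
		 */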

		/*
		 * LIVELOCK STATE MACHINE
		 */
		switch(info->i_state) {
		case ISTATE_NORMAL:
			/*
			 * Reset the count each second.
			 */
			if (lseconds != gd->gd_time_seconds) {
				lseconds = gd->gd_time_seconds;
				ill_count = 0;
			}

			/*
			 * If we did not exceed the frequency limit, we are
			 * done.  If the interrupt has not retriggered we
			 * deschedule ourselves.
			 */
			if (info->i_flags & INTR_HIFREQ)
				upper_limit = livelock_limit_hi;
			else
				upper_limit = livelock_limit;
			if (ill_count <= upper_limit) {
				if (info->i_running == 0) {
					lwkt_deschedule_self(gd->gd_curthread);
					lwkt_switch();
				}
				break;
			}

			/*
			 * Otherwise we are livelocked.  Set up a periodic
			 * systimer to wake the thread up at the limit
			 * frequency.
			 */
			kprintf("intr %d on cpu%d at %d/%d hz, livelocked limit engaged!\n",
			    intr, cpuid, ill_count, upper_limit);
			info->i_state = ISTATE_LIVELOCKED;
			if ((use_limit = upper_limit) < 100)
				use_limit = 100;
			else if (use_limit > 500000)
				use_limit = 500000;
			systimer_init_periodic_nq(&ill_timer,
			    ithread_livelock_wakeup,
			    (void *)(intptr_t)intr, use_limit);
			/* fall through */
		case ISTATE_LIVELOCKED:
			/*
			 * Wait for our periodic timer to go off.  Since the
			 * interrupt has re-armed it can still set i_running,
			 * but it will not reschedule us while we are in a
			 * livelocked state.
			 */
			lwkt_deschedule_self(gd->gd_curthread);
			lwkt_switch();

			/*
			 * Check once a second to see if the livelock
			 * condition no longer applies.
			 */
			if (lseconds != gd->gd_time_seconds) {
				lseconds = gd->gd_time_seconds;
				if (ill_count < livelock_lowater) {
					info->i_state = ISTATE_NORMAL;
					systimer_del(&ill_timer);
					kprintf("intr %d on cpu%d at %d/%d hz, livelock removed\n",
					    intr, cpuid, ill_count,
					    livelock_lowater);
				} else if (livelock_debug == intr ||
					   (bootverbose && cold)) {
					kprintf("intr %d on cpu%d at %d/%d hz, in livelock\n",
					    intr, cpuid, ill_count,
					    livelock_lowater);
				}
				ill_count = 0;
			}
			break;
		}
	}
	/* NOT REACHED */
}

/*
 * Emergency interrupt polling thread.  The thread begins execution
 * outside a critical section with the BGL held.
 *
 * If emergency interrupt polling is enabled, this thread will
 * execute all system interrupts not marked INTR_NOPOLL at the
 * specified polling frequency.
 *
 * WARNING! This thread runs *ALL* interrupt service routines that
 * are not marked INTR_NOPOLL, which basically means everything except
 * the 8254 clock interrupt and the ATA interrupt.  It has very high
 * overhead and should only be used in situations where the machine
 * cannot otherwise be made to work.  Due to the severe performance
 * degradation, it should not be enabled on production machines.
 */
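
/*
 * Example of turning the poller on (a sketch; the values are
 * illustrative only).  Both variables are registered as tunables above,
 * so they may also be preset from the loader:
 *
 *	sysctl kern.emergency_intr_enable=1
 *	sysctl kern.emergency_intr_freq=100
 */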
static void
ithread_emergency(void *arg __unused)
{
	globaldata_t gd = mycpu;
	struct intr_info *info;
	intrec_t rec, nrec;
	int intr, cpuid = mycpuid;
	TD_INVARIANTS_DECLARE;

	get_mplock();
	crit_enter_gd(gd);
	TD_INVARIANTS_GET(gd->gd_curthread);

	for (;;) {
		for (intr = 0; intr < max_installed_hard_intr[cpuid]; ++intr) {
			info = &intr_block->ary[cpuid][intr];
			for (rec = info->i_reclist; rec; rec = nrec) {
				/* rec may be invalid after call */
				nrec = rec->next;
				if ((rec->intr_flags & INTR_NOPOLL) == 0) {
					if (rec->serializer) {
						lwkt_serialize_handler_try(
						    rec->serializer,
						    rec->handler,
						    rec->argument, NULL);
					} else {
						rec->handler(rec->argument, NULL);
					}
					TD_INVARIANTS_TEST(gd->gd_curthread,
					    rec->name);
				}
			}
		}
		lwkt_deschedule_self(gd->gd_curthread);
		lwkt_switch();
	}
	/* NOT REACHED */
}

/*
 * Systimer callback - schedule the emergency interrupt poll thread
 * if emergency polling is enabled.
 */
static void
emergency_intr_timer_callback(systimer_t info, int in_ipi __unused,
	struct intrframe *frame __unused)
{
	if (emergency_intr_enable)
		lwkt_schedule(info->data);
}

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however
 * is machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
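
/*
 * Userland consumers read these nodes as opaque arrays.  A minimal
 * sketch of reading hw.intrcnt_all from a program (the counts are one
 * long per interrupt per cpu, in the order emitted below):
 *
 *	size_t len;
 *	long *counts;
 *
 *	sysctlbyname("hw.intrcnt_all", NULL, &len, NULL, 0);
 *	counts = malloc(len);
 *	sysctlbyname("hw.intrcnt_all", counts, &len, NULL, 0);
 */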
"/" : ""), rec->name); 1107 len += strlen(buf + len); 1108 } 1109 if (len == 0) { 1110 ksnprintf(buf, sizeof(buf), "irq%d", intr); 1111 len = strlen(buf); 1112 } 1113 error = SYSCTL_OUT(req, buf, len + 1); 1114 } 1115 } 1116 return (error); 1117 } 1118 1119 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, 1120 NULL, 0, sysctl_intrnames, "", "Interrupt Names"); 1121 1122 static int 1123 sysctl_intrcnt_all(SYSCTL_HANDLER_ARGS) 1124 { 1125 struct intr_info *info; 1126 int error = 0; 1127 int intr, cpuid; 1128 1129 for (cpuid = 0; cpuid < ncpus; ++cpuid) { 1130 for (intr = 0; intr < MAX_INTS; ++intr) { 1131 info = &intr_block->ary[cpuid][intr]; 1132 1133 error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count)); 1134 if (error) 1135 goto failed; 1136 } 1137 } 1138 failed: 1139 return(error); 1140 } 1141 1142 SYSCTL_PROC(_hw, OID_AUTO, intrcnt_all, CTLTYPE_OPAQUE | CTLFLAG_RD, 1143 NULL, 0, sysctl_intrcnt_all, "", "Interrupt Counts"); 1144 1145 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, 1146 NULL, 0, sysctl_intrcnt_all, "", "Interrupt Counts"); 1147 1148 static void 1149 int_moveto_destcpu(int *orig_cpuid0, int cpuid) 1150 { 1151 int orig_cpuid = mycpuid; 1152 1153 if (cpuid != orig_cpuid) 1154 lwkt_migratecpu(cpuid); 1155 1156 *orig_cpuid0 = orig_cpuid; 1157 } 1158 1159 static void 1160 int_moveto_origcpu(int orig_cpuid, int cpuid) 1161 { 1162 if (cpuid != orig_cpuid) 1163 lwkt_migratecpu(orig_cpuid); 1164 } 1165 1166 static void 1167 intr_init(void *dummy __unused) 1168 { 1169 int cpuid; 1170 1171 kprintf("Initialize MI interrupts for %d cpus\n", ncpus); 1172 1173 intr_block = kmalloc(offsetof(struct intr_info_block, ary[ncpus][0]), 1174 M_INTRMNG, M_INTWAIT | M_ZERO); 1175 1176 for (cpuid = 0; cpuid < ncpus; ++cpuid) { 1177 int intr; 1178 1179 for (intr = 0; intr < MAX_INTS; ++intr) { 1180 struct intr_info *info = &intr_block->ary[cpuid][intr]; 1181 1182 info->i_cpuid = cpuid; 1183 info->i_intr = intr; 1184 } 1185 } 1186 } 1187 SYSINIT(intr_init, SI_BOOT2_FINISH_PIC, SI_ORDER_ANY, intr_init, NULL); 1188