/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/serialize.h>
#include <sys/interrupt.h>
#include <sys/bus.h>
#include <sys/machintr.h>

#include <machine/frame.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

struct intr_info;

typedef struct intrec {
	struct intrec	*next;
	struct intr_info *info;
	inthand2_t	*handler;
	void		*argument;
	char		*name;
	int		intr;
	int		intr_flags;
	struct lwkt_serialize *serializer;
} *intrec_t;

struct intr_info {
	intrec_t	i_reclist;
	struct thread	i_thread;
	struct random_softc i_random;
	int		i_running;
	long		i_count;	/* interrupts dispatched */
	int		i_mplock_required;
	int		i_fast;
	int		i_slow;
	int		i_state;
	int		i_errorticks;
	unsigned long	i_straycount;
	int		i_cpuid;
	int		i_intr;
};

static struct intr_info intr_info_ary[MAXCPU][MAX_INTS];
static struct intr_info *swi_info_ary[MAX_SOFTINTS];

static int max_installed_hard_intr[MAXCPU];

#define EMERGENCY_INTR_POLLING_FREQ_MAX 20000

/*
 * Assert that callers into interrupt handlers don't return with
 * dangling tokens, spinlocks, or mp locks.
 */
#ifdef INVARIANTS

#define TD_INVARIANTS_DECLARE	\
	int spincount;		\
	lwkt_tokref_t curstop

#define TD_INVARIANTS_GET(td)					\
	do {							\
		spincount = (td)->td_gd->gd_spinlocks_wr;	\
		curstop = (td)->td_toks_stop;			\
	} while(0)

#define TD_INVARIANTS_TEST(td, name)					\
	do {								\
		KASSERT(spincount == (td)->td_gd->gd_spinlocks_wr,	\
			("spincount mismatch after interrupt handler %s", \
			name));						\
		KASSERT(curstop == (td)->td_toks_stop,			\
			("token count mismatch after interrupt handler %s", \
			name));						\
	} while(0)

#else

/* !INVARIANTS */

#define TD_INVARIANTS_DECLARE
#define TD_INVARIANTS_GET(td)
#define TD_INVARIANTS_TEST(td, name)

#endif /* INVARIANTS */
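
/*
 * Usage sketch: these macros bracket handler invocations later in this
 * file, snapshotting the spinlock/token state before the call and
 * asserting it is unchanged afterwards:
 *
 *	TD_INVARIANTS_DECLARE;
 *	...
 *	TD_INVARIANTS_GET(td);
 *	rec->handler(rec->argument, frame);
 *	TD_INVARIANTS_TEST(td, rec->name);
 */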

static int sysctl_emergency_freq(SYSCTL_HANDLER_ARGS);
static int sysctl_emergency_enable(SYSCTL_HANDLER_ARGS);
static void emergency_intr_timer_callback(systimer_t, int, struct intrframe *);
static void ithread_handler(void *arg);
static void ithread_emergency(void *arg);
static void report_stray_interrupt(struct intr_info *info, const char *func);
static void int_moveto_destcpu(int *, int);
static void int_moveto_origcpu(int, int);
static void sched_ithd_intern(struct intr_info *info);

static struct systimer emergency_intr_timer[MAXCPU];
static struct thread emergency_intr_thread[MAXCPU];

#define ISTATE_NOTHREAD		0
#define ISTATE_NORMAL		1
#define ISTATE_LIVELOCKED	2

static int livelock_limit = 40000;
static int livelock_lowater = 20000;
static int livelock_debug = -1;
SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
	CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
SYSCTL_INT(_kern, OID_AUTO, livelock_lowater,
	CTLFLAG_RW, &livelock_lowater, 0, "Livelock low-water mark restore");
SYSCTL_INT(_kern, OID_AUTO, livelock_debug,
	CTLFLAG_RW, &livelock_debug, 0, "Livelock debug intr#");

static int emergency_intr_enable = 0;	/* emergency interrupt polling */
TUNABLE_INT("kern.emergency_intr_enable", &emergency_intr_enable);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_enable, CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_emergency_enable, "I", "Emergency Interrupt Poll Enable");

static int emergency_intr_freq = 10;	/* emergency polling frequency */
TUNABLE_INT("kern.emergency_intr_freq", &emergency_intr_freq);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_freq, CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_emergency_freq, "I", "Emergency Interrupt Poll Frequency");

/*
 * Sysctl support routines
 */
static int
sysctl_emergency_enable(SYSCTL_HANDLER_ARGS)
{
	int error, enabled, cpuid, freq;

	enabled = emergency_intr_enable;
	error = sysctl_handle_int(oidp, &enabled, 0, req);
	if (error || req->newptr == NULL)
		return error;
	emergency_intr_enable = enabled;
	if (emergency_intr_enable)
		freq = emergency_intr_freq;
	else
		freq = 1;

	for (cpuid = 0; cpuid < ncpus; ++cpuid)
		systimer_adjust_periodic(&emergency_intr_timer[cpuid], freq);
	return 0;
}

static int
sysctl_emergency_freq(SYSCTL_HANDLER_ARGS)
{
	int error, phz, cpuid, freq;

	phz = emergency_intr_freq;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > EMERGENCY_INTR_POLLING_FREQ_MAX)
		phz = EMERGENCY_INTR_POLLING_FREQ_MAX;

	emergency_intr_freq = phz;
	if (emergency_intr_enable)
		freq = emergency_intr_freq;
	else
		freq = 1;

	for (cpuid = 0; cpuid < ncpus; ++cpuid)
		systimer_adjust_periodic(&emergency_intr_timer[cpuid], freq);
	return 0;
}
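
/*
 * Example (illustrative): emergency polling can be pre-set from the
 * loader via the tunables above or toggled at run time, e.g.
 *
 *	sysctl kern.emergency_intr_freq=100
 *	sysctl kern.emergency_intr_enable=1
 */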

/*
 * Register an SWI or hardware interrupt handler.
 */
void *
register_swi(int intr, inthand2_t *handler, void *arg, const char *name,
	     struct lwkt_serialize *serializer, int cpuid)
{
	if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
		panic("register_swi: bad intr %d", intr);

	if (cpuid < 0)
		cpuid = intr % ncpus;
	return(register_int(intr, handler, arg, name, serializer, 0, cpuid));
}

void *
register_swi_mp(int intr, inthand2_t *handler, void *arg, const char *name,
		struct lwkt_serialize *serializer, int cpuid)
{
	if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
		panic("register_swi_mp: bad intr %d", intr);

	if (cpuid < 0)
		cpuid = intr % ncpus;
	return(register_int(intr, handler, arg, name, serializer,
			    INTR_MPSAFE, cpuid));
}
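
/*
 * Usage sketch (hypothetical driver, illustrative only -- "mydrv", its
 * softc, and MYDRV_SWI are not part of this file): an MPSAFE software
 * interrupt is registered once, and the softint is later kicked via
 * sched_ithd_soft():
 *
 *	static void
 *	mydrv_swi(void *arg, void *frame)
 *	{
 *		struct mydrv_softc *sc = arg;
 *		...
 *	}
 *
 *	sc->sc_swi_id = register_swi_mp(FIRST_SOFTINT + MYDRV_SWI, mydrv_swi,
 *					sc, "mydrvswi", NULL, -1);
 *	...
 *	sched_ithd_soft(FIRST_SOFTINT + MYDRV_SWI);
 */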

void *
register_int(int intr, inthand2_t *handler, void *arg, const char *name,
	     struct lwkt_serialize *serializer, int intr_flags, int cpuid)
{
	struct intr_info *info;
	struct intrec **list;
	intrec_t rec;
	int orig_cpuid;

	KKASSERT(cpuid >= 0 && cpuid < ncpus);

	if (intr < 0 || intr >= MAX_INTS)
		panic("register_int: bad intr %d", intr);
	if (name == NULL)
		name = "???";
	info = &intr_info_ary[cpuid][intr];

	/*
	 * Construct an interrupt handler record
	 */
	rec = kmalloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT);
	rec->name = kmalloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT);
	strcpy(rec->name, name);

	rec->info = info;
	rec->handler = handler;
	rec->argument = arg;
	rec->intr = intr;
	rec->intr_flags = intr_flags;
	rec->next = NULL;
	rec->serializer = serializer;

	int_moveto_destcpu(&orig_cpuid, cpuid);

	/*
	 * Create an emergency polling thread and set up a systimer to wake
	 * it up.
	 */
	if (emergency_intr_thread[cpuid].td_kstack == NULL) {
		lwkt_create(ithread_emergency, NULL, NULL,
			    &emergency_intr_thread[cpuid],
			    TDF_NOSTART | TDF_INTTHREAD, cpuid, "ithreadE %d",
			    cpuid);
		systimer_init_periodic_nq(&emergency_intr_timer[cpuid],
			    emergency_intr_timer_callback,
			    &emergency_intr_thread[cpuid],
			    (emergency_intr_enable ? emergency_intr_freq : 1));
	}

	/*
	 * Create an interrupt thread if necessary, leave it in an unscheduled
	 * state.
	 */
	if (info->i_state == ISTATE_NOTHREAD) {
		info->i_state = ISTATE_NORMAL;
		lwkt_create(ithread_handler, (void *)(intptr_t)intr, NULL,
			    &info->i_thread, TDF_NOSTART | TDF_INTTHREAD, cpuid,
			    "ithread%d %d", intr, cpuid);
		if (intr >= FIRST_SOFTINT)
			lwkt_setpri(&info->i_thread, TDPRI_SOFT_NORM);
		else
			lwkt_setpri(&info->i_thread, TDPRI_INT_MED);
		info->i_thread.td_preemptable = lwkt_preempt;
	}

	list = &info->i_reclist;

	/*
	 * Keep track of how many fast and slow interrupts we have.
	 * Set i_mplock_required if any handler in the chain requires
	 * the MP lock to operate.
	 */
	if ((intr_flags & INTR_MPSAFE) == 0)
		info->i_mplock_required = 1;
	if (intr_flags & INTR_CLOCK)
		++info->i_fast;
	else
		++info->i_slow;

	/*
	 * Enable random number generation keying off of this interrupt.
	 */
	if ((intr_flags & INTR_NOENTROPY) == 0 &&
	    info->i_random.sc_enabled == 0) {
		info->i_random.sc_enabled = 1;
		info->i_random.sc_intr = intr;
	}

	/*
	 * Add the record to the interrupt list.
	 */
	crit_enter();
	while (*list != NULL)
		list = &(*list)->next;
	*list = rec;
	crit_exit();

	/*
	 * Update max_installed_hard_intr to make the emergency intr poll
	 * a bit more efficient.
	 */
	if (intr < FIRST_SOFTINT) {
		if (max_installed_hard_intr[cpuid] <= intr)
			max_installed_hard_intr[cpuid] = intr + 1;
	}

	if (intr >= FIRST_SOFTINT)
		swi_info_ary[intr - FIRST_SOFTINT] = info;

	/*
	 * Setup the machine level interrupt vector
	 */
	if (intr < FIRST_SOFTINT && info->i_slow + info->i_fast == 1)
		machintr_intr_setup(intr, intr_flags);

	int_moveto_origcpu(orig_cpuid, cpuid);

	return(rec);
}

void
unregister_swi(void *id, int intr, int cpuid)
{
	if (cpuid < 0)
		cpuid = intr % ncpus;

	unregister_int(id, cpuid);
}

void
unregister_int(void *id, int cpuid)
{
	struct intr_info *info;
	struct intrec **list;
	intrec_t rec;
	int intr, orig_cpuid;

	KKASSERT(cpuid >= 0 && cpuid < ncpus);

	intr = ((intrec_t)id)->intr;

	if (intr < 0 || intr >= MAX_INTS)
		panic("unregister_int: bad intr %d", intr);

	info = &intr_info_ary[cpuid][intr];

	int_moveto_destcpu(&orig_cpuid, cpuid);

	/*
	 * Remove the interrupt descriptor, adjust the descriptor count,
	 * and teardown the machine level vector if this was the last
	 * interrupt.
	 */
	crit_enter();
	list = &info->i_reclist;
	while ((rec = *list) != NULL) {
		if (rec == id)
			break;
		list = &rec->next;
	}
	if (rec) {
		intrec_t rec0;

		*list = rec->next;
		if (rec->intr_flags & INTR_CLOCK)
			--info->i_fast;
		else
			--info->i_slow;
		if (intr < FIRST_SOFTINT && info->i_fast + info->i_slow == 0)
			machintr_intr_teardown(intr);

		/*
		 * Clear i_mplock_required if no handlers in the chain
		 * require the MP lock.
		 */
		for (rec0 = info->i_reclist; rec0; rec0 = rec0->next) {
			if ((rec0->intr_flags & INTR_MPSAFE) == 0)
				break;
		}
		if (rec0 == NULL)
			info->i_mplock_required = 0;
	}

	if (intr >= FIRST_SOFTINT && info->i_reclist == NULL)
		swi_info_ary[intr - FIRST_SOFTINT] = NULL;

	crit_exit();

	int_moveto_origcpu(orig_cpuid, cpuid);

	/*
	 * Free the record.
	 */
	if (rec != NULL) {
		kfree(rec->name, M_DEVBUF);
		kfree(rec, M_DEVBUF);
	} else {
		kprintf("warning: unregister_int: int %d handler for %s not found\n",
			intr, ((intrec_t)id)->name);
	}
}
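
/*
 * Teardown sketch (illustrative): the opaque cookie returned by
 * register_int() identifies the handler chain entry, so a driver keeps
 * it around for detach; "sc" and its fields are assumptions here:
 *
 *	sc->sc_irq_id = register_int(irq, mydrv_intr, sc, "mydrv",
 *				     &sc->sc_serializer, INTR_MPSAFE, cpuid);
 *	...
 *	unregister_int(sc->sc_irq_id, cpuid);
 */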

long
get_interrupt_counter(int intr, int cpuid)
{
	struct intr_info *info;

	KKASSERT(cpuid >= 0 && cpuid < ncpus);

	if (intr < 0 || intr >= MAX_INTS)
		panic("get_interrupt_counter: bad intr %d", intr);
	info = &intr_info_ary[cpuid][intr];
	return(info->i_count);
}

void
register_randintr(int intr)
{
	struct intr_info *info;
	int cpuid;

	if (intr < 0 || intr >= MAX_INTS)
		panic("register_randintr: bad intr %d", intr);

	for (cpuid = 0; cpuid < ncpus; ++cpuid) {
		info = &intr_info_ary[cpuid][intr];
		info->i_random.sc_intr = intr;
		info->i_random.sc_enabled = 1;
	}
}

void
unregister_randintr(int intr)
{
	struct intr_info *info;
	int cpuid;

	if (intr < 0 || intr >= MAX_INTS)
		panic("unregister_randintr: bad intr %d", intr);

	for (cpuid = 0; cpuid < ncpus; ++cpuid) {
		info = &intr_info_ary[cpuid][intr];
		info->i_random.sc_enabled = -1;
	}
}

int
next_registered_randintr(int intr)
{
	struct intr_info *info;

	if (intr < 0 || intr >= MAX_INTS)
		panic("next_registered_randintr: bad intr %d", intr);

	while (intr < MAX_INTS) {
		int cpuid;

		for (cpuid = 0; cpuid < ncpus; ++cpuid) {
			info = &intr_info_ary[cpuid][intr];
			if (info->i_random.sc_enabled > 0)
				return intr;
		}
		++intr;
	}
	return intr;
}
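
/*
 * Enumeration sketch (illustrative): next_registered_randintr() returns
 * MAX_INTS when no further entropy sources exist, and panics if handed
 * an out-of-range argument, so a scan guards the increment:
 *
 *	int irq = 0;
 *
 *	while (irq < MAX_INTS &&
 *	       (irq = next_registered_randintr(irq)) < MAX_INTS) {
 *		kprintf("entropy source: intr %d\n", irq);
 *		++irq;
 *	}
 */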

/*
 * Dispatch an interrupt.  If there's nothing to do we have a stray
 * interrupt and can just return, leaving the interrupt masked.
 *
 * We need to schedule the interrupt and set its i_running bit.  If
 * we are not on the interrupt thread's cpu we have to send a message
 * to the correct cpu that will issue the desired action (interlocking
 * with the interrupt thread's critical section).  We do NOT attempt to
 * reschedule interrupts whose i_running bit is already set because
 * this would prematurely wakeup a livelock-limited interrupt thread.
 *
 * i_running is only tested/set on the same cpu as the interrupt thread.
 *
 * We are NOT in a critical section, which will allow the scheduled
 * interrupt to preempt us.  The MP lock might *NOT* be held here.
 */
#ifdef SMP

static void
sched_ithd_remote(void *arg)
{
	sched_ithd_intern(arg);
}

#endif

static void
sched_ithd_intern(struct intr_info *info)
{
	++info->i_count;
	if (info->i_state != ISTATE_NOTHREAD) {
		if (info->i_reclist == NULL) {
			report_stray_interrupt(info, "sched_ithd");
		} else {
#ifdef SMP
			if (info->i_thread.td_gd == mycpu) {
				if (info->i_running == 0) {
					info->i_running = 1;
					if (info->i_state != ISTATE_LIVELOCKED)
						lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
				}
			} else {
				lwkt_send_ipiq(info->i_thread.td_gd,
					       sched_ithd_remote, info);
			}
#else
			if (info->i_running == 0) {
				info->i_running = 1;
				if (info->i_state != ISTATE_LIVELOCKED)
					lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
			}
#endif
		}
	} else {
		report_stray_interrupt(info, "sched_ithd");
	}
}

void
sched_ithd_soft(int intr)
{
	struct intr_info *info;

	KKASSERT(intr >= FIRST_SOFTINT && intr < MAX_INTS);

	info = swi_info_ary[intr - FIRST_SOFTINT];
	if (info != NULL) {
		sched_ithd_intern(info);
	} else {
		kprintf("unregistered softint %d got scheduled on cpu%d\n",
			intr, mycpuid);
	}
}

void
sched_ithd_hard(int intr)
{
	KKASSERT(intr >= 0 && intr < MAX_HARDINTS);
	sched_ithd_intern(&intr_info_ary[mycpuid][intr]);
}

#ifdef _KERNEL_VIRTUAL

void
sched_ithd_hard_virtual(int intr)
{
	KKASSERT(intr >= 0 && intr < MAX_HARDINTS);
	sched_ithd_intern(&intr_info_ary[0][intr]);
}

void *
register_int_virtual(int intr, inthand2_t *handler, void *arg,
		     const char *name, struct lwkt_serialize *serializer,
		     int intr_flags)
{
	return register_int(intr, handler, arg, name, serializer, intr_flags,
			    0);
}

void
unregister_int_virtual(void *id)
{
	unregister_int(id, 0);
}

#endif /* _KERNEL_VIRTUAL */

static void
report_stray_interrupt(struct intr_info *info, const char *func)
{
	++info->i_straycount;
	if (info->i_straycount < 10) {
		if (info->i_errorticks == ticks)
			return;
		info->i_errorticks = ticks;
		kprintf("%s: stray interrupt %d on cpu%d\n",
			func, info->i_intr, mycpuid);
	} else if (info->i_straycount == 10) {
		kprintf("%s: %ld stray interrupts on intr %d on cpu%d - "
			"there will be no further reports\n", func,
			info->i_straycount, info->i_intr, mycpuid);
	}
}

/*
 * This is run from a periodic SYSTIMER (and thus must be MP safe; the BGL
 * might not be held).
 */
static void
ithread_livelock_wakeup(systimer_t st, int in_ipi __unused,
			struct intrframe *frame __unused)
{
	struct intr_info *info;

	info = &intr_info_ary[mycpuid][(int)(intptr_t)st->data];
	if (info->i_state != ISTATE_NOTHREAD)
		lwkt_schedule(&info->i_thread);
}

/*
 * Schedule ithread within fast intr handler
 *
 * XXX Protect sched_ithd_hard() call with gd_intr_nesting_level?
 * Interrupts aren't enabled, but still...
 */
static __inline void
ithread_fast_sched(int intr, thread_t td)
{
	++td->td_nest_count;

	/*
	 * We are already in critical section, exit it now to
	 * allow preemption.
	 */
	crit_exit_quick(td);
	sched_ithd_hard(intr);
	crit_enter_quick(td);

	--td->td_nest_count;
}

/*
 * This function is called directly from the ICU or APIC vector code assembly
 * to process an interrupt.  The critical section and interrupt deferral
 * checks have already been done but the function is entered WITHOUT
 * a critical section held.  The BGL may or may not be held.
 *
 * Must return non-zero if we do not want the vector code to re-enable
 * the interrupt (which we don't if we have to schedule the interrupt).
 */
int ithread_fast_handler(struct intrframe *frame);

int
ithread_fast_handler(struct intrframe *frame)
{
	int intr;
	struct intr_info *info;
	struct intrec **list;
	int must_schedule;
#ifdef SMP
	int got_mplock;
#endif
	TD_INVARIANTS_DECLARE;
	intrec_t rec, nrec;
	globaldata_t gd;
	thread_t td;

	intr = frame->if_vec;
	gd = mycpu;
	td = curthread;

	/* We must be in critical section. */
	KKASSERT(td->td_critcount);

	info = &intr_info_ary[mycpuid][intr];

	/*
	 * If we are not processing any FAST interrupts, just schedule the
	 * thing.
	 */
	if (info->i_fast == 0) {
		++gd->gd_cnt.v_intr;
		ithread_fast_sched(intr, td);
		return(1);
	}

	/*
	 * This should not normally occur since interrupts ought to be
	 * masked if the ithread has been scheduled or is running.
	 */
	if (info->i_running)
		return(1);

	/*
	 * Bump the interrupt nesting level to process any FAST interrupts.
	 * Obtain the MP lock as necessary.  If the MP lock cannot be
	 * obtained, schedule the interrupt thread to deal with the issue
	 * instead.
	 *
	 * To reduce overhead, just leave the MP lock held once it has been
	 * obtained.
	 */
	++gd->gd_intr_nesting_level;
	++gd->gd_cnt.v_intr;
	must_schedule = info->i_slow;
#ifdef SMP
	got_mplock = 0;
#endif

	TD_INVARIANTS_GET(td);
	list = &info->i_reclist;

	for (rec = *list; rec; rec = nrec) {
		/* rec may be invalid after call */
		nrec = rec->next;

		if (rec->intr_flags & INTR_CLOCK) {
#ifdef SMP
			if ((rec->intr_flags & INTR_MPSAFE) == 0 &&
			    got_mplock == 0) {
				if (try_mplock() == 0) {
					/*
					 * Couldn't get the MP lock; just
					 * schedule it.
					 */
					must_schedule = 1;
					break;
				}
				got_mplock = 1;
			}
#endif
			if (rec->serializer) {
				must_schedule += lwkt_serialize_handler_try(
						    rec->serializer,
						    rec->handler,
						    rec->argument, frame);
			} else {
				rec->handler(rec->argument, frame);
			}
			TD_INVARIANTS_TEST(td, rec->name);
		}
	}

	/*
	 * Cleanup
	 */
	--gd->gd_intr_nesting_level;
#ifdef SMP
	if (got_mplock)
		rel_mplock();
#endif

	/*
	 * If we had a problem, or mixed fast and slow interrupt handlers are
	 * registered, schedule the ithread to catch the missed records (it
	 * will just re-run all of them).  A return value of 0 indicates that
	 * all handlers have been run and the interrupt can be re-enabled, and
	 * a non-zero return indicates that the interrupt thread controls
	 * re-enablement.
	 */
	if (must_schedule > 0)
		ithread_fast_sched(intr, td);
	else if (must_schedule == 0)
		++info->i_count;
	return(must_schedule);
}

/*
 * Interrupt threads run this as their main loop.
 *
 * The handler begins execution outside a critical section and with no MP
 * lock held.
 *
 * The i_running state starts at 0.  When an interrupt occurs, the hardware
 * interrupt is disabled and sched_ithd_hard() is called.  The HW interrupt
 * remains disabled until all routines have run.  We then call
 * ithread_done() to reenable the HW interrupt and deschedule us until the
 * next interrupt.
 *
 * We are responsible for atomically checking i_running, and ithread_done()
 * is responsible for atomically checking for platform-specific delayed
 * interrupts.  i_running for our irq is only set in the context of our cpu,
 * so a critical section is a sufficient interlock.
 */
#define LIVELOCK_TIMEFRAME(freq)	((freq) >> 2)	/* 1/4 second */

static void
ithread_handler(void *arg)
{
	struct intr_info *info;
	int use_limit;
	__uint32_t lseconds;
	int intr, cpuid = mycpuid;
	int mpheld;
	struct intrec **list;
	intrec_t rec, nrec;
	globaldata_t gd;
	struct systimer ill_timer;	/* enforced freq. timer */
	u_int ill_count;		/* interrupt livelock counter */
	TD_INVARIANTS_DECLARE;

	ill_count = 0;
	intr = (int)(intptr_t)arg;
	info = &intr_info_ary[cpuid][intr];
	list = &info->i_reclist;

	/*
	 * The loop must be entered with one critical section held.  The
	 * thread does not hold the mplock on startup.
	 */
	gd = mycpu;
	lseconds = gd->gd_time_seconds;
	crit_enter_gd(gd);
	mpheld = 0;

	for (;;) {
		/*
		 * The chain is only considered MPSAFE if all its interrupt
		 * handlers are MPSAFE.  However, if intr_mpsafe has been
		 * turned off we always operate with the BGL.
		 */
#ifdef SMP
		if (info->i_mplock_required != mpheld) {
			if (info->i_mplock_required) {
				KKASSERT(mpheld == 0);
				get_mplock();
				mpheld = 1;
			} else {
				KKASSERT(mpheld != 0);
				rel_mplock();
				mpheld = 0;
			}
		}
#endif

		TD_INVARIANTS_GET(gd->gd_curthread);

		/*
		 * If an interrupt is pending, clear i_running and execute
		 * the handlers.  Note that certain types of interrupts can
		 * re-trigger and set i_running again.
		 *
		 * Each handler is run in a critical section.  Note that we
		 * run both FAST and SLOW designated service routines.
		 */
		if (info->i_running) {
			++ill_count;
			info->i_running = 0;

			if (*list == NULL)
				report_stray_interrupt(info,
						       "ithread_handler");

			for (rec = *list; rec; rec = nrec) {
				/* rec may be invalid after call */
				nrec = rec->next;
				if (rec->serializer) {
					lwkt_serialize_handler_call(
					    rec->serializer, rec->handler,
					    rec->argument, NULL);
				} else {
					rec->handler(rec->argument, NULL);
				}
				TD_INVARIANTS_TEST(gd->gd_curthread,
						   rec->name);
			}
		}

		/*
		 * This is our interrupt hook to add rate randomness to the
		 * random number generator.
		 */
		if (info->i_random.sc_enabled > 0)
			add_interrupt_randomness(intr);

		/*
		 * Unmask the interrupt to allow it to trigger again.  This
		 * only applies to certain types of interrupts (typically
		 * level-triggered interrupts).  This can result in the
		 * interrupt retriggering, but the retrigger will not be
		 * processed until we cycle our critical section.
		 *
		 * Only unmask interrupts while handlers are installed.  It
		 * is possible to hit a situation where no handlers are
		 * installed due to a device driver livelocking and then
		 * tearing down its interrupt on close (the parallel bus
		 * being a good example).
		 */
		if (intr < FIRST_SOFTINT && *list)
			machintr_intr_enable(intr);

		/*
		 * Do a quick exit/enter to catch any higher-priority
		 * interrupt sources, such as the statclock, so thread time
		 * accounting will still work.  This may also cause an
		 * interrupt to re-trigger.
		 */
		crit_exit_gd(gd);
		crit_enter_gd(gd);

		/*
		 * LIVELOCK STATE MACHINE
		 */
		switch(info->i_state) {
		case ISTATE_NORMAL:
			/*
			 * Reset the count each second.
			 */
			if (lseconds != gd->gd_time_seconds) {
				lseconds = gd->gd_time_seconds;
				ill_count = 0;
			}

			/*
			 * If we did not exceed the frequency limit, we are
			 * done.  If the interrupt has not retriggered we
			 * deschedule ourselves.
			 */
			if (ill_count <= livelock_limit) {
				if (info->i_running == 0) {
					lwkt_deschedule_self(gd->gd_curthread);
					lwkt_switch();
				}
				break;
			}

			/*
			 * Otherwise we are livelocked.  Set up a periodic
			 * systimer to wake the thread up at the limit
			 * frequency.
			 */
			kprintf("intr %d on cpu%d at %d/%d hz, livelocked limit engaged!\n",
				intr, cpuid, ill_count, livelock_limit);
			info->i_state = ISTATE_LIVELOCKED;
			if ((use_limit = livelock_limit) < 100)
				use_limit = 100;
			else if (use_limit > 500000)
				use_limit = 500000;
			systimer_init_periodic_nq(&ill_timer,
						  ithread_livelock_wakeup,
						  (void *)(intptr_t)intr,
						  use_limit);
			/* fall through */
		case ISTATE_LIVELOCKED:
			/*
			 * Wait for our periodic timer to go off.  Since the
			 * interrupt has re-armed it can still set i_running,
			 * but it will not reschedule us while we are in a
			 * livelocked state.
			 */
			lwkt_deschedule_self(gd->gd_curthread);
			lwkt_switch();

			/*
			 * Check once a second to see if the livelock
			 * condition no longer applies.
			 */
			if (lseconds != gd->gd_time_seconds) {
				lseconds = gd->gd_time_seconds;
				if (ill_count < livelock_lowater) {
					info->i_state = ISTATE_NORMAL;
					systimer_del(&ill_timer);
					kprintf("intr %d on cpu%d at %d/%d hz, livelock removed\n",
						intr, cpuid, ill_count,
						livelock_lowater);
				} else if (livelock_debug == intr ||
					   (bootverbose && cold)) {
					kprintf("intr %d on cpu%d at %d/%d hz, in livelock\n",
						intr, cpuid, ill_count,
						livelock_lowater);
				}
				ill_count = 0;
			}
			break;
		}
	}
	/* NOT REACHED */
}
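
/*
 * Livelock state summary (informational, derived from the code above):
 *
 *	ISTATE_NOTHREAD   -> ISTATE_NORMAL      ithread created (register_int)
 *	ISTATE_NORMAL     -> ISTATE_LIVELOCKED  rate exceeds livelock_limit
 *	ISTATE_LIVELOCKED -> ISTATE_NORMAL      rate falls below livelock_lowater
 *
 * While livelocked, wakeups come only from ithread_livelock_wakeup()'s
 * systimer; sched_ithd_intern() will not reschedule the thread.
 */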

/*
 * Emergency interrupt polling thread.  The thread begins execution
 * outside a critical section with the BGL held.
 *
 * If emergency interrupt polling is enabled, this thread will
 * execute all system interrupts not marked INTR_NOPOLL at the
 * specified polling frequency.
 *
 * WARNING! This thread runs *ALL* interrupt service routines that
 * are not marked INTR_NOPOLL, which basically means everything except
 * the 8254 clock interrupt and the ATA interrupt.  It has very high
 * overhead and should only be used in situations where the machine
 * cannot otherwise be made to work.  Due to the severe performance
 * degradation, it should not be enabled on production machines.
 */
static void
ithread_emergency(void *arg __unused)
{
	globaldata_t gd = mycpu;
	struct intr_info *info;
	intrec_t rec, nrec;
	int intr, cpuid = mycpuid;
	TD_INVARIANTS_DECLARE;

	get_mplock();
	crit_enter_gd(gd);
	TD_INVARIANTS_GET(gd->gd_curthread);

	for (;;) {
		for (intr = 0; intr < max_installed_hard_intr[cpuid]; ++intr) {
			info = &intr_info_ary[cpuid][intr];
			for (rec = info->i_reclist; rec; rec = nrec) {
				/* rec may be invalid after call */
				nrec = rec->next;
				if ((rec->intr_flags & INTR_NOPOLL) == 0) {
					if (rec->serializer) {
						lwkt_serialize_handler_try(
						    rec->serializer,
						    rec->handler,
						    rec->argument, NULL);
					} else {
						rec->handler(rec->argument,
							     NULL);
					}
					TD_INVARIANTS_TEST(gd->gd_curthread,
							   rec->name);
				}
			}
		}
		lwkt_deschedule_self(gd->gd_curthread);
		lwkt_switch();
	}
	/* NOT REACHED */
}
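
/*
 * Opt-out sketch (illustrative): a handler that must never be run from
 * this poller passes INTR_NOPOLL in its registration flags, e.g.
 *
 *	register_int(irq, mydrv_intr, sc, "mydrv", NULL,
 *		     INTR_MPSAFE | INTR_NOPOLL, cpuid);
 */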

/*
 * Systimer callback - schedule the emergency interrupt poll thread
 * if emergency polling is enabled.
 */
static
void
emergency_intr_timer_callback(systimer_t info, int in_ipi __unused,
			      struct intrframe *frame __unused)
{
	if (emergency_intr_enable)
		lwkt_schedule(info->data);
}

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however
 * is machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */

static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	struct intr_info *info;
	intrec_t rec;
	int error = 0;
	int len;
	int intr, cpuid;
	char buf[64];

	for (cpuid = 0; cpuid < ncpus; ++cpuid) {
		for (intr = 0; error == 0 && intr < MAX_INTS; ++intr) {
			info = &intr_info_ary[cpuid][intr];

			len = 0;
			buf[0] = 0;
			for (rec = info->i_reclist; rec; rec = rec->next) {
				ksnprintf(buf + len, sizeof(buf) - len, "%s%s",
					  (len ? "/" : ""), rec->name);
				len += strlen(buf + len);
			}
			if (len == 0) {
				ksnprintf(buf, sizeof(buf), "irq%d", intr);
				len = strlen(buf);
			}
			error = SYSCTL_OUT(req, buf, len + 1);
		}
	}
	return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
	NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt_all(SYSCTL_HANDLER_ARGS)
{
	struct intr_info *info;
	int error = 0;
	int intr, cpuid;

	for (cpuid = 0; cpuid < ncpus; ++cpuid) {
		for (intr = 0; intr < MAX_INTS; ++intr) {
			info = &intr_info_ary[cpuid][intr];

			error = SYSCTL_OUT(req, &info->i_count,
					   sizeof(info->i_count));
			if (error)
				goto failed;
		}
	}
failed:
	return(error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt_all, CTLTYPE_OPAQUE | CTLFLAG_RD,
	NULL, 0, sysctl_intrcnt_all, "", "Interrupt Counts");

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
	NULL, 0, sysctl_intrcnt_all, "", "Interrupt Counts");

static void
int_moveto_destcpu(int *orig_cpuid0, int cpuid)
{
	int orig_cpuid = mycpuid;

	if (cpuid != orig_cpuid)
		lwkt_migratecpu(cpuid);

	*orig_cpuid0 = orig_cpuid;
}

static void
int_moveto_origcpu(int orig_cpuid, int cpuid)
{
	if (cpuid != orig_cpuid)
		lwkt_migratecpu(orig_cpuid);
}

static void
intr_init(void *dummy __unused)
{
	int cpuid;

	kprintf("Initialize MI interrupts\n");

	for (cpuid = 0; cpuid < ncpus; ++cpuid) {
		int intr;

		for (intr = 0; intr < MAX_INTS; ++intr) {
			struct intr_info *info = &intr_info_ary[cpuid][intr];

			info->i_cpuid = cpuid;
			info->i_intr = intr;
		}
	}
}
SYSINIT(intr_init, SI_BOOT2_FINISH_PIC, SI_ORDER_ANY, intr_init, NULL);