/*-
 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Machine dependent interrupt code for x86.  For x86, we have to
 * deal with different PICs.  Thus, we use the passed in vector to lookup
 * an interrupt source associated with that vector.  The interrupt source
 * describes which PIC the source belongs to and includes methods to handle
 * that source.
 */

#include "opt_atpic.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <machine/clock.h>
#include <machine/intr_machdep.h>
#include <machine/smp.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Without the ATPIC device we still need enough of the 8259A definitions
 * to park the two controllers in a known-good shutdown state (see
 * atpic_reset() below).
 */
#ifndef DEV_ATPIC
#include <machine/segments.h>
#include <machine/frame.h>
#include <dev/ic/i8259.h>
#include <x86/isa/icu.h>
#ifdef PC98
#include <pc98/cbus/cbus.h>
#else
#include <isa/isareg.h>
#endif
#endif

/* Stop logging stray interrupts for a source after this many occurrences. */
#define MAX_STRAY_LOG 5

typedef void (*mask_fn)(void *);

/* Next free slot in intrcnt[]/intrnames[]; protected by intrcnt_lock. */
static int intrcnt_index;
/* Interrupt source per vector; writes protected by intr_table_lock. */
static struct intsrc *interrupt_sources[NUM_IO_INTS];
static struct mtx intr_table_lock;
static struct mtx intrcnt_lock;
/* All registered PICs, in registration order (used for suspend/resume). */
static TAILQ_HEAD(pics_head, pic) pics;

#ifdef SMP
/* Non-zero once the APs are up and sources may be assigned to CPUs. */
static int assign_cpu;
#endif

/* Interrupt counters and their names, exported for vmstat(8) et al. */
u_long intrcnt[INTRCNT_COUNT];
char intrnames[INTRCNT_COUNT * (MAXCOMLEN + 1)];
size_t sintrcnt = sizeof(intrcnt);
size_t sintrnames = sizeof(intrnames);

static int intr_assign_cpu(void *arg, int cpu);
static void intr_disable_src(void *arg);
static void intr_init(void *__dummy);
static int intr_pic_registered(struct pic *pic);
static void intrcnt_setname(const char *name, int index);
static void intrcnt_updatename(struct intsrc *is);
static void intrcnt_register(struct intsrc *is);

/*
 * Return 1 if "pic" is already on the global list of registered PICs,
 * 0 otherwise.  Caller is expected to hold intr_table_lock (or to be
 * running before interrupts are enabled, as in intr_register_source()).
 */
static int
intr_pic_registered(struct pic *pic)
{
	struct pic *p;

	TAILQ_FOREACH(p, &pics, pics) {
		if (p == pic)
			return (1);
	}
	return (0);
}

/*
 * Register a new interrupt controller (PIC).  This is to support suspend
 * and resume where we suspend/resume controllers rather than individual
 * sources.
This also allows controllers with no active sources (such as 113 * 8259As in a system using the APICs) to participate in suspend and resume. 114 */ 115 int 116 intr_register_pic(struct pic *pic) 117 { 118 int error; 119 120 mtx_lock(&intr_table_lock); 121 if (intr_pic_registered(pic)) 122 error = EBUSY; 123 else { 124 TAILQ_INSERT_TAIL(&pics, pic, pics); 125 error = 0; 126 } 127 mtx_unlock(&intr_table_lock); 128 return (error); 129 } 130 131 /* 132 * Register a new interrupt source with the global interrupt system. 133 * The global interrupts need to be disabled when this function is 134 * called. 135 */ 136 int 137 intr_register_source(struct intsrc *isrc) 138 { 139 int error, vector; 140 141 KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC")); 142 vector = isrc->is_pic->pic_vector(isrc); 143 if (interrupt_sources[vector] != NULL) 144 return (EEXIST); 145 error = intr_event_create(&isrc->is_event, isrc, 0, vector, 146 intr_disable_src, (mask_fn)isrc->is_pic->pic_enable_source, 147 (mask_fn)isrc->is_pic->pic_eoi_source, intr_assign_cpu, "irq%d:", 148 vector); 149 if (error) 150 return (error); 151 mtx_lock(&intr_table_lock); 152 if (interrupt_sources[vector] != NULL) { 153 mtx_unlock(&intr_table_lock); 154 intr_event_destroy(isrc->is_event); 155 return (EEXIST); 156 } 157 intrcnt_register(isrc); 158 interrupt_sources[vector] = isrc; 159 isrc->is_handlers = 0; 160 mtx_unlock(&intr_table_lock); 161 return (0); 162 } 163 164 struct intsrc * 165 intr_lookup_source(int vector) 166 { 167 168 return (interrupt_sources[vector]); 169 } 170 171 int 172 intr_add_handler(const char *name, int vector, driver_filter_t filter, 173 driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep) 174 { 175 struct intsrc *isrc; 176 int error; 177 178 isrc = intr_lookup_source(vector); 179 if (isrc == NULL) 180 return (EINVAL); 181 error = intr_event_add_handler(isrc->is_event, name, filter, handler, 182 arg, intr_priority(flags), flags, cookiep); 183 if (error == 
0) { 184 mtx_lock(&intr_table_lock); 185 intrcnt_updatename(isrc); 186 isrc->is_handlers++; 187 if (isrc->is_handlers == 1) { 188 isrc->is_pic->pic_enable_intr(isrc); 189 isrc->is_pic->pic_enable_source(isrc); 190 } 191 mtx_unlock(&intr_table_lock); 192 } 193 return (error); 194 } 195 196 int 197 intr_remove_handler(void *cookie) 198 { 199 struct intsrc *isrc; 200 int error; 201 202 isrc = intr_handler_source(cookie); 203 error = intr_event_remove_handler(cookie); 204 if (error == 0) { 205 mtx_lock(&intr_table_lock); 206 isrc->is_handlers--; 207 if (isrc->is_handlers == 0) { 208 isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI); 209 isrc->is_pic->pic_disable_intr(isrc); 210 } 211 intrcnt_updatename(isrc); 212 mtx_unlock(&intr_table_lock); 213 } 214 return (error); 215 } 216 217 int 218 intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol) 219 { 220 struct intsrc *isrc; 221 222 isrc = intr_lookup_source(vector); 223 if (isrc == NULL) 224 return (EINVAL); 225 return (isrc->is_pic->pic_config_intr(isrc, trig, pol)); 226 } 227 228 static void 229 intr_disable_src(void *arg) 230 { 231 struct intsrc *isrc; 232 233 isrc = arg; 234 isrc->is_pic->pic_disable_source(isrc, PIC_EOI); 235 } 236 237 void 238 intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame) 239 { 240 struct intr_event *ie; 241 int vector; 242 243 /* 244 * We count software interrupts when we process them. The 245 * code here follows previous practice, but there's an 246 * argument for counting hardware interrupts when they're 247 * processed too. 248 */ 249 (*isrc->is_count)++; 250 PCPU_INC(cnt.v_intr); 251 252 ie = isrc->is_event; 253 254 /* 255 * XXX: We assume that IRQ 0 is only used for the ISA timer 256 * device (clk). 257 */ 258 vector = isrc->is_pic->pic_vector(isrc); 259 if (vector == 0) 260 clkintr_pending = 1; 261 262 /* 263 * For stray interrupts, mask and EOI the source, bump the 264 * stray count, and log the condition. 
265 */ 266 if (intr_event_handle(ie, frame) != 0) { 267 isrc->is_pic->pic_disable_source(isrc, PIC_EOI); 268 (*isrc->is_straycount)++; 269 if (*isrc->is_straycount < MAX_STRAY_LOG) 270 log(LOG_ERR, "stray irq%d\n", vector); 271 else if (*isrc->is_straycount == MAX_STRAY_LOG) 272 log(LOG_CRIT, 273 "too many stray irq %d's: not logging anymore\n", 274 vector); 275 } 276 } 277 278 void 279 intr_resume(bool suspend_cancelled) 280 { 281 struct pic *pic; 282 283 #ifndef DEV_ATPIC 284 atpic_reset(); 285 #endif 286 mtx_lock(&intr_table_lock); 287 TAILQ_FOREACH(pic, &pics, pics) { 288 if (pic->pic_resume != NULL) 289 pic->pic_resume(pic, suspend_cancelled); 290 } 291 mtx_unlock(&intr_table_lock); 292 } 293 294 void 295 intr_suspend(void) 296 { 297 struct pic *pic; 298 299 mtx_lock(&intr_table_lock); 300 TAILQ_FOREACH_REVERSE(pic, &pics, pics_head, pics) { 301 if (pic->pic_suspend != NULL) 302 pic->pic_suspend(pic); 303 } 304 mtx_unlock(&intr_table_lock); 305 } 306 307 static int 308 intr_assign_cpu(void *arg, int cpu) 309 { 310 #ifdef SMP 311 struct intsrc *isrc; 312 int error; 313 314 /* 315 * Don't do anything during early boot. We will pick up the 316 * assignment once the APs are started. 
317 */ 318 if (assign_cpu && cpu != NOCPU) { 319 isrc = arg; 320 mtx_lock(&intr_table_lock); 321 error = isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]); 322 mtx_unlock(&intr_table_lock); 323 } else 324 error = 0; 325 return (error); 326 #else 327 return (EOPNOTSUPP); 328 #endif 329 } 330 331 static void 332 intrcnt_setname(const char *name, int index) 333 { 334 335 snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s", 336 MAXCOMLEN, name); 337 } 338 339 static void 340 intrcnt_updatename(struct intsrc *is) 341 { 342 343 intrcnt_setname(is->is_event->ie_fullname, is->is_index); 344 } 345 346 static void 347 intrcnt_register(struct intsrc *is) 348 { 349 char straystr[MAXCOMLEN + 1]; 350 351 KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__)); 352 mtx_lock_spin(&intrcnt_lock); 353 is->is_index = intrcnt_index; 354 intrcnt_index += 2; 355 snprintf(straystr, MAXCOMLEN + 1, "stray irq%d", 356 is->is_pic->pic_vector(is)); 357 intrcnt_updatename(is); 358 is->is_count = &intrcnt[is->is_index]; 359 intrcnt_setname(straystr, is->is_index + 1); 360 is->is_straycount = &intrcnt[is->is_index + 1]; 361 mtx_unlock_spin(&intrcnt_lock); 362 } 363 364 void 365 intrcnt_add(const char *name, u_long **countp) 366 { 367 368 mtx_lock_spin(&intrcnt_lock); 369 *countp = &intrcnt[intrcnt_index]; 370 intrcnt_setname(name, intrcnt_index); 371 intrcnt_index++; 372 mtx_unlock_spin(&intrcnt_lock); 373 } 374 375 static void 376 intr_init(void *dummy __unused) 377 { 378 379 intrcnt_setname("???", 0); 380 intrcnt_index = 1; 381 TAILQ_INIT(&pics); 382 mtx_init(&intr_table_lock, "intr sources", NULL, MTX_DEF); 383 mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN); 384 } 385 SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL); 386 387 #ifndef DEV_ATPIC 388 /* Initialize the two 8259A's to a known-good shutdown state. 
 */
void
atpic_reset(void)
{

	/* Master 8259A: ICW1 starts init, ICW4 will follow. */
	outb(IO_ICU1, ICW1_RESET | ICW1_IC4);
	/* ICW2: vector base for IRQs 0-7. */
	outb(IO_ICU1 + ICU_IMR_OFFSET, IDT_IO_INTS);
	/* ICW3: bitmask of the line the slave is cascaded on. */
	outb(IO_ICU1 + ICU_IMR_OFFSET, IRQ_MASK(ICU_SLAVEID));
	/* ICW4: operating mode. */
	outb(IO_ICU1 + ICU_IMR_OFFSET, MASTER_MODE);
	/* OCW1: mask all eight lines. */
	outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff);
	/* OCW3: select IRR for subsequent status reads. */
	outb(IO_ICU1, OCW3_SEL | OCW3_RR);

	/* Slave 8259A: same sequence, except ICW3 carries its cascade ID. */
	outb(IO_ICU2, ICW1_RESET | ICW1_IC4);
	outb(IO_ICU2 + ICU_IMR_OFFSET, IDT_IO_INTS + 8);
	outb(IO_ICU2 + ICU_IMR_OFFSET, ICU_SLAVEID);
	outb(IO_ICU2 + ICU_IMR_OFFSET, SLAVE_MODE);
	outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff);
	outb(IO_ICU2, OCW3_SEL | OCW3_RR);
}
#endif

/*
 * Add a description to an active interrupt handler.  Returns EINVAL
 * for an unknown vector or an error from intr_event_describe_handler().
 */
int
intr_describe(u_int vector, void *ih, const char *descr)
{
	struct intsrc *isrc;
	int error;

	isrc = intr_lookup_source(vector);
	if (isrc == NULL)
		return (EINVAL);
	error = intr_event_describe_handler(isrc->is_event, ih, descr);
	if (error)
		return (error);
	intrcnt_updatename(isrc);
	return (0);
}

/*
 * Ask each PIC that supports it to reprogram the pins of all registered
 * sources (e.g. after the interrupt routing has changed).
 */
void
intr_reprogram(void)
{
	struct intsrc *is;
	int v;

	mtx_lock(&intr_table_lock);
	for (v = 0; v < NUM_IO_INTS; v++) {
		is = interrupt_sources[v];
		if (is == NULL)
			continue;
		if (is->is_pic->pic_reprogram_pin != NULL)
			is->is_pic->pic_reprogram_pin(is);
	}
	mtx_unlock(&intr_table_lock);
}

#ifdef DDB
/*
 * Dump data about interrupt handlers.  The "v" modifier requests
 * verbose output.
 */
DB_SHOW_COMMAND(irqs, db_show_irqs)
{
	struct intsrc **isrc;
	int i, verbose;

	if (strcmp(modif, "v") == 0)
		verbose = 1;
	else
		verbose = 0;
	isrc = interrupt_sources;
	for (i = 0; i < NUM_IO_INTS && !db_pager_quit; i++, isrc++)
		if (*isrc != NULL)
			db_dump_intr_event((*isrc)->is_event, verbose);
}
#endif

#ifdef SMP
/*
 * Support for balancing interrupt sources across CPUs.  For now we just
 * allocate CPUs round-robin.
467 */ 468 469 static cpuset_t intr_cpus = CPUSET_T_INITIALIZER(0x1); 470 static int current_cpu; 471 472 /* 473 * Return the CPU that the next interrupt source should use. For now 474 * this just returns the next local APIC according to round-robin. 475 */ 476 u_int 477 intr_next_cpu(void) 478 { 479 u_int apic_id; 480 481 /* Leave all interrupts on the BSP during boot. */ 482 if (!assign_cpu) 483 return (PCPU_GET(apic_id)); 484 485 mtx_lock_spin(&icu_lock); 486 apic_id = cpu_apic_ids[current_cpu]; 487 do { 488 current_cpu++; 489 if (current_cpu > mp_maxid) 490 current_cpu = 0; 491 } while (!CPU_ISSET(current_cpu, &intr_cpus)); 492 mtx_unlock_spin(&icu_lock); 493 return (apic_id); 494 } 495 496 /* Attempt to bind the specified IRQ to the specified CPU. */ 497 int 498 intr_bind(u_int vector, u_char cpu) 499 { 500 struct intsrc *isrc; 501 502 isrc = intr_lookup_source(vector); 503 if (isrc == NULL) 504 return (EINVAL); 505 return (intr_event_bind(isrc->is_event, cpu)); 506 } 507 508 /* 509 * Add a CPU to our mask of valid CPUs that can be destinations of 510 * interrupts. 511 */ 512 void 513 intr_add_cpu(u_int cpu) 514 { 515 516 if (cpu >= MAXCPU) 517 panic("%s: Invalid CPU ID", __func__); 518 if (bootverbose) 519 printf("INTR: Adding local APIC %d as a target\n", 520 cpu_apic_ids[cpu]); 521 522 CPU_SET(cpu, &intr_cpus); 523 } 524 525 /* 526 * Distribute all the interrupt sources among the available CPUs once the 527 * AP's have been launched. 528 */ 529 static void 530 intr_shuffle_irqs(void *arg __unused) 531 { 532 struct intsrc *isrc; 533 int i; 534 535 #ifdef XEN 536 /* 537 * Doesn't work yet 538 */ 539 return; 540 #endif 541 542 /* Don't bother on UP. */ 543 if (mp_ncpus == 1) 544 return; 545 546 /* Round-robin assign a CPU to each enabled source. 
*/ 547 mtx_lock(&intr_table_lock); 548 assign_cpu = 1; 549 for (i = 0; i < NUM_IO_INTS; i++) { 550 isrc = interrupt_sources[i]; 551 if (isrc != NULL && isrc->is_handlers > 0) { 552 /* 553 * If this event is already bound to a CPU, 554 * then assign the source to that CPU instead 555 * of picking one via round-robin. Note that 556 * this is careful to only advance the 557 * round-robin if the CPU assignment succeeds. 558 */ 559 if (isrc->is_event->ie_cpu != NOCPU) 560 (void)isrc->is_pic->pic_assign_cpu(isrc, 561 cpu_apic_ids[isrc->is_event->ie_cpu]); 562 else if (isrc->is_pic->pic_assign_cpu(isrc, 563 cpu_apic_ids[current_cpu]) == 0) 564 (void)intr_next_cpu(); 565 566 } 567 } 568 mtx_unlock(&intr_table_lock); 569 } 570 SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs, 571 NULL); 572 #else 573 /* 574 * Always route interrupts to the current processor in the UP case. 575 */ 576 u_int 577 intr_next_cpu(void) 578 { 579 580 return (PCPU_GET(apic_id)); 581 } 582 #endif 583