/*	$OpenBSD: isa_machdep.c,v 1.83 2016/09/22 10:25:34 jsg Exp $	*/
/*	$NetBSD: isa_machdep.c,v 1.22 1997/06/12 23:57:32 thorpej Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1993, 1994, 1996, 1997
 *	Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)isa.c	7.2 (Berkeley) 5/13/91
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include "ioapic.h"

#if NIOAPIC > 0
#include <machine/i82093var.h>
#include <machine/mpbiosvar.h>
#endif

#include <machine/bus.h>

#include <machine/intr.h>
#include <machine/pio.h>
#include <machine/cpufunc.h>
#include <machine/i8259.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>
#include <dev/isa/isadmavar.h>
#include <i386/isa/isa_machdep.h>

#include "isadma.h"

extern	paddr_t avail_end;

#define	IDTVEC(name)	__CONCAT(X,name)
/* default interrupt vector table entries */
typedef int (*vector)(void);
extern vector IDTVEC(intr)[];
void isa_strayintr(int);
void intr_calculatemasks(void);
int fakeintr(void *);

#if NISADMA > 0
int	_isa_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
void	_isa_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	_isa_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	_isa_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	_isa_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	_isa_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	_isa_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	_isa_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int);

int	_isa_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);

int	_isa_dma_check_buffer(void *, bus_size_t, int, bus_size_t,
	    struct proc *);
int	_isa_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	_isa_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct bus_dma_tag isa_bus_dma_tag = {
	NULL,			/* _cookie */
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,
	_isa_bus_dmamem_alloc,
	_bus_dmamem_alloc_range,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};
#endif /* NISADMA > 0 */

/*
 * Fill in the default interrupt table (in case of a spurious interrupt
 * during configuration of the kernel) and set up the interrupt control unit.
 */
void
isa_defaultirq(void)
{
	int i;

	/* icu vectors */
	for (i = 0; i < ICU_LEN; i++)
		setgate(&idt[ICU_OFFSET + i], IDTVEC(intr)[i], 0,
		    SDT_SYS386IGT, SEL_KPL, GICODE_SEL);

	/* initialize 8259's */
	outb(IO_ICU1, 0x11);		/* reset; program device, four bytes */
	outb(IO_ICU1+1, ICU_OFFSET);	/* starting at this vector index */
	outb(IO_ICU1+1, 1 << IRQ_SLAVE); /* slave on line 2 */
#ifdef AUTO_EOI_1
	outb(IO_ICU1+1, 2 | 1);		/* auto EOI, 8086 mode */
#else
	outb(IO_ICU1+1, 1);		/* 8086 mode */
#endif
	outb(IO_ICU1+1, 0xff);		/* leave interrupts masked */
	outb(IO_ICU1, 0x68);		/* special mask mode (if available) */
	outb(IO_ICU1, 0x0a);		/* Read IRR by default. */
#ifdef REORDER_IRQ
	outb(IO_ICU1, 0xc0 | (3 - 1));	/* pri order 3-7, 0-2 (com2 first) */
#endif

	outb(IO_ICU2, 0x11);		/* reset; program device, four bytes */
	outb(IO_ICU2+1, ICU_OFFSET+8);	/* starting at this vector index */
	outb(IO_ICU2+1, IRQ_SLAVE);
#ifdef AUTO_EOI_2
	outb(IO_ICU2+1, 2 | 1);		/* auto EOI, 8086 mode */
#else
	outb(IO_ICU2+1, 1);		/* 8086 mode */
#endif
	outb(IO_ICU2+1, 0xff);		/* leave interrupts masked */
	outb(IO_ICU2, 0x68);		/* special mask mode (if available) */
	outb(IO_ICU2, 0x0a);		/* Read IRR by default. */
}

/*
 * Handle an NMI, possibly a machine check.
 * Return true to panic the system, false to ignore.
 */
int
isa_nmi(void)
{
	/* This is historic garbage; these ports are not readable */
	log(LOG_CRIT, "Non-maskable interrupt, may be parity error\n");
	return (0);
}

u_long	intrstray[ICU_LEN];

/*
 * Caught a stray interrupt; notify.
 */
void
isa_strayintr(int irq)
{
	/*
	 * Stray interrupts on irq 7 occur when an interrupt line is raised
	 * and then lowered before the CPU acknowledges it.  This generally
	 * means either the device is screwed or something is cli'ing too
	 * long and it's timing out.
	 */
	if (++intrstray[irq] <= 5)
		log(LOG_ERR, "stray interrupt %d%s\n", irq,
		    intrstray[irq] >= 5 ? "; stopped logging" : "");
}

int intrtype[ICU_LEN], intrmask[ICU_LEN], intrlevel[ICU_LEN];
int iminlevel[ICU_LEN], imaxlevel[ICU_LEN];
struct intrhand *intrhand[ICU_LEN];

int imask[NIPL];	/* Bitmask telling what interrupts are blocked. */
int iunmask[NIPL];	/* Bitmask telling what interrupts are accepted. */

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function
 * that would be faster, but the code would be nastier, and we don't expect
 * this to happen very much anyway.
 */
void
intr_calculatemasks(void)
{
	int irq, level, unusedirqs;
	struct intrhand *q;

	/* First, figure out which levels each IRQ uses. */
	unusedirqs = 0xffff;
	for (irq = 0; irq < ICU_LEN; irq++) {
		int levels = 0;
		for (q = intrhand[irq]; q; q = q->ih_next)
			levels |= 1 << IPL(q->ih_level);
		intrlevel[irq] = levels;
		if (levels)
			unusedirqs &= ~(1 << irq);
	}

	/* Then figure out which IRQs use each level. */
	for (level = 0; level < NIPL; level++) {
		int irqs = 0;
		for (irq = 0; irq < ICU_LEN; irq++)
			if (intrlevel[irq] & (1 << level))
				irqs |= 1 << irq;
		imask[level] = irqs | unusedirqs;
	}

	/*
	 * Initialize soft interrupt masks to block themselves.
	 */
	IMASK(IPL_SOFTCLOCK) |= 1 << SIR_CLOCK;
	IMASK(IPL_SOFTNET) |= 1 << SIR_NET;
	IMASK(IPL_SOFTTTY) |= 1 << SIR_TTY;

	/*
	 * Enforce a hierarchy that gives slow devices a better chance at not
	 * dropping data.
	 */
	for (level = 0; level < NIPL - 1; level++)
		imask[level + 1] |= imask[level];

	/* And eventually calculate the complete masks. */
	for (irq = 0; irq < ICU_LEN; irq++) {
		int irqs = 1 << irq;
		int minlevel = IPL_NONE;
		int maxlevel = IPL_NONE;

		if (intrhand[irq] == NULL) {
			maxlevel = IPL_HIGH;
			irqs = IMASK(IPL_HIGH);
		} else {
			for (q = intrhand[irq]; q; q = q->ih_next) {
				irqs |= IMASK(q->ih_level);
				if (minlevel == IPL_NONE ||
				    q->ih_level < minlevel)
					minlevel = q->ih_level;
				if (q->ih_level > maxlevel)
					maxlevel = q->ih_level;
			}
		}
		if (irqs != IMASK(maxlevel))
			panic("irq %d level %x mask mismatch: %x vs %x", irq,
			    maxlevel, irqs, IMASK(maxlevel));

		intrmask[irq] = irqs;
		iminlevel[irq] = minlevel;
		imaxlevel[irq] = maxlevel;

#if 0
		printf("irq %d: level %x, mask 0x%x (%x)\n", irq,
		    imaxlevel[irq], intrmask[irq], IMASK(imaxlevel[irq]));
#endif
	}

	/* Lastly, determine which IRQs are actually in use. */
	{
		int irqs = 0;
		for (irq = 0; irq < ICU_LEN; irq++)
			if (intrhand[irq])
				irqs |= 1 << irq;
		if (irqs >= 0x100)	/* any IRQs >= 8 in use */
			irqs |= 1 << IRQ_SLAVE;
		imen = ~irqs;
		SET_ICUS();
	}

	/* For speed of splx, provide the inverse of the interrupt masks. */
	for (irq = 0; irq < ICU_LEN; irq++)
		iunmask[irq] = ~imask[irq];
}

int
fakeintr(void *arg)
{
	return 0;
}

#define	LEGAL_IRQ(x)	((x) >= 0 && (x) < ICU_LEN && (x) != 2)

int
isa_intr_alloc(isa_chipset_tag_t ic, int mask, int type, int *irq)
{
	int i, bestirq, count;
	int tmp;
	struct intrhand **p, *q;

	if (type == IST_NONE)
		panic("intr_alloc: bogus type");

	bestirq = -1;
	count = -1;

	/* some interrupts should never be dynamically allocated */
	mask &= 0xdef8;

	/*
	 * XXX some interrupts will be used later (6 for fdc, 12 for pms).
	 * the right answer is to do "breadth-first" searching of devices.
	 */
	mask &= 0xefbf;

	for (i = 0; i < ICU_LEN; i++) {
		if (LEGAL_IRQ(i) == 0 || (mask & (1 << i)) == 0)
			continue;

		switch (intrtype[i]) {
		case IST_NONE:
			/*
			 * if nothing's using the irq, just return it
			 */
			*irq = i;
			return (0);

		case IST_EDGE:
		case IST_LEVEL:
			if (type != intrtype[i])
				continue;
			/*
			 * if the irq is shareable, count the number of other
			 * handlers, and if it's smaller than the last irq like
			 * this, remember it
			 *
			 * XXX We should probably also consider the
			 * interrupt level and stick IPL_TTY with other
			 * IPL_TTY, etc.
			 */
			for (p = &intrhand[i], tmp = 0; (q = *p) != NULL;
			     p = &q->ih_next, tmp++)
				;
			if ((bestirq == -1) || (count > tmp)) {
				bestirq = i;
				count = tmp;
			}
			break;

		case IST_PULSE:
			/* this just isn't shareable */
			continue;
		}
	}

	if (bestirq == -1)
		return (1);

	*irq = bestirq;

	return (0);
}

/*
 * Just check to see if an IRQ is available/can be shared.
 * 0 = interrupt not available
 * 1 = interrupt shareable
 * 2 = interrupt all to ourself
 */
int
isa_intr_check(isa_chipset_tag_t ic, int irq, int type)
{
	if (!LEGAL_IRQ(irq) || type == IST_NONE)
		return (0);

	switch (intrtype[irq]) {
	case IST_NONE:
		return (2);
		break;
	case IST_LEVEL:
		if (type != intrtype[irq])
			return (0);
		return (1);
		break;
	case IST_EDGE:
	case IST_PULSE:
		if (type != IST_NONE)
			return (0);
	}
	return (1);
}

/*
 * Set up an interrupt handler to start being called.
 * XXX PRONE TO RACE CONDITIONS, UGLY, 'INTERESTING' INSERTION ALGORITHM.
 */
void *
isa_intr_establish(isa_chipset_tag_t ic, int irq, int type, int level,
    int (*ih_fun)(void *), void *ih_arg, const char *ih_what)
{
	struct intrhand **p, *q, *ih;
	static struct intrhand fakehand = {fakeintr};
	int flags;

#if NIOAPIC > 0
	struct mp_intr_map *mip;

	if (mp_busses != NULL) {
		int mpspec_pin = irq;
		int airq;

		if (mp_isa_bus == NULL)
			panic("no isa bus");

		for (mip = mp_isa_bus->mb_intrs; mip != NULL;
		    mip = mip->next) {
			if (mip->bus_pin == mpspec_pin) {
				airq = mip->ioapic_ih | irq;
				break;
			}
		}
		if (mip == NULL && mp_eisa_bus) {
			for (mip = mp_eisa_bus->mb_intrs; mip != NULL;
			    mip = mip->next) {
				if (mip->bus_pin == mpspec_pin) {
					airq = mip->ioapic_ih | irq;
					break;
				}
			}
		}

		/* no MP mapping found -- invent! */
		if (mip == NULL)
			airq = mpbios_invent(irq, type, mp_isa_bus->mb_idx);

		return (apic_intr_establish(airq, type, level, ih_fun,
		    ih_arg, ih_what));
	}
#endif

	flags = level & IPL_MPSAFE;
	level &= ~IPL_MPSAFE;

	KASSERT(level <= IPL_TTY || level >= IPL_CLOCK || flags & IPL_MPSAFE);

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL) {
		printf("%s: isa_intr_establish: can't malloc handler info\n",
		    ih_what);
		return (NULL);
	}

	if (!LEGAL_IRQ(irq) || type == IST_NONE) {
		printf("%s: isa_intr_establish: bogus irq or type\n", ih_what);
		free(ih, M_DEVBUF, sizeof *ih);
		return (NULL);
	}
	switch (intrtype[irq]) {
	case IST_NONE:
		intrtype[irq] = type;
		break;
	case IST_EDGE:
		intr_shared_edge = 1;
		/* FALLTHROUGH */
	case IST_LEVEL:
		if (type == intrtype[irq])
			break;
	case IST_PULSE:
		if (type != IST_NONE) {
			/*printf("%s: intr_establish: can't share %s with %s, irq %d\n",
			    ih_what, isa_intr_typename(intrtype[irq]),
			    isa_intr_typename(type), irq);*/
			free(ih, M_DEVBUF, sizeof *ih);
			return (NULL);
		}
		break;
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &intrhand[irq]; (q = *p) != NULL; p = &q->ih_next)
		;

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_level = level;
	*p = &fakehand;

	intr_calculatemasks();

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_level = level;
	ih->ih_flags = flags;
	ih->ih_irq = irq;
	evcount_attach(&ih->ih_count, ih_what, &ih->ih_irq);
	*p = ih;

	return (ih);
}

/*
 * Deregister an interrupt handler.
 */
void
isa_intr_disestablish(isa_chipset_tag_t ic, void *arg)
{
	struct intrhand *ih = arg;
	int irq = ih->ih_irq;
	struct intrhand **p, *q;

#if NIOAPIC > 0
	if (irq & APIC_INT_VIA_APIC) {
		apic_intr_disestablish(arg);
		return;
	}
#endif

	if (!LEGAL_IRQ(irq))
		panic("intr_disestablish: bogus irq %d", irq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &intrhand[irq]; (q = *p) != NULL && q != ih; p = &q->ih_next)
		;
	if (q)
		*p = q->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	evcount_detach(&ih->ih_count);
	free(ih, M_DEVBUF, sizeof *ih);

	intr_calculatemasks();

	if (intrhand[irq] == NULL)
		intrtype[irq] = IST_NONE;
}

void
isa_attach_hook(struct device *parent, struct device *self,
    struct isabus_attach_args *iba)
{
	extern int isa_has_been_seen;

	/*
	 * Notify others that might need to know that the ISA bus
	 * has now been attached.
	 */
	if (isa_has_been_seen)
		panic("isaattach: ISA bus already seen!");
	isa_has_been_seen = 1;
}

#if NISADMA > 0
/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct isa_dma_cookie);

	/*
	 * ISA only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.
	 * To software using the DMA mapping interface, this looks
	 * simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / NBPG) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	cookieflags = 0;
	if ((avail_end > ISA_DMA_BOUNCE_THRESHOLD &&
	    (flags & ISABUS_DMA_32BIT) == 0) ||
	    ((map->_dm_size / NBPG) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DEVBUF,
	    ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)|M_ZERO)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	cookie = (struct isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		free(map->_dm_cookie, M_DEVBUF, cookiesize);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DEVBUF, 0);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Check to see if we might need to bounce the transfer.
	 */
	if (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Check if all pages are below the bounce
		 * threshold.  If they are, don't bother bouncing.
		 */
		if (_isa_dma_check_buffer(buf, buflen,
		    map->_dm_segcnt, map->_dm_boundary, p) == 0)
			return (_bus_dmamap_load(t, map, buf, buflen,
			    p, flags));

		STAT_INCR(isa_dma_stats_bounces);

		/*
		 * Allocate bounce pages, if necessary.
		 */
		if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
			error = _isa_dma_alloc_bouncebuf(t, map, buflen,
			    flags);
			if (error)
				return (error);
		}

		/*
		 * Cache a pointer to the caller's buffer and
		 * load the DMA map with the bounce buffer.
		 */
		cookie->id_origbuf = buf;
		cookie->id_origbuflen = buflen;
		error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
		    buflen, p, flags);

		if (error) {
			/*
			 * Free the bounce pages, unless our resources
			 * are reserved for our exclusive use.
			 */
			if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
				_isa_dma_free_bouncebuf(t, map);
		}

		/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
		cookie->id_flags |= ID_IS_BOUNCING;
	} else {
		/*
		 * Just use the generic load function.
		 */
		error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	}

	return (error);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
    int flags)
{

	panic("_isa_bus_dmamap_load_mbuf: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int op)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

#ifdef DEBUG
	if ((op & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif
#ifdef DIAGNOSTIC
	if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0 &&
	    (op & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");
#endif /* DIAGNOSTIC */

	/* PREREAD and POSTWRITE are no-ops */
	if (op & BUS_DMASYNC_PREWRITE) {
		/*
		 * If we're bouncing this transfer, copy the
		 * caller's buffer to the bounce buffer.
		 */
		if (cookie->id_flags & ID_IS_BOUNCING)
			memcpy(cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
	}

	_bus_dmamap_sync(t, map, offset, len, op);

	if (op & BUS_DMASYNC_POSTREAD) {
		/*
		 * If we're bouncing this transfer, copy the
		 * bounce buffer to the caller's buffer.
		 */
		if (cookie->id_flags & ID_IS_BOUNCING)
			memcpy(cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
	}
}

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	int error;

	/* Try in ISA addressable region first */
	error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, ISA_DMA_BOUNCE_THRESHOLD);
	if (!error)
		return (error);

	/* Otherwise try anywhere (we'll bounce later) */
	error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, (bus_addr_t)0, (bus_addr_t)-1);
	return (error);
}


/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

/*
 * Return 0 if all pages in the passed buffer lie within the DMA'able
 * range of RAM.
 */
int
_isa_dma_check_buffer(void *buf, bus_size_t buflen, int segcnt,
    bus_size_t boundary, struct proc *p)
{
	vaddr_t vaddr = (vaddr_t)buf;
	vaddr_t endva;
	paddr_t pa, lastpa;
	u_long pagemask = ~(boundary - 1);
	pmap_t pmap;
	int nsegs;

	endva = round_page(vaddr + buflen);

	nsegs = 1;
	lastpa = 0;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; vaddr < endva; vaddr += NBPG) {
		/*
		 * Get physical address for this segment.
		 */
		pmap_extract(pmap, (vaddr_t)vaddr, &pa);
		pa = trunc_page(pa);

		/*
		 * Is it below the DMA'able threshold?
		 */
		if (pa > ISA_DMA_BOUNCE_THRESHOLD)
			return (EINVAL);

		if (lastpa) {
			/*
			 * Check excessive segment count.
			 */
			if (lastpa + NBPG != pa) {
				if (++nsegs > segcnt)
					return (EFBIG);
			}

			/*
			 * Check boundary restriction.
			 */
			if (boundary) {
				if ((lastpa ^ pa) & pagemask)
					return (EINVAL);
			}
		}
		lastpa = pa;
	}

	return (0);
}

int
_isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size,
    int flags)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _bus_dmamem_alloc_range(t, cookie->id_bouncebuflen,
	    NBPG, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags,
	    0, ISA_DMA_BOUNCE_THRESHOLD);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}
#endif /* NISADMA > 0 */
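
#if 0
/*
 * Hedged usage sketch (not compiled; not part of this file).  The xx_*
 * names are hypothetical and only illustrate how a driver's attach routine
 * might hook an edge-triggered handler at IPL_NET through the
 * isa_intr_establish() entry point above, using the chipset tag and IRQ
 * from its isa_attach_args.
 */
struct xx_softc {
	struct device sc_dev;		/* base device; must come first */
	void *sc_ih;			/* handle from isa_intr_establish() */
};

int
xx_intr_sketch(void *arg)
{
	struct xx_softc *sc = arg;

	/* acknowledge the hardware here, then report the interrupt handled */
	return (1);
}

void
xx_attach_sketch(struct xx_softc *sc, struct isa_attach_args *ia)
{
	sc->sc_ih = isa_intr_establish(ia->ia_ic, ia->ia_irq, IST_EDGE,
	    IPL_NET, xx_intr_sketch, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL)
		printf(": couldn't establish interrupt\n");
}
#endif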
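
#if 0
/*
 * Hedged usage sketch (not compiled; not part of this file, names are
 * illustrative).  Shows one buffer pushed through the generic bus_dma(9)
 * entry points, which a tag such as &isa_bus_dma_tag above dispatches to
 * the _isa_* functions in this file.  If the buffer sits above
 * ISA_DMA_BOUNCE_THRESHOLD, _isa_bus_dmamap_load() silently substitutes a
 * bounce buffer and _isa_bus_dmamap_sync() copies the data across.
 */
int
xx_dma_write_sketch(bus_dma_tag_t dmat, void *buf, bus_size_t len)
{
	bus_dmamap_t map;
	int error;

	/* one segment is enough for this simple sketch */
	error = bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT, &map);
	if (error)
		return (error);

	/* kernel buffer, so no proc pointer; may pick a bounce buffer */
	error = bus_dmamap_load(dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error) {
		bus_dmamap_destroy(dmat, map);
		return (error);
	}

	/* flush the data (or its bounce copy) before the device reads it */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREWRITE);

	/* ... program the device with map->dm_segs[0] and wait ... */

	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dmat, map);
	bus_dmamap_destroy(dmat, map);
	return (0);
}
#endif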