/*	$NetBSD: iommu.c,v 1.63 2002/03/11 16:27:03 pk Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1995	Paul Kranenburg
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by Paul Kranenburg.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_sparc_arch.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/autoconf.h>
#include <machine/ctlreg.h>
#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/sparc/iommureg.h>
#include <sparc/sparc/iommuvar.h>

struct iommu_softc {
	struct device	sc_dev;		/* base device */
	struct iommureg	*sc_reg;
	u_int		sc_pagesize;
	u_int		sc_range;
	bus_addr_t	sc_dvmabase;
	iopte_t		*sc_ptes;
	int		sc_hasiocache;
};
struct iommu_softc *iommu_sc;	/* XXX */
int has_iocache;

/*
 * Note: operations on the extent map are being protected with
 * splhigh(), since we cannot predict at which interrupt priority
 * our clients will run.
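 *
 * The pattern used throughout is (a sketch only; see extent(9) and
 * spl(9) for the interfaces):
 *
 *	s = splhigh();
 *	error = extent_alloc_subregion1(iommu_dvmamap, ...);
 *	splx(s);
 *	...
 *	s = splhigh();
 *	extent_free(iommu_dvmamap, dva, len, EX_NOWAIT);
 *	splx(s);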
 */
struct extent *iommu_dvmamap;


/* autoconfiguration driver */
int	iommu_print __P((void *, const char *));
void	iommu_attach __P((struct device *, struct device *, void *));
int	iommu_match __P((struct device *, struct cfdata *, void *));

#if defined(SUN4M)
static void iommu_copy_prom_entries __P((struct iommu_softc *));
#endif

struct cfattach iommu_ca = {
	sizeof(struct iommu_softc), iommu_match, iommu_attach
};

/* IOMMU DMA map functions */
int	iommu_dmamap_create __P((bus_dma_tag_t, bus_size_t, int, bus_size_t,
			bus_size_t, int, bus_dmamap_t *));
int	iommu_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
			bus_size_t, struct proc *, int));
int	iommu_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
			struct mbuf *, int));
int	iommu_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
			struct uio *, int));
int	iommu_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
			bus_dma_segment_t *, int, bus_size_t, int));
void	iommu_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	iommu_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
			bus_size_t, int));

int	iommu_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
			int nsegs, size_t size, caddr_t *kvap, int flags));
paddr_t	iommu_dmamem_mmap __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
			int nsegs, off_t off, int prot, int flags));
int	iommu_dvma_alloc __P((bus_dmamap_t, vaddr_t, bus_size_t, int,
			bus_addr_t *, bus_size_t *));


struct sparc_bus_dma_tag iommu_dma_tag = {
	NULL,
	iommu_dmamap_create,
	_bus_dmamap_destroy,
	iommu_dmamap_load,
	iommu_dmamap_load_mbuf,
	iommu_dmamap_load_uio,
	iommu_dmamap_load_raw,
	iommu_dmamap_unload,
	iommu_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	iommu_dmamem_map,
	_bus_dmamem_unmap,
	iommu_dmamem_mmap
};
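
/*
 * Devices attached below the IOMMU receive a pointer to this tag in
 * their attach arguments and reach the functions above through the
 * generic bus_dma(9) interface.  For example (a sketch; `sc_dmatag'
 * stands for wherever the child driver saved the iom_dmatag pointer
 * from its iommu_attach_args):
 *
 *	bus_dmamap_t map;
 *	error = bus_dmamap_create(sc->sc_dmatag, MAXPHYS, 1, MAXPHYS,
 *	    0, BUS_DMA_NOWAIT, &map);
 *
 * ends up in iommu_dmamap_create() below.
 */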
/*
 * Print the location of some iommu-attached device (called just
 * before attaching that device).  If `iommu' is not NULL, the
 * device was found but not configured; print the iommu as well.
 * Return UNCONF (config_find ignores this if the device was configured).
 */
int
iommu_print(args, iommu)
	void *args;
	const char *iommu;
{
	struct iommu_attach_args *ia = args;

	if (iommu)
		printf("%s at %s", ia->iom_name, iommu);
	return (UNCONF);
}

int
iommu_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct mainbus_attach_args *ma = aux;

	if (CPU_ISSUN4OR4C)
		return (0);
	return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
}

/*
 * Attach the iommu.
 */
void
iommu_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
#if defined(SUN4M)
	struct iommu_softc *sc = (struct iommu_softc *)self;
	struct mainbus_attach_args *ma = aux;
	bus_space_handle_t bh;
	int node;
	int js1_implicit_iommu;
	int i, s;
	u_int iopte_table_pa;
	struct pglist mlist;
	u_int size;
	struct vm_page *m;
	vaddr_t va;

	/*
	 * XXX there is only one iommu, for now -- do not know how to
	 * address children on others
	 */
	if (sc->sc_dev.dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}
	iommu_sc = sc;

	/*
	 * The JS1/OF device tree does not have an iommu node; its sbus
	 * node sits directly under the root.  mainbus_attach detects this
	 * and calls us with the sbus node instead, so that we can attach
	 * an implicit iommu and hang that sbus node under it.
	 */
	node = ma->ma_node;
	if (strcmp(PROM_getpropstring(node, "name"), "sbus") == 0)
		js1_implicit_iommu = 1;
	else
		js1_implicit_iommu = 0;

	/*
	 * Map registers into our space. The PROM may have done this
	 * already, but I feel better if we have our own copy. Plus, the
	 * prom doesn't map the entire register set.
	 *
	 * XXX struct iommureg is bigger than ra->ra_len; what are the
	 * other fields for?
	 */
	if (bus_space_map(
			ma->ma_bustag,
			ma->ma_paddr,
			sizeof(struct iommureg),
			0,
			&bh) != 0) {
		printf("iommu_attach: cannot map registers\n");
		return;
	}
	sc->sc_reg = (struct iommureg *)bh;

	sc->sc_hasiocache = js1_implicit_iommu ? 0
		: node_has_property(node, "cache-coherence?");
	if (CACHEINFO.c_enabled == 0) /* XXX - is this correct? */
		sc->sc_hasiocache = 0;
	has_iocache = sc->sc_hasiocache; /* Set global flag */

	sc->sc_pagesize = js1_implicit_iommu ? NBPG
		: PROM_getpropint(node, "page-size", NBPG);

	/*
	 * Allocate memory for I/O pagetables.
	 * This takes 64K of contiguous physical memory to map 64M of
	 * DVMA space (starting at IOMMU_DVMA_BASE).
	 * The table must be aligned on a (-IOMMU_DVMA_BASE/pagesize)
	 * boundary (i.e. 64K for 64M of DVMA space).
	 */
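	/*
	 * Worked example of the sizing below (a sketch, assuming the
	 * usual 4KB IOMMU page and the 64MB DVMA window the comment
	 * above describes):
	 *
	 *	(0 - IOMMU_DVMA_BASE) / pagesize = 64MB / 4KB = 16384 PTEs
	 *	16384 * sizeof(iopte_t) = 16384 * 4 = 64KB of tables,
	 *
	 * allocated physically contiguous and aligned to their own
	 * size, so the table base satisfies the IOMMU base address
	 * register's alignment requirement.
	 */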
	size = ((0 - IOMMU_DVMA_BASE) / sc->sc_pagesize) * sizeof(iopte_t);
	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc(size, vm_first_phys, vm_first_phys+vm_num_phys,
			    size, 0, &mlist, 1, 0) != 0)
		panic("iommu_attach: no memory");

	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		panic("iommu_attach: no memory");

	sc->sc_ptes = (iopte_t *)va;

	m = TAILQ_FIRST(&mlist);
	iopte_table_pa = VM_PAGE_TO_PHYS(m);

	/* Map the pages */
	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		paddr_t pa = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, pa | PMAP_NC, VM_PROT_READ | VM_PROT_WRITE);
		va += NBPG;
	}
	pmap_update(pmap_kernel());

	/*
	 * Copy entries from current IOMMU table.
	 * XXX - Why do we need to do this?
	 */
	iommu_copy_prom_entries(sc);

	/*
	 * Now we can install our new pagetable into the IOMMU
	 */
	sc->sc_range = 0 - IOMMU_DVMA_BASE;
	sc->sc_dvmabase = IOMMU_DVMA_BASE;

	/* calculate log2(sc->sc_range/16MB) */
	i = ffs(sc->sc_range/(1 << 24)) - 1;
	if ((1 << i) != (sc->sc_range/(1 << 24)))
		panic("iommu: bad range: %d", i);

	s = splhigh();
	IOMMU_FLUSHALL(sc);

	/* Load range and physical address of PTEs */
	sc->sc_reg->io_cr = (sc->sc_reg->io_cr & ~IOMMU_CTL_RANGE) |
			  (i << IOMMU_CTL_RANGESHFT) | IOMMU_CTL_ME;
	sc->sc_reg->io_bar = (iopte_table_pa >> 4) & IOMMU_BAR_IBA;

	IOMMU_FLUSHALL(sc);
	splx(s);

	printf(": version 0x%x/0x%x, page-size %d, range %dMB\n",
		(sc->sc_reg->io_cr & IOMMU_CTL_VER) >> 24,
		(sc->sc_reg->io_cr & IOMMU_CTL_IMPL) >> 28,
		sc->sc_pagesize,
		sc->sc_range >> 20);

	iommu_dvmamap = extent_create("iommudvma",
				IOMMU_DVMA_BASE, IOMMU_DVMA_END,
				M_DEVBUF, 0, 0, EX_NOWAIT);
	if (iommu_dvmamap == NULL)
		panic("iommu: unable to allocate DVMA map");

	/*
	 * If we are attaching an implicit iommu on JS1/OF we do not have
	 * an iommu node to traverse; instead, mainbus_attach passed us
	 * the sbus node in ma->ma_node.  Attach it as the only iommu child.
	 */
	if (js1_implicit_iommu) {
		struct iommu_attach_args ia;
		struct iommu_reg sbus_iommu_reg = { 0, 0x10001000, 0x28 };

		bzero(&ia, sizeof ia);

		/* Propagate BUS & DMA tags */
		ia.iom_bustag = ma->ma_bustag;
		ia.iom_dmatag = &iommu_dma_tag;

		ia.iom_name = "sbus";
		ia.iom_node = node;
		ia.iom_reg = &sbus_iommu_reg;
		ia.iom_nreg = 1;

		(void) config_found(&sc->sc_dev, (void *)&ia, iommu_print);
		return;
	}

	/*
	 * Loop through ROM children (expect Sbus among them).
	 */
	for (node = firstchild(node); node; node = nextsibling(node)) {
		struct iommu_attach_args ia;

		bzero(&ia, sizeof ia);
		ia.iom_name = PROM_getpropstring(node, "name");

		/* Propagate BUS & DMA tags */
		ia.iom_bustag = ma->ma_bustag;
		ia.iom_dmatag = &iommu_dma_tag;

		ia.iom_node = node;

		ia.iom_reg = NULL;
		PROM_getprop(node, "reg", sizeof(struct sbus_reg),
			     &ia.iom_nreg, (void **)&ia.iom_reg);

		(void) config_found(&sc->sc_dev, (void *)&ia, iommu_print);
		if (ia.iom_reg != NULL)
			free(ia.iom_reg, M_DEVBUF);
	}
#endif
}

#if defined(SUN4M)
static void
iommu_copy_prom_entries(sc)
	struct iommu_softc *sc;
{
	u_int pbase, pa;
	u_int range;
	iopte_t *tpte_p;
	u_int pagesz = sc->sc_pagesize;
	int use_ac = (cpuinfo.cpu_impl == 4 && cpuinfo.mxcc);
	u_int mmupcr_save;

	/*
	 * We read in the original table using MMU bypass and copy all
	 * of its entries to the appropriate place in our new table,
	 * even if the sizes are different.
	 * This is pretty easy since we know DVMA ends at 0xffffffff.
	 */

	range = (1 << 24) <<
	    ((sc->sc_reg->io_cr & IOMMU_CTL_RANGE) >> IOMMU_CTL_RANGESHFT);

	pbase = (sc->sc_reg->io_bar & IOMMU_BAR_IBA) <<
	    (14 - IOMMU_BAR_IBASHFT);

	if (use_ac) {
		/*
		 * Set MMU AC bit so we'll still read from the cache
		 * in by-pass mode.
		 */
		mmupcr_save = lda(SRMMU_PCR, ASI_SRMMU);
		sta(SRMMU_PCR, ASI_SRMMU, mmupcr_save | VIKING_PCR_AC);
	} else
		mmupcr_save = 0; /* XXX - avoid GCC `uninitialized' warning */

	/* Flush entire IOMMU TLB before messing with the in-memory tables */
	IOMMU_FLUSHALL(sc);

	/*
	 * tpte_p = top of our PTE table
	 * pa     = top of current PTE table
	 * Then work downwards and copy entries until we hit the bottom
	 * of either table.
	 */
	for (tpte_p = &sc->sc_ptes[((0 - IOMMU_DVMA_BASE)/pagesz) - 1],
	     pa = (u_int)pbase + (range/pagesz - 1)*sizeof(iopte_t);
	     tpte_p >= &sc->sc_ptes[0] && pa >= (u_int)pbase;
	     tpte_p--, pa -= sizeof(iopte_t)) {

		*tpte_p = lda(pa, ASI_BYPASS);
	}

	if (use_ac) {
		/* restore mmu after bug-avoidance */
		sta(SRMMU_PCR, ASI_SRMMU, mmupcr_save);
	}
}
#endif
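
/*
 * Example IOPTE (a sketch; the field layout follows iommureg.h):
 * mapping physical page 0x12345000 for a DVMA write with the I/O
 * cache enabled, iommu_enter() below computes
 *
 *	pte = (atop(0x12345000) << IOPTE_PPNSHFT) & IOPTE_PPN;
 *	pte |= IOPTE_V | IOPTE_W | IOPTE_C;
 *
 * stores it at sc_ptes[atop(dva - sc_dvmabase)] and flushes the
 * IOMMU TLB entry covering `dva'.
 */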
void
iommu_enter(dva, pa)
	bus_addr_t dva;
	paddr_t pa;
{
	struct iommu_softc *sc = iommu_sc;
	int pte;

	/* This routine relies on the fact that sc->sc_pagesize == PAGE_SIZE */

#ifdef DIAGNOSTIC
	if (dva < sc->sc_dvmabase)
		panic("iommu_enter: dva 0x%lx not in DVMA space", (long)dva);
#endif

	pte = atop(pa) << IOPTE_PPNSHFT;
	pte &= IOPTE_PPN;
	pte |= IOPTE_V | IOPTE_W | (has_iocache ? IOPTE_C : 0);
	sc->sc_ptes[atop(dva - sc->sc_dvmabase)] = pte;
	IOMMU_FLUSHPAGE(sc, dva);
}

/*
 * iommu_remove: removes mappings created by iommu_enter
 */
void
iommu_remove(dva, len)
	bus_addr_t dva;
	bus_size_t len;
{
	struct iommu_softc *sc = iommu_sc;
	u_int pagesz = sc->sc_pagesize;
	bus_addr_t base = sc->sc_dvmabase;

#ifdef DEBUG
	if (dva < base)
		panic("iommu_remove: dva 0x%lx not in DVMA space", (long)dva);
#endif

	while ((long)len > 0) {
#ifdef notyet
#ifdef DEBUG
		if ((sc->sc_ptes[atop(dva - base)] & IOPTE_V) == 0)
			panic("iommu_remove: clearing invalid pte at dva 0x%lx",
			      (long)dva);
#endif
#endif
		sc->sc_ptes[atop(dva - base)] = 0;
		IOMMU_FLUSHPAGE(sc, dva);
		len -= pagesz;
		dva += pagesz;
	}
}

#if 0	/* These registers aren't there??? */
void
iommu_error()
{
	struct iommu_softc *sc = X;
	struct iommureg *iop = sc->sc_reg;

	printf("iommu: afsr 0x%x, afar 0x%x\n", iop->io_afsr, iop->io_afar);
	printf("iommu: mfsr 0x%x, mfar 0x%x\n", iop->io_mfsr, iop->io_mfar);
}

int
iommu_alloc(va, len)
	u_int va, len;
{
	struct iommu_softc *sc = X;
	int off, tva, iovaddr, pte;
	paddr_t pa;

	off = (int)va & PGOFSET;
	len = round_page(len + off);
	va -= off;

	if ((int)sc->sc_dvmacur + len > 0)
		sc->sc_dvmacur = sc->sc_dvmabase;

	iovaddr = tva = sc->sc_dvmacur;
	sc->sc_dvmacur += len;
	while (len) {
		(void) pmap_extract(pmap_kernel(), va, &pa);

#define IOMMU_PPNSHIFT	8
#define IOMMU_V		0x00000002
#define IOMMU_W		0x00000004

		pte = atop(pa) << IOMMU_PPNSHIFT;
		pte |= IOMMU_V | IOMMU_W;
		sta(sc->sc_ptes + atop(tva - sc->sc_dvmabase), ASI_BYPASS, pte);
		sc->sc_reg->io_flushpage = tva;
		len -= NBPG;
		va += NBPG;
		tva += NBPG;
	}
	return (iovaddr + off);
}
#endif


/*
 * IOMMU DMA map functions.
 */
int
iommu_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	bus_dmamap_t map;
	int error;

	if ((error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
					boundary, flags, &map)) != 0)
		return (error);

	if ((flags & BUS_DMA_24BIT) != 0) {
		/* Limit this map to the range usable by `24-bit' devices */
		map->_dm_ex_start = D24_DVMA_BASE;
		map->_dm_ex_end = D24_DVMA_END;
	} else {
		/* Enable allocations from the entire map */
		map->_dm_ex_start = iommu_dvmamap->ex_start;
		map->_dm_ex_end = iommu_dvmamap->ex_end;
	}

	*dmamp = map;
	return (0);
}
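
/*
 * Example (a sketch): a driver for a device that can only drive 24
 * address lines would create its maps with BUS_DMA_24BIT so that its
 * DVMA addresses are carved out of the D24_DVMA_BASE..D24_DVMA_END
 * subrange:
 *
 *	error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES,
 *	    0, BUS_DMA_NOWAIT | BUS_DMA_24BIT, &map);
 */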

/*
 * Internal routine to allocate space in the IOMMU map.
 */
int
iommu_dvma_alloc(map, va, len, flags, dvap, sgsizep)
	bus_dmamap_t map;
	vaddr_t va;
	bus_size_t len;
	int flags;
	bus_addr_t *dvap;
	bus_size_t *sgsizep;
{
	bus_size_t sgsize;
	u_long align, voff, dvaddr;
	int s, error;
	int pagesz = PAGE_SIZE;

	/*
	 * Remember page offset, then truncate the buffer address to
	 * a page boundary.
	 */
	voff = va & (pagesz - 1);
	va &= -pagesz;

	if (len > map->_dm_size)
		return (EINVAL);

	sgsize = (len + voff + pagesz - 1) & -pagesz;
	align = dvma_cachealign ? dvma_cachealign : map->_dm_align;

	s = splhigh();
	error = extent_alloc_subregion1(iommu_dvmamap,
					map->_dm_ex_start, map->_dm_ex_end,
					sgsize, align, va & (align-1),
					map->_dm_boundary,
					(flags & BUS_DMA_NOWAIT) == 0
						? EX_WAITOK : EX_NOWAIT,
					&dvaddr);
	splx(s);
	*dvap = (bus_addr_t)dvaddr;
	*sgsizep = sgsize;
	return (error);
}

/*
 * Prepare buffer for DMA transfer.
 */
int
iommu_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	bus_size_t sgsize;
	bus_addr_t dva;
	vaddr_t va = (vaddr_t)buf;
	int pagesz = PAGE_SIZE;
	pmap_t pmap;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	/* Allocate IOMMU resources */
	if ((error = iommu_dvma_alloc(map, va, buflen, flags,
					&dva, &sgsize)) != 0)
		return (error);

	cpuinfo.cache_flush(buf, buflen); /* XXX - move to bus_dma_sync? */

	/*
	 * We always use just one segment.
	 */
	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_sgsize = sgsize;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	while (sgsize != 0) {
		paddr_t pa;
		/*
		 * Get the physical address for this page.
		 */
		(void) pmap_extract(pmap, va, &pa);

		iommu_enter(dva, pa);

		dva += pagesz;
		va += pagesz;
		sgsize -= pagesz;
	}

	return (0);
}
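
/*
 * A typical transfer through the load/sync/unload functions in this
 * file, from a child driver's point of view (a sketch; see bus_dma(9)):
 *
 *	error = bus_dmamap_load(dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
 *	...program the device with map->dm_segs[0].ds_addr, start DMA...
 *	bus_dmamap_sync(dmat, map, 0, len, BUS_DMA_POSTREAD);
 *	bus_dmamap_unload(dmat, map);
 */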

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
iommu_dmamap_load_mbuf(t, map, m, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m;
	int flags;
{

	panic("iommu_dmamap_load_mbuf: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
iommu_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("iommu_dmamap_load_uio: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
iommu_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{
	struct vm_page *m;
	paddr_t pa;
	bus_addr_t dva;
	bus_size_t sgsize;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;
	int error;

	map->dm_nsegs = 0;

	/* Allocate IOMMU resources */
	if ((error = iommu_dvma_alloc(map, segs[0]._ds_va, size,
				      flags, &dva, &sgsize)) != 0)
		return (error);

	/*
	 * Note DVMA address in case bus_dmamem_map() is called later.
	 * It can then ensure cache coherency by choosing a KVA that
	 * is aligned to `ds_addr'.
	 */
	segs[0].ds_addr = dva;
	segs[0].ds_len = size;

	map->dm_segs[0].ds_addr = dva;
	map->dm_segs[0].ds_len = size;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/* Map physical pages into IOMMU */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {
		if (sgsize == 0)
			panic("iommu_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);
		iommu_enter(dva, pa);
		dva += pagesz;
		sgsize -= pagesz;
	}

	map->dm_nsegs = 1;
	map->dm_mapsize = size;

	return (0);
}

/*
 * Unload an IOMMU DMA map.
 */
void
iommu_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	bus_addr_t dva;
	bus_size_t len;
	int i, s, error;

	for (i = 0; i < nsegs; i++) {
		dva = segs[i].ds_addr & -PAGE_SIZE;
		len = segs[i]._ds_sgsize;

		iommu_remove(dva, len);
		s = splhigh();
		error = extent_free(iommu_dvmamap, dva, len, EX_NOWAIT);
		splx(s);
		if (error != 0)
			printf("warning: %ld bytes of DVMA space lost\n",
			       (long)len);
	}

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * DMA map synchronization.
 */
void
iommu_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should flush CPU write buffers.
	 */
}

/*
 * Map DMA-safe memory.
 */
int
iommu_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	struct vm_page *m;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *mlist;
	int cbit;
	u_long align;
	int pagesz = PAGE_SIZE;

	if (nsegs != 1)
		panic("iommu_dmamem_map: nsegs = %d", nsegs);

	cbit = has_iocache ? 0 : PMAP_NC;
	align = dvma_cachealign ? dvma_cachealign : pagesz;

	size = round_page(size);

	/*
	 * In case the segment has already been loaded by
	 * iommu_dmamap_load_raw(), find a region of kernel virtual
	 * addresses that can accommodate our alignment requirements.
	 */
	va = _bus_dma_valloc_skewed(size, 0, align,
				    segs[0].ds_addr & (align - 1));
	if (va == 0)
		return (ENOMEM);

	segs[0]._ds_va = va;
	*kvap = (caddr_t)va;

	/*
	 * Map the pages allocated in _bus_dmamem_alloc() to the
	 * kernel virtual address space.
	 */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {

		if (size == 0)
			panic("iommu_dmamem_map: size botch");

		addr = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, addr | cbit, VM_PROT_READ | VM_PROT_WRITE);
#if 0
		if (flags & BUS_DMA_COHERENT)
			/* XXX */;
#endif
		va += pagesz;
		size -= pagesz;
	}
	pmap_update(pmap_kernel());

	return (0);
}

/*
 * mmap(2)'ing DMA-safe memory.
 */
paddr_t
iommu_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{

	panic("iommu_dmamem_mmap: not implemented");
}