/*	$NetBSD: iommu.c,v 1.72 2002/10/02 16:02:10 thorpej Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1995	Paul Kranenburg
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by Paul Kranenburg.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_sparc_arch.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/autoconf.h>
#include <machine/ctlreg.h>
#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/sparc/iommureg.h>
#include <sparc/sparc/iommuvar.h>

struct iommu_softc {
	struct device	sc_dev;		/* base device */
	struct iommureg	*sc_reg;
	u_int		sc_pagesize;
	u_int		sc_range;
	bus_addr_t	sc_dvmabase;
	iopte_t		*sc_ptes;
	int		sc_hasiocache;
	/*
	 * Note: operations on the extent map are being protected with
	 * splhigh(), since we cannot predict at which interrupt priority
	 * our clients will run.
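	 * (See the splhigh()/splx() brackets around extent_alloc_subregion1()
	 * and extent_free() in iommu_dvma_alloc() and iommu_dmamap_unload()
	 * below.)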
	 */
	struct sparc_bus_dma_tag sc_dmatag;
	struct extent *sc_dvmamap;
};

static int has_iocache;

/* autoconfiguration driver */
int	iommu_print __P((void *, const char *));
void	iommu_attach __P((struct device *, struct device *, void *));
int	iommu_match __P((struct device *, struct cfdata *, void *));

#if defined(SUN4M)
static void iommu_copy_prom_entries __P((struct iommu_softc *));
#endif

CFATTACH_DECL(iommu, sizeof(struct iommu_softc),
    iommu_match, iommu_attach, NULL, NULL);

/* IOMMU DMA map functions */
int	iommu_dmamap_create __P((bus_dma_tag_t, bus_size_t, int, bus_size_t,
	    bus_size_t, int, bus_dmamap_t *));
int	iommu_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	iommu_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	iommu_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	iommu_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
void	iommu_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	iommu_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	    bus_size_t, int));

int	iommu_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
	    int nsegs, size_t size, caddr_t *kvap, int flags));
paddr_t	iommu_dmamem_mmap __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
	    int nsegs, off_t off, int prot, int flags));
int	iommu_dvma_alloc(struct iommu_softc *, bus_dmamap_t, vaddr_t,
	    bus_size_t, int, bus_addr_t *, bus_size_t *);

/*
 * Print the location of some iommu-attached device (called just
 * before attaching that device).  If `iommu' is not NULL, the
 * device was found but not configured; print the iommu as well.
 * Return UNCONF (config_find ignores this if the device was configured).
 */
int
iommu_print(args, iommu)
	void *args;
	const char *iommu;
{
	struct iommu_attach_args *ia = args;

	if (iommu)
		printf("%s at %s", ia->iom_name, iommu);
	return (UNCONF);
}

int
iommu_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct mainbus_attach_args *ma = aux;

	if (CPU_ISSUN4 || CPU_ISSUN4C)
		return (0);
	return (strcmp(cf->cf_name, ma->ma_name) == 0);
}

/*
 * Attach the iommu.
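 * We set up the bus DMA tag, map the IOMMU registers, allocate and map
 * the IOPTE table, preserve the PROM's existing entries, point the
 * hardware at the new table, create the DVMA resource map, and finally
 * configure the children (normally the Sbus) found in the device tree.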
 */
void
iommu_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
#if defined(SUN4M)
	struct iommu_softc *sc = (struct iommu_softc *)self;
	struct mainbus_attach_args *ma = aux;
	struct sparc_bus_dma_tag *dmat = &sc->sc_dmatag;
	bus_space_handle_t bh;
	int node;
	int js1_implicit_iommu;
	int i, s;
	u_int iopte_table_pa;
	struct pglist mlist;
	u_int size;
	struct vm_page *m;
	vaddr_t va;

	dmat->_cookie = sc;
	dmat->_dmamap_create = iommu_dmamap_create;
	dmat->_dmamap_destroy = _bus_dmamap_destroy;
	dmat->_dmamap_load = iommu_dmamap_load;
	dmat->_dmamap_load_mbuf = iommu_dmamap_load_mbuf;
	dmat->_dmamap_load_uio = iommu_dmamap_load_uio;
	dmat->_dmamap_load_raw = iommu_dmamap_load_raw;
	dmat->_dmamap_unload = iommu_dmamap_unload;
	dmat->_dmamap_sync = iommu_dmamap_sync;

	dmat->_dmamem_alloc = _bus_dmamem_alloc;
	dmat->_dmamem_free = _bus_dmamem_free;
	dmat->_dmamem_map = iommu_dmamem_map;
	dmat->_dmamem_unmap = _bus_dmamem_unmap;
	dmat->_dmamem_mmap = iommu_dmamem_mmap;

	/*
	 * The JS1/OF device tree does not have an iommu node, and the
	 * sbus node is directly under the root.  mainbus_attach detects
	 * this and calls us with the sbus node instead, so that we can
	 * attach an implicit iommu and attach that sbus node under it.
	 */
	node = ma->ma_node;
	if (strcmp(PROM_getpropstring(node, "name"), "sbus") == 0)
		js1_implicit_iommu = 1;
	else
		js1_implicit_iommu = 0;

	/*
	 * Map registers into our space.  The PROM may have done this
	 * already, but I feel better if we have our own copy.  Plus, the
	 * PROM doesn't map the entire register set.
	 *
	 * XXX struct iommureg is bigger than ra->ra_len; what are the
	 * other fields for?
	 */
	if (bus_space_map(ma->ma_bustag, ma->ma_paddr,
	    sizeof(struct iommureg), 0, &bh) != 0) {
		printf("iommu_attach: cannot map registers\n");
		return;
	}
	sc->sc_reg = (struct iommureg *)bh;

	sc->sc_hasiocache = js1_implicit_iommu ? 0
	    : node_has_property(node, "cache-coherence?");
	if (CACHEINFO.c_enabled == 0) /* XXX - is this correct? */
		sc->sc_hasiocache = 0;
	has_iocache = sc->sc_hasiocache; /* Set global flag */

	sc->sc_pagesize = js1_implicit_iommu ? NBPG
	    : PROM_getpropint(node, "page-size", NBPG);

	/*
	 * Allocate memory for the I/O pagetables.
	 * This takes 64K of contiguous physical memory to map 64M of
	 * DVMA space (starting at IOMMU_DVMA_BASE).
	 * The table must be aligned on a (-IOMMU_DVMA_BASE/pagesize)
	 * boundary (i.e. 64K for 64M of DVMA space).
	 */

	size = ((0 - IOMMU_DVMA_BASE) / sc->sc_pagesize) * sizeof(iopte_t);
	if (uvm_pglistalloc(size, vm_first_phys, vm_first_phys+vm_num_phys,
	    size, 0, &mlist, 1, 0) != 0)
		panic("iommu_attach: no memory");

	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		panic("iommu_attach: no memory");

	sc->sc_ptes = (iopte_t *)va;

	m = TAILQ_FIRST(&mlist);
	iopte_table_pa = VM_PAGE_TO_PHYS(m);

	/* Map the pages (uncached, since the IOMMU reads them directly) */
	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		paddr_t pa = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, pa | PMAP_NC, VM_PROT_READ | VM_PROT_WRITE);
		va += NBPG;
	}
	pmap_update(pmap_kernel());

	/*
	 * Copy entries from the current IOMMU table.
	 * XXX - Why do we need to do this?
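	 * (Presumably because the PROM may still have live DVMA
	 * translations set up, e.g. for the console or the boot device,
	 * and silently dropping them mid-transfer would be fatal.  That
	 * is an educated guess only; the question above stands.)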
	 */
	iommu_copy_prom_entries(sc);

	/*
	 * Now we can install our new pagetable into the IOMMU.
	 */
	sc->sc_range = 0 - IOMMU_DVMA_BASE;
	sc->sc_dvmabase = IOMMU_DVMA_BASE;

	/*
	 * Calculate log2(sc->sc_range/16MB); e.g. for the standard
	 * 64MB DVMA region, 64MB/16MB = 4, so i == 2.
	 */
	i = ffs(sc->sc_range/(1 << 24)) - 1;
	if ((1 << i) != (sc->sc_range/(1 << 24)))
		panic("iommu: bad range: %d", i);

	s = splhigh();
	IOMMU_FLUSHALL(sc);

	/* Load range and physical address of PTEs */
	sc->sc_reg->io_cr = (sc->sc_reg->io_cr & ~IOMMU_CTL_RANGE) |
	    (i << IOMMU_CTL_RANGESHFT) | IOMMU_CTL_ME;
	sc->sc_reg->io_bar = (iopte_table_pa >> 4) & IOMMU_BAR_IBA;

	IOMMU_FLUSHALL(sc);
	splx(s);

	printf(": version 0x%x/0x%x, page-size %d, range %dMB\n",
	    (sc->sc_reg->io_cr & IOMMU_CTL_VER) >> 24,
	    (sc->sc_reg->io_cr & IOMMU_CTL_IMPL) >> 28,
	    sc->sc_pagesize,
	    sc->sc_range >> 20);

	sc->sc_dvmamap = extent_create("iommudvma",
	    IOMMU_DVMA_BASE, IOMMU_DVMA_END,
	    M_DEVBUF, 0, 0, EX_NOWAIT);
	if (sc->sc_dvmamap == NULL)
		panic("iommu: unable to allocate DVMA map");

	/*
	 * If we are attaching an implicit iommu on JS1/OF, we do not have
	 * an iommu node to traverse; instead, mainbus_attach passed us the
	 * sbus node in ma->ma_node.  Attach it as the only iommu child.
	 */
	if (js1_implicit_iommu) {
		struct iommu_attach_args ia;
		struct openprom_addr sbus_iommu_reg = { 0, 0x10001000, 0x28 };

		bzero(&ia, sizeof ia);

		/* Propagate BUS & DMA tags */
		ia.iom_bustag = ma->ma_bustag;
		ia.iom_dmatag = &sc->sc_dmatag;

		ia.iom_name = "sbus";
		ia.iom_node = node;
		ia.iom_reg = &sbus_iommu_reg;
		ia.iom_nreg = 1;

		(void) config_found(&sc->sc_dev, (void *)&ia, iommu_print);
		return;
	}

	/*
	 * Loop through ROM children (expect Sbus among them).
	 */
	for (node = firstchild(node); node; node = nextsibling(node)) {
		struct iommu_attach_args ia;

		bzero(&ia, sizeof ia);
		ia.iom_name = PROM_getpropstring(node, "name");

		/* Propagate BUS & DMA tags */
		ia.iom_bustag = ma->ma_bustag;
		ia.iom_dmatag = &sc->sc_dmatag;

		ia.iom_node = node;

		ia.iom_reg = NULL;
		PROM_getprop(node, "reg", sizeof(struct openprom_addr),
		    &ia.iom_nreg, (void **)&ia.iom_reg);

		(void) config_found(&sc->sc_dev, (void *)&ia, iommu_print);
		if (ia.iom_reg != NULL)
			free(ia.iom_reg, M_DEVBUF);
	}
#endif
}

#if defined(SUN4M)
static void
iommu_copy_prom_entries(sc)
	struct iommu_softc *sc;
{
	u_int pbase, pa;
	u_int range;
	iopte_t *tpte_p;
	u_int pagesz = sc->sc_pagesize;
	int use_ac = (cpuinfo.cpu_impl == 4 && cpuinfo.mxcc);
	u_int mmupcr_save;

	/*
	 * We read in the original table using MMU bypass and copy all
	 * of its entries to the appropriate place in our new table,
	 * even if the sizes are different.
	 * This is pretty easy since we know DVMA ends at 0xffffffff.
	 */

	range = (1 << 24) <<
	    ((sc->sc_reg->io_cr & IOMMU_CTL_RANGE) >> IOMMU_CTL_RANGESHFT);

	pbase = (sc->sc_reg->io_bar & IOMMU_BAR_IBA) <<
	    (14 - IOMMU_BAR_IBASHFT);

	if (use_ac) {
		/*
		 * Set the MMU AC bit so we'll still read from the cache
		 * in by-pass mode.
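		 * (Without AC, ASI_BYPASS loads on a Viking module with
		 * an MXCC would go straight to memory and could miss PROM
		 * page table entries still sitting in the external cache.
		 * This rationale is our reading of the code, not chapter
		 * and verse from the manual.)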
		 */
		mmupcr_save = lda(SRMMU_PCR, ASI_SRMMU);
		sta(SRMMU_PCR, ASI_SRMMU, mmupcr_save | VIKING_PCR_AC);
	} else
		mmupcr_save = 0; /* XXX - avoid GCC `uninitialized' warning */

	/* Flush entire IOMMU TLB before messing with the in-memory tables */
	IOMMU_FLUSHALL(sc);

	/*
	 * tpte_p = top of our PTE table
	 * pa = top of current PTE table
	 * Then work downwards and copy entries until we hit the bottom
	 * of either table.
	 */
	for (tpte_p = &sc->sc_ptes[((0 - IOMMU_DVMA_BASE)/pagesz) - 1],
	     pa = (u_int)pbase + (range/pagesz - 1)*sizeof(iopte_t);
	     tpte_p >= &sc->sc_ptes[0] && pa >= (u_int)pbase;
	     tpte_p--, pa -= sizeof(iopte_t)) {

		*tpte_p = lda(pa, ASI_BYPASS);
	}

	if (use_ac) {
		/* restore mmu after bug-avoidance */
		sta(SRMMU_PCR, ASI_SRMMU, mmupcr_save);
	}
}
#endif

static void
iommu_enter(struct iommu_softc *sc, bus_addr_t dva, paddr_t pa)
{
	int pte;

	/* This routine relies on the fact that sc->sc_pagesize == PAGE_SIZE */

#ifdef DIAGNOSTIC
	if (dva < sc->sc_dvmabase)
		panic("iommu_enter: dva 0x%lx not in DVMA space", (long)dva);
#endif

	/*
	 * Build the IOPTE: page frame number plus valid, writable and,
	 * if the I/O cache is present, cacheable bits.
	 */
	pte = atop(pa) << IOPTE_PPNSHFT;
	pte &= IOPTE_PPN;
	pte |= IOPTE_V | IOPTE_W | (has_iocache ? IOPTE_C : 0);
	sc->sc_ptes[atop(dva - sc->sc_dvmabase)] = pte;
	IOMMU_FLUSHPAGE(sc, dva);
}

/*
 * iommu_remove: removes mappings created by iommu_enter
 */
static void
iommu_remove(struct iommu_softc *sc, bus_addr_t dva, bus_size_t len)
{
	u_int pagesz = sc->sc_pagesize;
	bus_addr_t base = sc->sc_dvmabase;

#ifdef DEBUG
	if (dva < base)
		panic("iommu_remove: va 0x%lx not in DVMA space", (long)dva);
#endif

	while ((long)len > 0) {
#ifdef notyet
#ifdef DEBUG
		if ((sc->sc_ptes[atop(dva - base)] & IOPTE_V) == 0)
			panic("iommu_remove: clearing invalid pte at dva 0x%lx",
			    (long)dva);
#endif
#endif
		sc->sc_ptes[atop(dva - base)] = 0;
		IOMMU_FLUSHPAGE(sc, dva);
		len -= pagesz;
		dva += pagesz;
	}
}

#if 0	/* These registers aren't there??? */
void
iommu_error()
{
	struct iommu_softc *sc = X;
	struct iommureg *iop = sc->sc_reg;

	printf("iommu: afsr 0x%x, afar 0x%x\n", iop->io_afsr, iop->io_afar);
	printf("iommu: mfsr 0x%x, mfar 0x%x\n", iop->io_mfsr, iop->io_mfar);
}

int
iommu_alloc(va, len)
	u_int va, len;
{
	struct iommu_softc *sc = X;
	int off, tva, iovaddr, pte;
	paddr_t pa;

	off = (int)va & PGOFSET;
	len = round_page(len + off);
	va -= off;

	if ((int)sc->sc_dvmacur + len > 0)
		sc->sc_dvmacur = sc->sc_dvmabase;

	iovaddr = tva = sc->sc_dvmacur;
	sc->sc_dvmacur += len;
	while (len) {
		(void) pmap_extract(pmap_kernel(), va, &pa);

#define IOMMU_PPNSHIFT	8
#define IOMMU_V		0x00000002
#define IOMMU_W		0x00000004

		pte = atop(pa) << IOMMU_PPNSHIFT;
		pte |= IOMMU_V | IOMMU_W;
		sta(sc->sc_ptes + atop(tva - sc->sc_dvmabase), ASI_BYPASS, pte);
		sc->sc_reg->io_flushpage = tva;
		len -= NBPG;
		va += NBPG;
		tva += NBPG;
	}
	return iovaddr + off;
}
#endif

/*
 * IOMMU DMA map functions.
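 *
 * A typical client driver reaches these through the DMA tag propagated
 * in the attach args above.  A hypothetical sketch (error handling
 * omitted; `sa' stands for the client's sbus_attach_args):
 *
 *	bus_dmamap_t map;
 *
 *	bus_dmamap_create(sa->sa_dmatag, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(sa->sa_dmatag, map, buf, len, NULL, BUS_DMA_NOWAIT);
 *	... program the device with map->dm_segs[0].ds_addr ...
 *	bus_dmamap_unload(sa->sa_dmatag, map);
 *	bus_dmamap_destroy(sa->sa_dmatag, map);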
 */
int
iommu_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct iommu_softc *sc = t->_cookie;
	bus_dmamap_t map;
	int error;

	if ((error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
	    boundary, flags, &map)) != 0)
		return (error);

	if ((flags & BUS_DMA_24BIT) != 0) {
		/* Limit this map to the range usable by `24-bit' devices */
		map->_dm_ex_start = D24_DVMA_BASE;
		map->_dm_ex_end = D24_DVMA_END;
	} else {
		/* Enable allocations from the entire map */
		map->_dm_ex_start = sc->sc_dvmamap->ex_start;
		map->_dm_ex_end = sc->sc_dvmamap->ex_end;
	}

	*dmamp = map;
	return (0);
}

/*
 * Internal routine to allocate space in the IOMMU map.
 */
int
iommu_dvma_alloc(sc, map, va, len, flags, dvap, sgsizep)
	struct iommu_softc *sc;
	bus_dmamap_t map;
	vaddr_t va;
	bus_size_t len;
	int flags;
	bus_addr_t *dvap;
	bus_size_t *sgsizep;
{
	bus_size_t sgsize;
	u_long align, voff, dvaddr;
	int s, error;
	int pagesz = PAGE_SIZE;

	/*
	 * Remember page offset, then truncate the buffer address to
	 * a page boundary.
	 */
	voff = va & (pagesz - 1);
	va &= -pagesz;

	if (len > map->_dm_size)
		return (EINVAL);

	sgsize = (len + voff + pagesz - 1) & -pagesz;
	align = dvma_cachealign ? dvma_cachealign : map->_dm_align;

	s = splhigh();
	error = extent_alloc_subregion1(sc->sc_dvmamap,
	    map->_dm_ex_start, map->_dm_ex_end,
	    sgsize, align, va & (align-1),
	    map->_dm_boundary,
	    (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
	    &dvaddr);
	splx(s);
	*dvap = (bus_addr_t)dvaddr;
	*sgsizep = sgsize;
	return (error);
}

/*
 * Prepare buffer for DMA transfer.
 */
int
iommu_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct iommu_softc *sc = t->_cookie;
	bus_size_t sgsize;
	bus_addr_t dva;
	vaddr_t va = (vaddr_t)buf;
	int pagesz = PAGE_SIZE;
	pmap_t pmap;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	/* Allocate IOMMU resources */
	if ((error = iommu_dvma_alloc(sc, map, va, buflen, flags,
	    &dva, &sgsize)) != 0)
		return (error);

	cpuinfo.cache_flush(buf, buflen); /* XXX - move to bus_dma_sync? */

	/*
	 * We always use just one segment.
	 */
	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_sgsize = sgsize;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	while (sgsize != 0) {
		paddr_t pa;

		/*
		 * Get the physical address for this page.
		 */
		(void) pmap_extract(pmap, va, &pa);

		iommu_enter(sc, dva, pa);

		dva += pagesz;
		va += pagesz;
		sgsize -= pagesz;
	}

	return (0);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
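 * (Not implemented; the stub below just panics if it is ever called,
 * as does the uio variant following it.)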
 */
int
iommu_dmamap_load_mbuf(t, map, m, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m;
	int flags;
{

	panic("iommu_dmamap_load_mbuf: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
iommu_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("iommu_dmamap_load_uio: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
iommu_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{
	struct iommu_softc *sc = t->_cookie;
	struct vm_page *m;
	paddr_t pa;
	bus_addr_t dva;
	bus_size_t sgsize;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;
	int error;

	map->dm_nsegs = 0;

	/* Allocate IOMMU resources */
	if ((error = iommu_dvma_alloc(sc, map, segs[0]._ds_va, size,
	    flags, &dva, &sgsize)) != 0)
		return (error);

	/*
	 * Note the DVMA address in case bus_dmamem_map() is called later.
	 * It can then ensure cache coherency by choosing a KVA that
	 * is aligned to `ds_addr'.
	 */
	segs[0].ds_addr = dva;
	segs[0].ds_len = size;

	map->dm_segs[0].ds_addr = dva;
	map->dm_segs[0].ds_len = size;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/* Map physical pages into the IOMMU */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {
		if (sgsize == 0)
			panic("iommu_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);
		iommu_enter(sc, dva, pa);
		dva += pagesz;
		sgsize -= pagesz;
	}

	map->dm_nsegs = 1;
	map->dm_mapsize = size;

	return (0);
}

/*
 * Unload an IOMMU DMA map.
 */
void
iommu_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct iommu_softc *sc = t->_cookie;
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	bus_addr_t dva;
	bus_size_t len;
	int i, s, error;

	for (i = 0; i < nsegs; i++) {
		dva = segs[i].ds_addr & -PAGE_SIZE;
		len = segs[i]._ds_sgsize;

		iommu_remove(sc, dva, len);
		s = splhigh();
		error = extent_free(sc->sc_dvmamap, dva, len, EX_NOWAIT);
		splx(s);
		if (error != 0)
			printf("warning: %ld bytes of DVMA space lost\n",
			    (long)len);
	}

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * DMA map synchronization.
 */
void
iommu_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should flush CPU write buffers.
	 */
}

/*
 * Map DMA-safe memory.
 */
int
iommu_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	struct vm_page *m;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *mlist;
	int cbit;
	u_long align;
	int pagesz = PAGE_SIZE;

	if (nsegs != 1)
		panic("iommu_dmamem_map: nsegs = %d", nsegs);

	cbit = has_iocache ? 0 : PMAP_NC;
	align = dvma_cachealign ? dvma_cachealign : pagesz;
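
	/*
	 * (Aligning the KVA to `dvma_cachealign' keeps it in the same
	 * alias class of a virtually indexed cache as the DVMA address
	 * recorded in `ds_addr' by iommu_dmamap_load_raw(), so the CPU
	 * and the device never see two incoherent cached copies of the
	 * same data.)
	 */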

	size = round_page(size);

	/*
	 * In case the segment has already been loaded by
	 * iommu_dmamap_load_raw(), find a region of kernel virtual
	 * addresses that can accommodate our alignment requirements.
	 */
	va = _bus_dma_valloc_skewed(size, 0, align,
	    segs[0].ds_addr & (align - 1));
	if (va == 0)
		return (ENOMEM);

	segs[0]._ds_va = va;
	*kvap = (caddr_t)va;

	/*
	 * Map the pages allocated in _bus_dmamem_alloc() to the
	 * kernel virtual address space.
	 */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {

		if (size == 0)
			panic("iommu_dmamem_map: size botch");

		addr = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, addr | cbit, VM_PROT_READ | VM_PROT_WRITE);
#if 0
		if (flags & BUS_DMA_COHERENT)
			/* XXX */;
#endif
		va += pagesz;
		size -= pagesz;
	}
	pmap_update(pmap_kernel());

	return (0);
}

/*
 * mmap(2)'ing DMA-safe memory.
 */
paddr_t
iommu_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{

	panic("iommu_dmamem_mmap: not implemented");
}