/*	$NetBSD: isadma_bounce.c,v 1.2 2002/03/18 01:21:12 simonb Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <mips/cache.h>
#define _MIPS_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/locore.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>

extern paddr_t avail_end;

/*
 * ISA can only DMA to 0-16M.
 */
#define	ISA_DMA_BOUNCE_THRESHOLD	(16 * 1024 * 1024)

/*
 * Cookie used by bouncing ISA DMA.  A pointer to one of these is stashed
 * in the DMA map.
 */
struct isadma_bounce_cookie {
	int	id_flags;		/* flags; see below */

	/*
	 * Information about the original buffer used during
	 * DMA map syncs.  Note that origbuflen is only used
	 * for ID_BUFTYPE_LINEAR.
	 */
	void	*id_origbuf;		/* pointer to orig buffer if
					   bouncing */
	bus_size_t id_origbuflen;	/* ...and size */
	int	id_buftype;		/* type of buffer */

	void	*id_bouncebuf;		/* pointer to the bounce buffer */
	bus_size_t id_bouncebuflen;	/* ...and size */
	int	id_nbouncesegs;		/* number of valid bounce segs */
	bus_dma_segment_t id_bouncesegs[1]; /* array of bounce buffer
					       physical memory segments */
};
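
/*
 * id_bouncesegs[] is really a variable-size trailing array: when a map
 * may need to bounce, the cookie is allocated with room for the map's
 * full segment count.  A sketch of the arithmetic (this mirrors what
 * isadma_bounce_dmamap_create() does below):
 *
 *	cookiesize = sizeof(struct isadma_bounce_cookie) +
 *	    sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1);
 */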

/* id_flags */
#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */

/* id_buftype */
#define	ID_BUFTYPE_INVALID	0
#define	ID_BUFTYPE_LINEAR	1
#define	ID_BUFTYPE_MBUF		2
#define	ID_BUFTYPE_UIO		3
#define	ID_BUFTYPE_RAW		4

int	isadma_bounce_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	isadma_bounce_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Create an ISA DMA map.
 */
int
isadma_bounce_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct isadma_bounce_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(*cookie);

	/*
	 * ISA only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	cookieflags = 0;
	if (avail_end > (t->_wbase + t->_wsize) ||
	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) *
		    (map->_dm_segcnt - 1));
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct isadma_bounce_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = isadma_bounce_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
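
/*
 * Typical use of these entry points, which a driver normally reaches
 * through the port's ISA bus_dma tag as the ordinary bus_dmamap_*()
 * calls.  This is only a minimal sketch for a device-read (device to
 * memory) transfer: "sc", its members and the sizes are hypothetical,
 * and error handling is omitted.  The bouncing, if any, is invisible
 * to the driver.
 *
 *	bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_dmamap);
 *	bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len, NULL,
 *	    BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_PREREAD);
 *	... start the device transfer and wait for it to complete ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 */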

/*
 * Destroy an ISA DMA map.
 */
void
isadma_bounce_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		isadma_bounce_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
isadma_bounce_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on an error condition we return "no valid
	 * mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like isadma_bounce_dmamap_load(), but for mbufs.
 */
int
isadma_bounce_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on an error condition we return "no valid
	 * mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("isadma_bounce_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
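
/*
 * A minimal sketch of the mbuf path for a device-write (memory to
 * device) transfer, e.g. a network transmit.  The driver, softc
 * members and sizes here are hypothetical and error handling is
 * omitted.  If the chain lies above the 16M line, the load above
 * quietly falls back to the bounce buffer, and the PREWRITE sync is
 * where the m_copydata() into it happens.
 *
 *	if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_txmap, m0,
 *	    BUS_DMA_NOWAIT) != 0) {
 *		m_freem(m0);
 *		return;
 *	}
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap, 0,
 *	    sc->sc_txmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
 *	... program the device and start the transfer ...
 */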

/*
 * Like isadma_bounce_dmamap_load(), but for uios.
 */
int
isadma_bounce_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

	panic("isadma_bounce_dmamap_load_uio: not implemented");
}

/*
 * Like isadma_bounce_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
isadma_bounce_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("isadma_bounce_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
isadma_bounce_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		isadma_bounce_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
isadma_bounce_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("isadma_bounce_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just do the normal sync operation
	 * and return.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0) {
		_bus_dmamap_sync(t, map, offset, len, ops);
		return;
	}

	/*
	 * Flush data cache for PREREAD.  This has the side-effect
	 * of invalidating the cache.  Done at PREREAD since it
	 * causes the cache line(s) to be written back to memory.
	 *
	 * Copy the original buffer to the bounce buffer and flush
	 * the data cache for PREWRITE, so that the contents
	 * of the data buffer in memory reflect reality.
	 *
	 * Copy the bounce buffer to the original buffer in POSTREAD.
	 */

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
			wbflush();
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("isadma_bounce_dmamap_sync");
	}

	/* Drain the write buffer. */
	wbflush();

	/*
	 * XXXJRT -- the bounce buffer may be mapped cacheable; write any
	 * cached data back to memory and invalidate the lines before the
	 * transfer starts.
	 */
	if (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE))
		mips_dcache_wbinv_range((vaddr_t)cookie->id_bouncebuf + offset,
		    len);
}

/*
 * Allocate memory safe for ISA DMA.
 */
int
isadma_bounce_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	paddr_t high;

	if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
	else
		high = trunc_page(avail_end);

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high));
}
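
/*
 * A sketch of how ISA-DMA-safe memory is obtained and made addressable,
 * mirroring what isadma_bounce_alloc_bouncebuf() does below; the names
 * are purely illustrative and error handling is omitted.
 *
 *	bus_dma_segment_t seg;
 *	caddr_t kva;
 *	int rseg;
 *
 *	isadma_bounce_dmamem_alloc(t, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	_bus_dmamem_map(t, &seg, rseg, PAGE_SIZE, &kva, BUS_DMA_NOWAIT);
 */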

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
isadma_bounce_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = isadma_bounce_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else
		cookie->id_flags |= ID_HAS_BOUNCE;

	return (error);
}

void
isadma_bounce_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}