/*	$OpenBSD: isadma_bounce.c,v 1.8 2008/06/26 05:42:08 ray Exp $	*/
/*	$NetBSD: isadma_bounce.c,v 1.3 2000/06/29 09:02:57 mrg Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ALPHA_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <machine/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>

extern paddr_t avail_end;

/*
 * ISA can only DMA to 0-16M.
 */
#define	ISA_DMA_BOUNCE_THRESHOLD	(16 * 1024 * 1024)

/*
 * Cookie used by bouncing ISA DMA.  A pointer to one of these is stashed
 * in the DMA map.
 */
struct isadma_bounce_cookie {
	int	id_flags;		/* flags; see below */

	/*
	 * Information about the original buffer used during
	 * DMA map syncs.  Note that origbuflen is only used
	 * for ID_BUFTYPE_LINEAR.
	 */
	void	*id_origbuf;		/* pointer to orig buffer if
					   bouncing */
	bus_size_t id_origbuflen;	/* ...and size */
	int	id_buftype;		/* type of buffer */

	void	*id_bouncebuf;		/* pointer to the bounce buffer */
	bus_size_t id_bouncebuflen;	/* ...and size */
	int	id_nbouncesegs;		/* number of valid bounce segs */
	bus_dma_segment_t id_bouncesegs[1]; /* array of bounce buffer
					       physical memory segments */
};

/* id_flags */
#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */

/* id_buftype */
#define	ID_BUFTYPE_INVALID	0
#define	ID_BUFTYPE_LINEAR	1
#define	ID_BUFTYPE_MBUF		2
#define	ID_BUFTYPE_UIO		3
#define	ID_BUFTYPE_RAW		4

int	isadma_bounce_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	isadma_bounce_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);
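
/*
 * Rough sketch of how this file is reached: a driver sees only the
 * standard bus_dma(9) interface, and the bounce machinery below stays
 * hidden behind the tag.  The buffer and length names here are
 * placeholders.  A device-to-memory (read) transfer that ends up being
 * bounced looks roughly like:
 *
 *	bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(t, map, buf, buflen, NULL, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(t, map, 0, buflen, BUS_DMASYNC_PREREAD);
 *	(start the transfer; the device DMAs into the bounce pages)
 *	bus_dmamap_sync(t, map, 0, buflen, BUS_DMASYNC_POSTREAD);
 *	(POSTREAD copies the bounce pages back into buf)
 *	bus_dmamap_unload(t, map);
 */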

/*
 * Create an ISA DMA map.
 */
int
isadma_bounce_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct isadma_bounce_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(*cookie);

	/*
	 * ISA only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	cookieflags = 0;
	if (avail_end > (t->_wbase + t->_wsize) ||
	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) *
		    (map->_dm_segcnt - 1));
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DEVBUF, (flags & BUS_DMA_NOWAIT)
	    ? (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO))) == NULL) {
		error = ENOMEM;
		goto out;
	}
	cookie = (struct isadma_bounce_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = isadma_bounce_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DEVBUF);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}

/*
 * Destroy an ISA DMA map.
 */
void
isadma_bounce_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		isadma_bounce_free_bouncebuf(t, map);

	free(cookie, M_DEVBUF);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
isadma_bounce_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    size_t buflen, struct proc *p, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_direct(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load_direct(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	map->_dm_window = t;
	return (0);
}

/*
 * Like isadma_bounce_dmamap_load(), but for mbufs.
 */
int
isadma_bounce_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("isadma_bounce_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf_direct(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load_direct(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	map->_dm_window = t;
	return (0);
}

/*
 * Like isadma_bounce_dmamap_load(), but for uios.
 */
int
isadma_bounce_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

	panic("isadma_bounce_dmamap_load_uio: not implemented");
}

/*
 * Like isadma_bounce_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
isadma_bounce_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("isadma_bounce_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
isadma_bounce_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		isadma_bounce_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map, copying data to or from the bounce
 * buffer if this transfer is being bounced.
 */
void
isadma_bounce_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("isadma_bounce_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just drain the write buffer
	 * and return.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0) {
		alpha_mb();
		return;
	}

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			    m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		panic("isadma_bounce_dmamap_sync: unknown buffer type %d",
		    cookie->id_buftype);
	}

	/* Drain the write buffer. */
	alpha_mb();
}
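
/*
 * The memory-to-device (write) direction mirrors the read sketch near
 * the top of this file; with the same placeholder names, a bounced
 * write would roughly be:
 *
 *	bus_dmamap_sync(t, map, 0, buflen, BUS_DMASYNC_PREWRITE);
 *	(PREWRITE copies buf into the bounce pages; start the transfer)
 *	bus_dmamap_sync(t, map, 0, buflen, BUS_DMASYNC_POSTWRITE);
 *	(post-write has nothing to copy; only the write buffer is drained)
 *	bus_dmamap_unload(t, map);
 */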

/*
 * Allocate memory safe for ISA DMA.
 */
int
isadma_bounce_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	paddr_t high;

	if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
	else
		high = trunc_page(avail_end);

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high));
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
isadma_bounce_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = isadma_bounce_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else
		cookie->id_flags |= ID_HAS_BOUNCE;

	return (error);
}

void
isadma_bounce_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}