/*	$NetBSD: isadma_machdep.c,v 1.14 2009/03/18 10:22:36 cegger Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isadma_machdep.c,v 1.14 2009/03/18 10:22:36 cegger Exp $");

#define ISA_DMA_STATS

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _ARM32_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>

/*
 * ISA has a 24-bit address limitation, so at most it has a 16M
 * DMA range.  However, some platforms have a more limited range,
 * e.g. the Shark NC.  On these systems, we are provided with
 * a set of DMA ranges.  The pmap module is aware of these ranges
 * and places DMA-safe memory for them onto an alternate free list
 * so that they are protected from being used to service page faults,
 * etc. (unless we've run out of memory elsewhere).
 */
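/*
 * Illustrative only: a platform supplying a single DMA-safe window
 * might define its ranges roughly as below.  The field names follow
 * the arm32 bus_dma struct arm32_dma_range; the base and length
 * values are made up and are not the Shark's actual map.
 *
 *	static struct arm32_dma_range example_dma_ranges[] = {
 *		{ .dr_sysbase = 0x00000000,
 *		  .dr_busbase = 0x00000000,
 *		  .dr_len     = 0x00200000 },
 *	};
 *	struct arm32_dma_range *shark_isa_dma_ranges = example_dma_ranges;
 *	int shark_isa_dma_nranges = 1;
 */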
extern struct arm32_dma_range *shark_isa_dma_ranges;
extern int shark_isa_dma_nranges;

int	_isa_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
void	_isa_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	_isa_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	_isa_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	_isa_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	_isa_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	_isa_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	_isa_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int);

int	_isa_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);

int	_isa_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	_isa_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct arm32_bus_dma_tag isa_bus_dma_tag = {
	0,				/* _ranges */
	0,				/* _nranges */
	NULL,				/* _cookie */
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,		/* pre */
	_isa_bus_dmamap_sync,		/* post */
	_isa_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};

/*
 * Initialize ISA DMA.
 */
void
isa_dma_init(void)
{

	isa_bus_dma_tag._ranges = shark_isa_dma_ranges;
	isa_bus_dma_tag._nranges = shark_isa_dma_nranges;
}
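/*
 * Illustrative only: a driver consumes this tag through the standard
 * bus_dma(9) calls, so the bouncing implemented below is transparent
 * to it.  Drivers normally receive the tag via their isa_attach_args;
 * it is referenced directly here just for the sketch, and the sizes
 * are made up:
 *
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamap_create(&isa_bus_dma_tag, MAXPHYS, 1, MAXPHYS,
 *	    0, BUS_DMA_NOWAIT, &map) != 0)
 *		return;
 *	if (bus_dmamap_load(&isa_bus_dma_tag, map, buf, len, NULL,
 *	    BUS_DMA_NOWAIT) == 0) {
 *		bus_dmamap_sync(&isa_bus_dma_tag, map, 0, len,
 *		    BUS_DMASYNC_PREWRITE);
 *		... program the device with map->dm_segs[0] ...
 *		bus_dmamap_sync(&isa_bus_dma_tag, map, 0, len,
 *		    BUS_DMASYNC_POSTWRITE);
 *		bus_dmamap_unload(&isa_bus_dma_tag, map);
 *	}
 *	bus_dmamap_destroy(&isa_bus_dma_tag, map);
 */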
/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arm32_isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct arm32_isa_dma_cookie);

	/*
	 * ISA only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 *
	 * Well, not really... see note above regarding DMA ranges.
	 * Because of the range issue on this platform, we just
	 * always "might bounce".
	 */
	cookieflags = ID_MIGHT_NEED_BOUNCE;
	cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct arm32_isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}
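/*
 * Illustrative note on the cookie allocation in
 * _isa_bus_dmamap_create() above: the extra
 * (sizeof(bus_dma_segment_t) * map->_dm_segcnt) bytes leave room for
 * the bounce-buffer segments behind the fixed part of the cookie,
 * assuming struct arm32_isa_dma_cookie ends in a variable-length
 * id_bouncesegs[] array like its x86 counterpart:
 *
 *	+--------------------------------------+
 *	| struct arm32_isa_dma_cookie          |  flags, origbuf, ...
 *	+--------------------------------------+
 *	| bus_dma_segment_t id_bouncesegs[]    |  _dm_segcnt entries
 *	+--------------------------------------+
 */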
/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
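/*
 * Illustrative only: what a bounced device-to-memory ("read")
 * transfer looks like from the caller's side once the load above
 * has silently switched to the bounce buffer:
 *
 *	error = bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_NOWAIT);
 *	if (error == 0) {
 *		bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *		... device DMAs into map->dm_segs (the bounce pages) ...
 *		bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 *		-> _isa_bus_dmamap_sync() copies the bounce pages
 *		   back into 'buf'
 *	}
 */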
/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, char *) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_isa_bus_dmamap_sync");
	}
}
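/*
 * Worked example of the ID_BUFTYPE_MBUF post-read scatter loop above
 * (hypothetical chain, for illustration): given an mbuf chain with
 * lengths 10/20/30 and a sync of offset=15, len=20, the loop skips
 * the first mbuf (moff becomes 5), copies 15 bytes into the second
 * mbuf at offset 5 from bounce-buffer offset 15, then copies the
 * remaining 5 bytes into the third mbuf at offset 0 from
 * bounce-buffer offset 30.
 */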
/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	if (t->_ranges == NULL)
		return (ENOMEM);

	/* _bus_dmamem_alloc() does the range checks for us. */
	return (_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs,
	    rsegs, flags));
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
_isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size,
    int flags)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (void **)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}