1 /* $NetBSD: pxa2x0_dmac.c,v 1.6 2009/03/16 11:42:31 nonaka Exp $ */ 2 3 /* 4 * Copyright (c) 2003, 2005 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Steve C. Woodford for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 #include "opt_pxa2x0_dmac.h" 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/device.h> 43 #include <sys/kernel.h> 44 #include <sys/malloc.h> 45 #include <sys/queue.h> 46 47 #include <uvm/uvm_param.h> /* For PAGE_SIZE */ 48 49 #include <machine/intr.h> 50 #include <machine/bus.h> 51 52 #include <dev/dmover/dmovervar.h> 53 54 #include <arm/xscale/pxa2x0reg.h> 55 #include <arm/xscale/pxa2x0var.h> 56 #include <arm/xscale/pxa2x0cpu.h> 57 58 #include <arm/xscale/pxa2x0_dmac.h> 59 60 #include "locators.h" 61 62 #undef DMAC_N_PRIORITIES 63 #ifndef PXA2X0_DMAC_FIXED_PRIORITY 64 #define DMAC_N_PRIORITIES 3 65 #define DMAC_PRI(p) (p) 66 #else 67 #define DMAC_N_PRIORITIES 1 68 #define DMAC_PRI(p) (0) 69 #endif 70 71 struct dmac_desc { 72 SLIST_ENTRY(dmac_desc) d_link; 73 struct pxa2x0_dma_desc *d_desc; 74 paddr_t d_desc_pa; 75 }; 76 77 /* 78 * This is used to maintain state for an in-progress transfer. 79 * It tracks the current DMA segment, and offset within the segment 80 * in the case where we had to split a request into several DMA 81 * operations due to a shortage of DMAC descriptors. 82 */ 83 struct dmac_desc_segs { 84 bus_dma_segment_t *ds_curseg; /* Current segment */ 85 u_int ds_nsegs; /* Remaining segments */ 86 bus_size_t ds_offset; /* Offset within current seg */ 87 }; 88 89 SIMPLEQ_HEAD(dmac_xfer_state_head, dmac_xfer_state); 90 91 struct dmac_xfer_state { 92 struct dmac_xfer dxs_xfer; 93 #define dxs_cookie dxs_xfer.dx_cookie 94 #define dxs_done dxs_xfer.dx_done 95 #define dxs_priority dxs_xfer.dx_priority 96 #define dxs_peripheral dxs_xfer.dx_peripheral 97 #define dxs_flow dxs_xfer.dx_flow 98 #define dxs_dev_width dxs_xfer.dx_dev_width 99 #define dxs_burst_size dxs_xfer.dx_burst_size 100 #define dxs_loop_notify dxs_xfer.dx_loop_notify 101 #define dxs_desc dxs_xfer.dx_desc 102 SIMPLEQ_ENTRY(dmac_xfer_state) dxs_link; 103 SLIST_HEAD(, dmac_desc) dxs_descs; 104 struct dmac_xfer_state_head *dxs_queue; 105 u_int dxs_channel; 106 #define DMAC_NO_CHANNEL (~0) 107 u_int32_t dxs_dcmd; 108 struct dmac_desc_segs dxs_segs[2]; 109 bool dxs_misaligned_flag; 110 }; 111 112 113 #if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0) 114 /* 115 * This structure is used to maintain state for the dmover(9) backend 116 * part of the driver. We can have a number of concurrent dmover 117 * requests in progress at any given time. The exact number is given 118 * by the PXA2X0_DMAC_DMOVER_CONCURRENCY compile-time constant. One of 119 * these structures is allocated for each concurrent request. 120 */ 121 struct dmac_dmover_state { 122 LIST_ENTRY(dmac_dmover_state) ds_link; /* List of idle dmover chans */ 123 struct pxadmac_softc *ds_sc; /* Uplink to pxadmac softc */ 124 struct dmover_request *ds_current; /* Current dmover request */ 125 struct dmac_xfer_state ds_xfer; 126 bus_dmamap_t ds_src_dmap; 127 bus_dmamap_t ds_dst_dmap; 128 /* 129 * There is no inherent size limit in the DMA engine. 130 * The following limit is somewhat arbitrary. 131 */ 132 #define DMAC_DMOVER_MAX_XFER (8*1024*1024) 133 #if 0 134 /* This would require 16KB * 2 just for segments... */ 135 #define DMAC_DMOVER_NSEGS ((DMAC_DMOVER_MAX_XFER / PAGE_SIZE) + 1) 136 #else 137 #define DMAC_DMOVER_NSEGS 512 /* XXX: Only enough for 2MB */ 138 #endif 139 bus_dma_segment_t ds_zero_seg; /* Used for zero-fill ops */ 140 void *ds_zero_va; 141 bus_dma_segment_t ds_fill_seg; /* Used for fill8 ops */ 142 void *ds_fill_va; 143 144 #define ds_src_addr_hold ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_addr_hold 145 #define ds_dst_addr_hold ds_xfer.dxs_desc[DMAC_DESC_DST].xd_addr_hold 146 #define ds_src_burst ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_burst_size 147 #define ds_dst_burst ds_xfer.dxs_desc[DMAC_DESC_DST].xd_burst_size 148 #define ds_src_dma_segs ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_dma_segs 149 #define ds_dst_dma_segs ds_xfer.dxs_desc[DMAC_DESC_DST].xd_dma_segs 150 #define ds_src_nsegs ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_nsegs 151 #define ds_dst_nsegs ds_xfer.dxs_desc[DMAC_DESC_DST].xd_nsegs 152 }; 153 154 /* 155 * Overall dmover(9) backend state 156 */ 157 struct dmac_dmover { 158 struct dmover_backend dd_backend; 159 int dd_busy; 160 LIST_HEAD(, dmac_dmover_state) dd_free; 161 struct dmac_dmover_state dd_state[PXA2X0_DMAC_DMOVER_CONCURRENCY]; 162 }; 163 #endif 164 165 struct pxadmac_softc { 166 struct device sc_dev; 167 bus_space_tag_t sc_bust; 168 bus_dma_tag_t sc_dmat; 169 bus_space_handle_t sc_bush; 170 void *sc_irqcookie; 171 172 /* 173 * Queue of pending requests, per priority 174 */ 175 struct dmac_xfer_state_head sc_queue[DMAC_N_PRIORITIES]; 176 177 /* 178 * Queue of pending requests, per peripheral 179 */ 180 struct { 181 struct dmac_xfer_state_head sp_queue; 182 u_int sp_busy; 183 } sc_periph[DMAC_N_PERIPH]; 184 185 /* 186 * Active requests, per channel. 187 */ 188 struct dmac_xfer_state *sc_active[DMAC_N_CHANNELS]; 189 190 /* 191 * Channel Priority Allocation 192 */ 193 struct { 194 u_int8_t p_first; 195 u_int8_t p_pri[DMAC_N_CHANNELS]; 196 } sc_prio[DMAC_N_PRIORITIES]; 197 #define DMAC_PRIO_END (~0) 198 u_int8_t sc_channel_priority[DMAC_N_CHANNELS]; 199 200 /* 201 * DMA descriptor management 202 */ 203 bus_dmamap_t sc_desc_map; 204 bus_dma_segment_t sc_segs; 205 #define DMAC_N_DESCS ((PAGE_SIZE * 2) / sizeof(struct pxa2x0_dma_desc)) 206 #define DMAC_DESCS_SIZE (DMAC_N_DESCS * sizeof(struct pxa2x0_dma_desc)) 207 struct dmac_desc sc_all_descs[DMAC_N_DESCS]; 208 u_int sc_free_descs; 209 SLIST_HEAD(, dmac_desc) sc_descs; 210 211 #if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0) 212 /* 213 * dmover(9) backend state 214 */ 215 struct dmac_dmover sc_dmover; 216 #endif 217 }; 218 219 static int pxadmac_match(struct device *, struct cfdata *, void *); 220 static void pxadmac_attach(struct device *, struct device *, void *); 221 222 CFATTACH_DECL(pxadmac, sizeof(struct pxadmac_softc), 223 pxadmac_match, pxadmac_attach, NULL, NULL); 224 225 static struct pxadmac_softc *pxadmac_sc; 226 227 static void dmac_start(struct pxadmac_softc *, dmac_priority_t); 228 static int dmac_continue_xfer(struct pxadmac_softc *, struct dmac_xfer_state *); 229 static u_int dmac_channel_intr(struct pxadmac_softc *, u_int); 230 static int dmac_intr(void *); 231 232 #if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0) 233 static void dmac_dmover_attach(struct pxadmac_softc *); 234 static void dmac_dmover_process(struct dmover_backend *); 235 static void dmac_dmover_run(struct dmover_backend *); 236 static void dmac_dmover_done(struct dmac_xfer *, int); 237 #endif 238 239 static inline u_int32_t 240 dmac_reg_read(struct pxadmac_softc *sc, int reg) 241 { 242 243 return (bus_space_read_4(sc->sc_bust, sc->sc_bush, reg)); 244 } 245 246 static inline void 247 dmac_reg_write(struct pxadmac_softc *sc, int reg, u_int32_t val) 248 { 249 250 bus_space_write_4(sc->sc_bust, sc->sc_bush, reg, val); 251 } 252 253 static inline int 254 dmac_allocate_channel(struct pxadmac_softc *sc, dmac_priority_t priority, 255 u_int *chanp) 256 { 257 u_int channel; 258 259 KDASSERT((u_int)priority < DMAC_N_PRIORITIES); 260 261 if ((channel = sc->sc_prio[priority].p_first) == DMAC_PRIO_END) 262 return (-1); 263 sc->sc_prio[priority].p_first = sc->sc_prio[priority].p_pri[channel]; 264 265 *chanp = channel; 266 return (0); 267 } 268 269 static inline void 270 dmac_free_channel(struct pxadmac_softc *sc, dmac_priority_t priority, 271 u_int channel) 272 { 273 274 KDASSERT((u_int)priority < DMAC_N_PRIORITIES); 275 276 sc->sc_prio[priority].p_pri[channel] = sc->sc_prio[priority].p_first; 277 sc->sc_prio[priority].p_first = channel; 278 } 279 280 static int 281 pxadmac_match(struct device *parent, struct cfdata *cf, void *aux) 282 { 283 struct pxaip_attach_args *pxa = aux; 284 285 if (pxadmac_sc || pxa->pxa_addr != PXA2X0_DMAC_BASE || 286 pxa->pxa_intr != PXA2X0_INT_DMA) 287 return (0); 288 289 pxa->pxa_size = PXA2X0_DMAC_SIZE; 290 291 return (1); 292 } 293 294 static void 295 pxadmac_attach(struct device *parent, struct device *self, void *aux) 296 { 297 struct pxadmac_softc *sc = (struct pxadmac_softc *)self; 298 struct pxaip_attach_args *pxa = aux; 299 struct pxa2x0_dma_desc *dd; 300 int i, nsegs; 301 302 sc->sc_bust = pxa->pxa_iot; 303 sc->sc_dmat = pxa->pxa_dmat; 304 305 aprint_normal(": DMA Controller\n"); 306 307 if (bus_space_map(sc->sc_bust, pxa->pxa_addr, pxa->pxa_size, 0, 308 &sc->sc_bush)) { 309 aprint_error("%s: Can't map registers!\n", sc->sc_dev.dv_xname); 310 return; 311 } 312 313 pxadmac_sc = sc; 314 315 /* 316 * Make sure the DMAC is quiescent 317 */ 318 for (i = 0; i < DMAC_N_CHANNELS; i++) { 319 dmac_reg_write(sc, DMAC_DCSR(i), 0); 320 dmac_reg_write(sc, DMAC_DRCMR(i), 0); 321 sc->sc_active[i] = NULL; 322 } 323 dmac_reg_write(sc, DMAC_DINT, 324 dmac_reg_read(sc, DMAC_DINT) & DMAC_DINT_MASK); 325 326 /* 327 * Initialise the request queues 328 */ 329 for (i = 0; i < DMAC_N_PRIORITIES; i++) 330 SIMPLEQ_INIT(&sc->sc_queue[i]); 331 332 /* 333 * Initialise the request queues 334 */ 335 for (i = 0; i < DMAC_N_PERIPH; i++) { 336 sc->sc_periph[i].sp_busy = 0; 337 SIMPLEQ_INIT(&sc->sc_periph[i].sp_queue); 338 } 339 340 /* 341 * Initialise the channel priority metadata 342 */ 343 memset(sc->sc_prio, DMAC_PRIO_END, sizeof(sc->sc_prio)); 344 for (i = 0; i < DMAC_N_CHANNELS; i++) { 345 #if (DMAC_N_PRIORITIES > 1) 346 if (i <= 3) 347 dmac_free_channel(sc, DMAC_PRIORITY_HIGH, i); 348 else 349 if (i <= 7) 350 dmac_free_channel(sc, DMAC_PRIORITY_MED, i); 351 else 352 dmac_free_channel(sc, DMAC_PRIORITY_LOW, i); 353 #else 354 dmac_free_channel(sc, DMAC_PRIORITY_NORMAL, i); 355 #endif 356 } 357 358 /* 359 * Initialise DMA descriptors and associated metadata 360 */ 361 if (bus_dmamem_alloc(sc->sc_dmat, DMAC_DESCS_SIZE, DMAC_DESCS_SIZE, 0, 362 &sc->sc_segs, 1, &nsegs, BUS_DMA_NOWAIT)) 363 panic("dmac_pxaip_attach: bus_dmamem_alloc failed"); 364 365 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_segs, 1, DMAC_DESCS_SIZE, 366 (void *)&dd, BUS_DMA_COHERENT|BUS_DMA_NOCACHE)) 367 panic("dmac_pxaip_attach: bus_dmamem_map failed"); 368 369 if (bus_dmamap_create(sc->sc_dmat, DMAC_DESCS_SIZE, 1, 370 DMAC_DESCS_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_desc_map)) 371 panic("dmac_pxaip_attach: bus_dmamap_create failed"); 372 373 if (bus_dmamap_load(sc->sc_dmat, sc->sc_desc_map, (void *)dd, 374 DMAC_DESCS_SIZE, NULL, BUS_DMA_NOWAIT)) 375 panic("dmac_pxaip_attach: bus_dmamap_load failed"); 376 377 SLIST_INIT(&sc->sc_descs); 378 sc->sc_free_descs = DMAC_N_DESCS; 379 for (i = 0; i < DMAC_N_DESCS; i++, dd++) { 380 SLIST_INSERT_HEAD(&sc->sc_descs, &sc->sc_all_descs[i], d_link); 381 sc->sc_all_descs[i].d_desc = dd; 382 sc->sc_all_descs[i].d_desc_pa = 383 sc->sc_segs.ds_addr + (sizeof(struct pxa2x0_dma_desc) * i); 384 } 385 386 sc->sc_irqcookie = pxa2x0_intr_establish(pxa->pxa_intr, IPL_BIO, 387 dmac_intr, sc); 388 KASSERT(sc->sc_irqcookie != NULL); 389 390 #if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0) 391 dmac_dmover_attach(sc); 392 #endif 393 } 394 395 #if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0) 396 /* 397 * We support the following dmover(9) operations 398 */ 399 static const struct dmover_algdesc dmac_dmover_algdescs[] = { 400 {DMOVER_FUNC_ZERO, NULL, 0}, /* Zero-fill */ 401 {DMOVER_FUNC_FILL8, NULL, 0}, /* Fill with 8-bit immediate value */ 402 {DMOVER_FUNC_COPY, NULL, 1} /* Copy */ 403 }; 404 #define DMAC_DMOVER_ALGDESC_COUNT \ 405 (sizeof(dmac_dmover_algdescs) / sizeof(dmac_dmover_algdescs[0])) 406 407 static void 408 dmac_dmover_attach(struct pxadmac_softc *sc) 409 { 410 struct dmac_dmover *dd = &sc->sc_dmover; 411 struct dmac_dmover_state *ds; 412 int i, dummy; 413 414 /* 415 * Describe ourselves to the dmover(9) code 416 */ 417 dd->dd_backend.dmb_name = "pxadmac"; 418 dd->dd_backend.dmb_speed = 100*1024*1024; /* XXX */ 419 dd->dd_backend.dmb_cookie = sc; 420 dd->dd_backend.dmb_algdescs = dmac_dmover_algdescs; 421 dd->dd_backend.dmb_nalgdescs = DMAC_DMOVER_ALGDESC_COUNT; 422 dd->dd_backend.dmb_process = dmac_dmover_process; 423 dd->dd_busy = 0; 424 LIST_INIT(&dd->dd_free); 425 426 for (i = 0; i < PXA2X0_DMAC_DMOVER_CONCURRENCY; i++) { 427 ds = &dd->dd_state[i]; 428 ds->ds_sc = sc; 429 ds->ds_current = NULL; 430 ds->ds_xfer.dxs_cookie = ds; 431 ds->ds_xfer.dxs_done = dmac_dmover_done; 432 ds->ds_xfer.dxs_priority = DMAC_PRIORITY_NORMAL; 433 ds->ds_xfer.dxs_peripheral = DMAC_PERIPH_NONE; 434 ds->ds_xfer.dxs_flow = DMAC_FLOW_CTRL_NONE; 435 ds->ds_xfer.dxs_dev_width = DMAC_DEV_WIDTH_DEFAULT; 436 ds->ds_xfer.dxs_burst_size = DMAC_BURST_SIZE_8; /* XXX */ 437 ds->ds_xfer.dxs_loop_notify = DMAC_DONT_LOOP; 438 ds->ds_src_addr_hold = false; 439 ds->ds_dst_addr_hold = false; 440 ds->ds_src_nsegs = 0; 441 ds->ds_dst_nsegs = 0; 442 LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link); 443 444 /* 445 * Create dma maps for both source and destination buffers. 446 */ 447 if (bus_dmamap_create(sc->sc_dmat, DMAC_DMOVER_MAX_XFER, 448 DMAC_DMOVER_NSEGS, DMAC_DMOVER_MAX_XFER, 449 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 450 &ds->ds_src_dmap) || 451 bus_dmamap_create(sc->sc_dmat, DMAC_DMOVER_MAX_XFER, 452 DMAC_DMOVER_NSEGS, DMAC_DMOVER_MAX_XFER, 453 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 454 &ds->ds_dst_dmap)) { 455 panic("dmac_dmover_attach: bus_dmamap_create failed"); 456 } 457 458 /* 459 * Allocate some dma memory to be used as source buffers 460 * for the zero-fill and fill-8 operations. We only need 461 * small buffers here, since we set up the DMAC source 462 * descriptor with 'ds_addr_hold' set to true. 463 */ 464 if (bus_dmamem_alloc(sc->sc_dmat, 465 arm_pdcache_line_size, arm_pdcache_line_size, 0, 466 &ds->ds_zero_seg, 1, &dummy, BUS_DMA_NOWAIT) || 467 bus_dmamem_alloc(sc->sc_dmat, 468 arm_pdcache_line_size, arm_pdcache_line_size, 0, 469 &ds->ds_fill_seg, 1, &dummy, BUS_DMA_NOWAIT)) { 470 panic("dmac_dmover_attach: bus_dmamem_alloc failed"); 471 } 472 473 if (bus_dmamem_map(sc->sc_dmat, &ds->ds_zero_seg, 1, 474 arm_pdcache_line_size, &ds->ds_zero_va, 475 BUS_DMA_NOWAIT) || 476 bus_dmamem_map(sc->sc_dmat, &ds->ds_fill_seg, 1, 477 arm_pdcache_line_size, &ds->ds_fill_va, 478 BUS_DMA_NOWAIT)) { 479 panic("dmac_dmover_attach: bus_dmamem_map failed"); 480 } 481 482 /* 483 * Make sure the zero-fill source buffer really is zero filled 484 */ 485 memset(ds->ds_zero_va, 0, arm_pdcache_line_size); 486 } 487 488 dmover_backend_register(&sc->sc_dmover.dd_backend); 489 } 490 491 static void 492 dmac_dmover_process(struct dmover_backend *dmb) 493 { 494 struct pxadmac_softc *sc = dmb->dmb_cookie; 495 int s = splbio(); 496 497 /* 498 * If the backend is currently idle, go process the queue. 499 */ 500 if (sc->sc_dmover.dd_busy == 0) 501 dmac_dmover_run(&sc->sc_dmover.dd_backend); 502 splx(s); 503 } 504 505 static void 506 dmac_dmover_run(struct dmover_backend *dmb) 507 { 508 struct dmover_request *dreq; 509 struct pxadmac_softc *sc; 510 struct dmac_dmover *dd; 511 struct dmac_dmover_state *ds; 512 size_t len_src, len_dst; 513 int rv; 514 515 sc = dmb->dmb_cookie; 516 dd = &sc->sc_dmover; 517 sc->sc_dmover.dd_busy = 1; 518 519 /* 520 * As long as we can queue up dmover requests... 521 */ 522 while ((dreq = TAILQ_FIRST(&dmb->dmb_pendreqs)) != NULL && 523 (ds = LIST_FIRST(&dd->dd_free)) != NULL) { 524 /* 525 * Pull the request off the queue, mark it 'running', 526 * and make it 'current'. 527 */ 528 dmover_backend_remque(dmb, dreq); 529 dreq->dreq_flags |= DMOVER_REQ_RUNNING; 530 LIST_REMOVE(ds, ds_link); 531 ds->ds_current = dreq; 532 533 switch (dreq->dreq_outbuf_type) { 534 case DMOVER_BUF_LINEAR: 535 len_dst = dreq->dreq_outbuf.dmbuf_linear.l_len; 536 break; 537 case DMOVER_BUF_UIO: 538 len_dst = dreq->dreq_outbuf.dmbuf_uio->uio_resid; 539 break; 540 default: 541 goto error; 542 } 543 544 /* 545 * Fix up the appropriate DMA 'source' buffer 546 */ 547 if (dreq->dreq_assignment->das_algdesc->dad_ninputs) { 548 struct uio *uio; 549 /* 550 * This is a 'copy' operation. 551 * Load up the specified source buffer 552 */ 553 switch (dreq->dreq_inbuf_type) { 554 case DMOVER_BUF_LINEAR: 555 len_src= dreq->dreq_inbuf[0].dmbuf_linear.l_len; 556 if (len_src != len_dst) 557 goto error; 558 if (bus_dmamap_load(sc->sc_dmat,ds->ds_src_dmap, 559 dreq->dreq_inbuf[0].dmbuf_linear.l_addr, 560 len_src, NULL, 561 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | 562 BUS_DMA_READ)) 563 goto error; 564 break; 565 566 case DMOVER_BUF_UIO: 567 uio = dreq->dreq_inbuf[0].dmbuf_uio; 568 len_src = uio->uio_resid; 569 if (uio->uio_rw != UIO_WRITE || 570 len_src != len_dst) 571 goto error; 572 if (bus_dmamap_load_uio(sc->sc_dmat, 573 ds->ds_src_dmap, uio, 574 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | 575 BUS_DMA_READ)) 576 goto error; 577 break; 578 579 default: 580 goto error; 581 } 582 583 ds->ds_src_addr_hold = false; 584 } else 585 if (dreq->dreq_assignment->das_algdesc->dad_name == 586 DMOVER_FUNC_ZERO) { 587 /* 588 * Zero-fill operation. 589 * Simply load up the pre-zeroed source buffer 590 */ 591 if (bus_dmamap_load(sc->sc_dmat, ds->ds_src_dmap, 592 ds->ds_zero_va, arm_pdcache_line_size, NULL, 593 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_READ)) 594 goto error; 595 596 ds->ds_src_addr_hold = true; 597 } else 598 if (dreq->dreq_assignment->das_algdesc->dad_name == 599 DMOVER_FUNC_FILL8) { 600 /* 601 * Fill-8 operation. 602 * Initialise our fill-8 buffer, and load it up. 603 * 604 * XXX: Experiment with exactly how much of the 605 * source buffer needs to be filled. Particularly WRT 606 * burst size (which is hardcoded to 8 for dmover). 607 */ 608 memset(ds->ds_fill_va, dreq->dreq_immediate[0], 609 arm_pdcache_line_size); 610 611 if (bus_dmamap_load(sc->sc_dmat, ds->ds_src_dmap, 612 ds->ds_fill_va, arm_pdcache_line_size, NULL, 613 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_READ)) 614 goto error; 615 616 ds->ds_src_addr_hold = true; 617 } else { 618 goto error; 619 } 620 621 /* 622 * Now do the same for the destination buffer 623 */ 624 switch (dreq->dreq_outbuf_type) { 625 case DMOVER_BUF_LINEAR: 626 if (bus_dmamap_load(sc->sc_dmat, ds->ds_dst_dmap, 627 dreq->dreq_outbuf.dmbuf_linear.l_addr, 628 len_dst, NULL, 629 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE)) 630 goto error_unload_src; 631 break; 632 633 case DMOVER_BUF_UIO: 634 if (dreq->dreq_outbuf.dmbuf_uio->uio_rw != UIO_READ) 635 goto error_unload_src; 636 if (bus_dmamap_load_uio(sc->sc_dmat, ds->ds_dst_dmap, 637 dreq->dreq_outbuf.dmbuf_uio, 638 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE)) 639 goto error_unload_src; 640 break; 641 642 default: 643 error_unload_src: 644 bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap); 645 error: 646 dreq->dreq_error = EINVAL; 647 dreq->dreq_flags |= DMOVER_REQ_ERROR; 648 ds->ds_current = NULL; 649 LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link); 650 dmover_done(dreq); 651 continue; 652 } 653 654 /* 655 * The last step before shipping the request off to the 656 * DMAC driver is to sync the dma maps. 657 */ 658 bus_dmamap_sync(sc->sc_dmat, ds->ds_src_dmap, 0, 659 ds->ds_src_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 660 ds->ds_src_dma_segs = ds->ds_src_dmap->dm_segs; 661 ds->ds_src_nsegs = ds->ds_src_dmap->dm_nsegs; 662 663 bus_dmamap_sync(sc->sc_dmat, ds->ds_dst_dmap, 0, 664 ds->ds_dst_dmap->dm_mapsize, BUS_DMASYNC_PREREAD); 665 ds->ds_dst_dma_segs = ds->ds_dst_dmap->dm_segs; 666 ds->ds_dst_nsegs = ds->ds_dst_dmap->dm_nsegs; 667 668 /* 669 * Hand the request over to the dmac section of the driver. 670 */ 671 if ((rv = pxa2x0_dmac_start_xfer(&ds->ds_xfer.dxs_xfer)) != 0) { 672 bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap); 673 bus_dmamap_unload(sc->sc_dmat, ds->ds_dst_dmap); 674 dreq->dreq_error = rv; 675 dreq->dreq_flags |= DMOVER_REQ_ERROR; 676 ds->ds_current = NULL; 677 LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link); 678 dmover_done(dreq); 679 } 680 } 681 682 /* All done */ 683 sc->sc_dmover.dd_busy = 0; 684 } 685 686 static void 687 dmac_dmover_done(struct dmac_xfer *dx, int error) 688 { 689 struct dmac_dmover_state *ds = dx->dx_cookie; 690 struct pxadmac_softc *sc = ds->ds_sc; 691 struct dmover_request *dreq = ds->ds_current; 692 693 /* 694 * A dmover(9) request has just completed. 695 */ 696 697 KDASSERT(dreq != NULL); 698 699 /* 700 * Sync and unload the DMA maps 701 */ 702 bus_dmamap_sync(sc->sc_dmat, ds->ds_src_dmap, 0, 703 ds->ds_src_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 704 bus_dmamap_sync(sc->sc_dmat, ds->ds_dst_dmap, 0, 705 ds->ds_dst_dmap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 706 707 bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap); 708 bus_dmamap_unload(sc->sc_dmat, ds->ds_dst_dmap); 709 710 ds->ds_current = NULL; 711 LIST_INSERT_HEAD(&sc->sc_dmover.dd_free, ds, ds_link); 712 713 /* 714 * Record the completion status of the transfer 715 */ 716 if (error) { 717 dreq->dreq_error = error; 718 dreq->dreq_flags |= DMOVER_REQ_ERROR; 719 } else { 720 if (dreq->dreq_outbuf_type == DMOVER_BUF_UIO) 721 dreq->dreq_outbuf.dmbuf_uio->uio_resid = 0; 722 if (dreq->dreq_assignment->das_algdesc->dad_ninputs && 723 dreq->dreq_inbuf_type == DMOVER_BUF_UIO) 724 dreq->dreq_inbuf[0].dmbuf_uio->uio_resid = 0; 725 } 726 727 /* 728 * Done! 729 */ 730 dmover_done(dreq); 731 732 /* 733 * See if we can start some more dmover(9) requests. 734 * 735 * Note: We're already at splbio() here. 736 */ 737 if (sc->sc_dmover.dd_busy == 0) 738 dmac_dmover_run(&sc->sc_dmover.dd_backend); 739 } 740 #endif 741 742 struct dmac_xfer * 743 pxa2x0_dmac_allocate_xfer(int flags) 744 { 745 struct dmac_xfer_state *dxs; 746 747 dxs = malloc(sizeof(struct dmac_xfer_state), M_DEVBUF, flags); 748 749 return ((struct dmac_xfer *)dxs); 750 } 751 752 void 753 pxa2x0_dmac_free_xfer(struct dmac_xfer *dx) 754 { 755 756 /* 757 * XXX: Should verify the DMAC is not actively using this 758 * structure before freeing... 759 */ 760 free(dx, M_DEVBUF); 761 } 762 763 static inline int 764 dmac_validate_desc(struct dmac_xfer_desc *xd, size_t *psize, 765 bool *misaligned_flag) 766 { 767 size_t size; 768 int i; 769 770 /* 771 * Make sure the transfer parameters are acceptable. 772 */ 773 774 if (xd->xd_addr_hold && 775 (xd->xd_nsegs != 1 || xd->xd_dma_segs[0].ds_len == 0)) 776 return (EINVAL); 777 778 for (i = 0, size = 0; i < xd->xd_nsegs; i++) { 779 if (xd->xd_dma_segs[i].ds_addr & 0x7) { 780 if (!CPU_IS_PXA270) 781 return (EFAULT); 782 *misaligned_flag = true; 783 } 784 size += xd->xd_dma_segs[i].ds_len; 785 } 786 787 *psize = size; 788 return (0); 789 } 790 791 static inline int 792 dmac_init_desc(struct dmac_desc_segs *ds, struct dmac_xfer_desc *xd, 793 size_t *psize, bool *misaligned_flag) 794 { 795 int err; 796 797 if ((err = dmac_validate_desc(xd, psize, misaligned_flag))) 798 return (err); 799 800 ds->ds_curseg = xd->xd_dma_segs; 801 ds->ds_nsegs = xd->xd_nsegs; 802 ds->ds_offset = 0; 803 return (0); 804 } 805 806 int 807 pxa2x0_dmac_start_xfer(struct dmac_xfer *dx) 808 { 809 struct pxadmac_softc *sc = pxadmac_sc; 810 struct dmac_xfer_state *dxs = (struct dmac_xfer_state *)dx; 811 struct dmac_xfer_desc *src, *dst; 812 size_t size; 813 int err, s; 814 815 if (dxs->dxs_peripheral != DMAC_PERIPH_NONE && 816 dxs->dxs_peripheral >= DMAC_N_PERIPH) 817 return (EINVAL); 818 819 src = &dxs->dxs_desc[DMAC_DESC_SRC]; 820 dst = &dxs->dxs_desc[DMAC_DESC_DST]; 821 822 dxs->dxs_misaligned_flag = false; 823 824 if ((err = dmac_init_desc(&dxs->dxs_segs[DMAC_DESC_SRC], src, &size, 825 &dxs->dxs_misaligned_flag))) 826 return (err); 827 if (src->xd_addr_hold == false && 828 dxs->dxs_loop_notify != DMAC_DONT_LOOP && 829 (size % dxs->dxs_loop_notify) != 0) 830 return (EINVAL); 831 832 if ((err = dmac_init_desc(&dxs->dxs_segs[DMAC_DESC_DST], dst, &size, 833 &dxs->dxs_misaligned_flag))) 834 return (err); 835 if (dst->xd_addr_hold == false && 836 dxs->dxs_loop_notify != DMAC_DONT_LOOP && 837 (size % dxs->dxs_loop_notify) != 0) 838 return (EINVAL); 839 840 SLIST_INIT(&dxs->dxs_descs); 841 dxs->dxs_channel = DMAC_NO_CHANNEL; 842 dxs->dxs_dcmd = (((u_int32_t)dxs->dxs_dev_width) << DCMD_WIDTH_SHIFT) | 843 (((u_int32_t)dxs->dxs_burst_size) << DCMD_SIZE_SHIFT); 844 845 switch (dxs->dxs_flow) { 846 case DMAC_FLOW_CTRL_NONE: 847 break; 848 case DMAC_FLOW_CTRL_SRC: 849 dxs->dxs_dcmd |= DCMD_FLOWSRC; 850 break; 851 case DMAC_FLOW_CTRL_DEST: 852 dxs->dxs_dcmd |= DCMD_FLOWTRG; 853 break; 854 } 855 856 if (src->xd_addr_hold == false) 857 dxs->dxs_dcmd |= DCMD_INCSRCADDR; 858 if (dst->xd_addr_hold == false) 859 dxs->dxs_dcmd |= DCMD_INCTRGADDR; 860 861 s = splbio(); 862 if (dxs->dxs_peripheral == DMAC_PERIPH_NONE || 863 sc->sc_periph[dxs->dxs_peripheral].sp_busy == 0) { 864 dxs->dxs_queue = &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)]; 865 SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link); 866 if (dxs->dxs_peripheral != DMAC_PERIPH_NONE) 867 sc->sc_periph[dxs->dxs_peripheral].sp_busy++; 868 dmac_start(sc, DMAC_PRI(dxs->dxs_priority)); 869 } else { 870 dxs->dxs_queue = &sc->sc_periph[dxs->dxs_peripheral].sp_queue; 871 SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link); 872 sc->sc_periph[dxs->dxs_peripheral].sp_busy++; 873 } 874 splx(s); 875 876 return (0); 877 } 878 879 void 880 pxa2x0_dmac_abort_xfer(struct dmac_xfer *dx) 881 { 882 struct pxadmac_softc *sc = pxadmac_sc; 883 struct dmac_xfer_state *ndxs, *dxs = (struct dmac_xfer_state *)dx; 884 struct dmac_desc *desc, *ndesc; 885 struct dmac_xfer_state_head *queue; 886 u_int32_t rv; 887 int s, timeout, need_start = 0; 888 889 s = splbio(); 890 891 queue = dxs->dxs_queue; 892 893 if (dxs->dxs_channel == DMAC_NO_CHANNEL) { 894 /* 895 * The request has not yet started, or it has already 896 * completed. If the request is not on a queue, just 897 * return. 898 */ 899 if (queue == NULL) { 900 splx(s); 901 return; 902 } 903 904 dxs->dxs_queue = NULL; 905 SIMPLEQ_REMOVE(queue, dxs, dmac_xfer_state, dxs_link); 906 } else { 907 /* 908 * The request is in progress. This is a bit trickier. 909 */ 910 dmac_reg_write(sc, DMAC_DCSR(dxs->dxs_channel), 0); 911 912 for (timeout = 5000; timeout; timeout--) { 913 rv = dmac_reg_read(sc, DMAC_DCSR(dxs->dxs_channel)); 914 if (rv & DCSR_STOPSTATE) 915 break; 916 delay(1); 917 } 918 919 if ((rv & DCSR_STOPSTATE) == 0) 920 panic( 921 "pxa2x0_dmac_abort_xfer: channel %d failed to abort", 922 dxs->dxs_channel); 923 924 /* 925 * Free resources allocated to the request 926 */ 927 for (desc = SLIST_FIRST(&dxs->dxs_descs); desc; desc = ndesc) { 928 ndesc = SLIST_NEXT(desc, d_link); 929 SLIST_INSERT_HEAD(&sc->sc_descs, desc, d_link); 930 sc->sc_free_descs++; 931 } 932 933 sc->sc_active[dxs->dxs_channel] = NULL; 934 dmac_free_channel(sc, DMAC_PRI(dxs->dxs_priority), 935 dxs->dxs_channel); 936 937 if (dxs->dxs_peripheral != DMAC_PERIPH_NONE) 938 dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral), 0); 939 940 need_start = 1; 941 dxs->dxs_queue = NULL; 942 } 943 944 if (dxs->dxs_peripheral == DMAC_PERIPH_NONE || 945 sc->sc_periph[dxs->dxs_peripheral].sp_busy-- == 1 || 946 queue == &sc->sc_periph[dxs->dxs_peripheral].sp_queue) 947 goto out; 948 949 /* 950 * We've just removed the current item for this 951 * peripheral, and there is at least one more 952 * pending item waiting. Make it current. 953 */ 954 ndxs = SIMPLEQ_FIRST(&sc->sc_periph[dxs->dxs_peripheral].sp_queue); 955 dxs = ndxs; 956 KDASSERT(dxs != NULL); 957 SIMPLEQ_REMOVE_HEAD(&sc->sc_periph[dxs->dxs_peripheral].sp_queue, 958 dxs_link); 959 960 dxs->dxs_queue = &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)]; 961 SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link); 962 need_start = 1; 963 964 /* 965 * Try to start any pending requests with the same 966 * priority. 967 */ 968 out: 969 if (need_start) 970 dmac_start(sc, DMAC_PRI(dxs->dxs_priority)); 971 splx(s); 972 } 973 974 static void 975 dmac_start(struct pxadmac_softc *sc, dmac_priority_t priority) 976 { 977 struct dmac_xfer_state *dxs; 978 u_int channel; 979 980 while (sc->sc_free_descs && 981 (dxs = SIMPLEQ_FIRST(&sc->sc_queue[priority])) != NULL && 982 dmac_allocate_channel(sc, priority, &channel) == 0) { 983 /* 984 * Yay, got some descriptors, a transfer request, and 985 * an available DMA channel. 986 */ 987 KDASSERT(sc->sc_active[channel] == NULL); 988 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue[priority], dxs_link); 989 990 /* set DMA alignment register */ 991 if (CPU_IS_PXA270) { 992 uint32_t dalgn; 993 994 dalgn = dmac_reg_read(sc, DMAC_DALGN); 995 dalgn &= ~(1U << channel); 996 if (dxs->dxs_misaligned_flag) 997 dalgn |= (1U << channel); 998 dmac_reg_write(sc, DMAC_DALGN, dalgn); 999 } 1000 1001 dxs->dxs_channel = channel; 1002 sc->sc_active[channel] = dxs; 1003 (void) dmac_continue_xfer(sc, dxs); 1004 /* 1005 * XXX: Deal with descriptor allocation failure for loops 1006 */ 1007 } 1008 } 1009 1010 static int 1011 dmac_continue_xfer(struct pxadmac_softc *sc, struct dmac_xfer_state *dxs) 1012 { 1013 struct dmac_desc *desc, *prev_desc; 1014 struct pxa2x0_dma_desc *dd; 1015 struct dmac_desc_segs *src_ds, *dst_ds; 1016 struct dmac_xfer_desc *src_xd, *dst_xd; 1017 bus_dma_segment_t *src_seg, *dst_seg; 1018 bus_addr_t src_mem_addr, dst_mem_addr; 1019 bus_size_t src_size, dst_size, this_size; 1020 1021 desc = NULL; 1022 prev_desc = NULL; 1023 dd = NULL; 1024 src_ds = &dxs->dxs_segs[DMAC_DESC_SRC]; 1025 dst_ds = &dxs->dxs_segs[DMAC_DESC_DST]; 1026 src_xd = &dxs->dxs_desc[DMAC_DESC_SRC]; 1027 dst_xd = &dxs->dxs_desc[DMAC_DESC_DST]; 1028 SLIST_INIT(&dxs->dxs_descs); 1029 1030 /* 1031 * As long as the source/destination buffers have DMA segments, 1032 * and we have free descriptors, build a DMA chain. 1033 */ 1034 while (src_ds->ds_nsegs && dst_ds->ds_nsegs && sc->sc_free_descs) { 1035 src_seg = src_ds->ds_curseg; 1036 src_mem_addr = src_seg->ds_addr + src_ds->ds_offset; 1037 if (src_xd->xd_addr_hold == false && 1038 dxs->dxs_loop_notify != DMAC_DONT_LOOP) 1039 src_size = dxs->dxs_loop_notify; 1040 else 1041 src_size = src_seg->ds_len - src_ds->ds_offset; 1042 1043 dst_seg = dst_ds->ds_curseg; 1044 dst_mem_addr = dst_seg->ds_addr + dst_ds->ds_offset; 1045 if (dst_xd->xd_addr_hold == false && 1046 dxs->dxs_loop_notify != DMAC_DONT_LOOP) 1047 dst_size = dxs->dxs_loop_notify; 1048 else 1049 dst_size = dst_seg->ds_len - dst_ds->ds_offset; 1050 1051 /* 1052 * We may need to split a source or destination segment 1053 * across two or more DMAC descriptors. 1054 */ 1055 while (src_size && dst_size && 1056 (desc = SLIST_FIRST(&sc->sc_descs)) != NULL) { 1057 SLIST_REMOVE_HEAD(&sc->sc_descs, d_link); 1058 sc->sc_free_descs--; 1059 1060 /* 1061 * Decide how much data we're going to transfer 1062 * using this DMAC descriptor. 1063 */ 1064 if (src_xd->xd_addr_hold) 1065 this_size = dst_size; 1066 else 1067 if (dst_xd->xd_addr_hold) 1068 this_size = src_size; 1069 else 1070 this_size = min(dst_size, src_size); 1071 1072 /* 1073 * But clamp the transfer size to the DMAC 1074 * descriptor's maximum. 1075 */ 1076 this_size = min(this_size, DCMD_LENGTH_MASK & ~0x1f); 1077 1078 /* 1079 * Fill in the DMAC descriptor 1080 */ 1081 dd = desc->d_desc; 1082 dd->dd_dsadr = src_mem_addr; 1083 dd->dd_dtadr = dst_mem_addr; 1084 dd->dd_dcmd = dxs->dxs_dcmd | this_size; 1085 1086 /* 1087 * Link it into the chain 1088 */ 1089 if (prev_desc) { 1090 SLIST_INSERT_AFTER(prev_desc, desc, d_link); 1091 prev_desc->d_desc->dd_ddadr = desc->d_desc_pa; 1092 } else { 1093 SLIST_INSERT_HEAD(&dxs->dxs_descs, desc, 1094 d_link); 1095 } 1096 prev_desc = desc; 1097 1098 /* 1099 * Update the source/destination pointers 1100 */ 1101 if (src_xd->xd_addr_hold == false) { 1102 src_size -= this_size; 1103 src_ds->ds_offset += this_size; 1104 if (src_ds->ds_offset == src_seg->ds_len) { 1105 KDASSERT(src_size == 0); 1106 src_ds->ds_curseg = ++src_seg; 1107 src_ds->ds_offset = 0; 1108 src_ds->ds_nsegs--; 1109 } else 1110 src_mem_addr += this_size; 1111 } 1112 1113 if (dst_xd->xd_addr_hold == false) { 1114 dst_size -= this_size; 1115 dst_ds->ds_offset += this_size; 1116 if (dst_ds->ds_offset == dst_seg->ds_len) { 1117 KDASSERT(dst_size == 0); 1118 dst_ds->ds_curseg = ++dst_seg; 1119 dst_ds->ds_offset = 0; 1120 dst_ds->ds_nsegs--; 1121 } else 1122 dst_mem_addr += this_size; 1123 } 1124 } 1125 1126 if (dxs->dxs_loop_notify != DMAC_DONT_LOOP) { 1127 /* 1128 * We must be able to allocate descriptors for the 1129 * entire loop. Otherwise, return them to the pool 1130 * and bail. 1131 */ 1132 if (desc == NULL) { 1133 struct dmac_desc *ndesc; 1134 for (desc = SLIST_FIRST(&dxs->dxs_descs); 1135 desc; desc = ndesc) { 1136 ndesc = SLIST_NEXT(desc, d_link); 1137 SLIST_INSERT_HEAD(&sc->sc_descs, desc, 1138 d_link); 1139 sc->sc_free_descs++; 1140 } 1141 1142 return (0); 1143 } 1144 1145 KASSERT(dd != NULL); 1146 dd->dd_dcmd |= DCMD_ENDIRQEN; 1147 } 1148 } 1149 1150 /* 1151 * Did we manage to build a chain? 1152 * If not, just return. 1153 */ 1154 if (dd == NULL) 1155 return (0); 1156 1157 if (dxs->dxs_loop_notify == DMAC_DONT_LOOP) { 1158 dd->dd_dcmd |= DCMD_ENDIRQEN; 1159 dd->dd_ddadr = DMAC_DESC_LAST; 1160 } else 1161 dd->dd_ddadr = SLIST_FIRST(&dxs->dxs_descs)->d_desc_pa; 1162 1163 if (dxs->dxs_peripheral != DMAC_PERIPH_NONE) { 1164 dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral), 1165 dxs->dxs_channel | DRCMR_MAPVLD); 1166 } 1167 dmac_reg_write(sc, DMAC_DDADR(dxs->dxs_channel), 1168 SLIST_FIRST(&dxs->dxs_descs)->d_desc_pa); 1169 dmac_reg_write(sc, DMAC_DCSR(dxs->dxs_channel), 1170 DCSR_ENDINTR | DCSR_RUN); 1171 1172 return (1); 1173 } 1174 1175 static u_int 1176 dmac_channel_intr(struct pxadmac_softc *sc, u_int channel) 1177 { 1178 struct dmac_xfer_state *dxs; 1179 struct dmac_desc *desc, *ndesc; 1180 u_int32_t dcsr; 1181 u_int rv = 0; 1182 1183 dcsr = dmac_reg_read(sc, DMAC_DCSR(channel)); 1184 dmac_reg_write(sc, DMAC_DCSR(channel), dcsr); 1185 if (dmac_reg_read(sc, DMAC_DCSR(channel)) & DCSR_STOPSTATE) 1186 dmac_reg_write(sc, DMAC_DCSR(channel), dcsr & ~DCSR_RUN); 1187 1188 if ((dxs = sc->sc_active[channel]) == NULL) { 1189 printf("%s: Stray DMAC interrupt for unallocated channel %d\n", 1190 sc->sc_dev.dv_xname, channel); 1191 return (0); 1192 } 1193 1194 /* 1195 * Clear down the interrupt in the DMA Interrupt Register 1196 */ 1197 dmac_reg_write(sc, DMAC_DINT, (1u << channel)); 1198 1199 /* 1200 * If this is a looping request, invoke the 'done' callback and 1201 * return immediately. 1202 */ 1203 if (dxs->dxs_loop_notify != DMAC_DONT_LOOP && 1204 (dcsr & DCSR_BUSERRINTR) == 0) { 1205 (dxs->dxs_done)(&dxs->dxs_xfer, 0); 1206 return (0); 1207 } 1208 1209 /* 1210 * Free the descriptors allocated to the completed transfer 1211 * 1212 * XXX: If there is more data to transfer in this request, 1213 * we could simply reuse some or all of the descriptors 1214 * already allocated for the transfer which just completed. 1215 */ 1216 for (desc = SLIST_FIRST(&dxs->dxs_descs); desc; desc = ndesc) { 1217 ndesc = SLIST_NEXT(desc, d_link); 1218 SLIST_INSERT_HEAD(&sc->sc_descs, desc, d_link); 1219 sc->sc_free_descs++; 1220 } 1221 1222 if ((dcsr & DCSR_BUSERRINTR) || dmac_continue_xfer(sc, dxs) == 0) { 1223 /* 1224 * The transfer completed (possibly due to an error), 1225 * -OR- we were unable to continue any remaining 1226 * segment of the transfer due to a lack of descriptors. 1227 * 1228 * In either case, we have to free up DMAC resources 1229 * allocated to the request. 1230 */ 1231 sc->sc_active[channel] = NULL; 1232 dmac_free_channel(sc, DMAC_PRI(dxs->dxs_priority), channel); 1233 dxs->dxs_channel = DMAC_NO_CHANNEL; 1234 if (dxs->dxs_peripheral != DMAC_PERIPH_NONE) 1235 dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral), 0); 1236 1237 if (dxs->dxs_segs[DMAC_DESC_SRC].ds_nsegs == 0 || 1238 dxs->dxs_segs[DMAC_DESC_DST].ds_nsegs == 0 || 1239 (dcsr & DCSR_BUSERRINTR)) { 1240 1241 /* 1242 * The transfer is complete. 1243 */ 1244 dxs->dxs_queue = NULL; 1245 rv = 1u << DMAC_PRI(dxs->dxs_priority); 1246 1247 if (dxs->dxs_peripheral != DMAC_PERIPH_NONE && 1248 --sc->sc_periph[dxs->dxs_peripheral].sp_busy != 0) { 1249 struct dmac_xfer_state *ndxs; 1250 /* 1251 * We've just removed the current item for this 1252 * peripheral, and there is at least one more 1253 * pending item waiting. Make it current. 1254 */ 1255 ndxs = SIMPLEQ_FIRST( 1256 &sc->sc_periph[dxs->dxs_peripheral].sp_queue); 1257 KDASSERT(ndxs != NULL); 1258 SIMPLEQ_REMOVE_HEAD( 1259 &sc->sc_periph[dxs->dxs_peripheral].sp_queue, 1260 dxs_link); 1261 1262 ndxs->dxs_queue = 1263 &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)]; 1264 SIMPLEQ_INSERT_TAIL(ndxs->dxs_queue, ndxs, 1265 dxs_link); 1266 } 1267 1268 (dxs->dxs_done)(&dxs->dxs_xfer, 1269 (dcsr & DCSR_BUSERRINTR) ? EFAULT : 0); 1270 } else { 1271 /* 1272 * The request is not yet complete, but we were unable 1273 * to make any headway at this time because there are 1274 * no free descriptors. Put the request back at the 1275 * head of the appropriate priority queue. It'll be 1276 * dealt with as other in-progress transfers complete. 1277 */ 1278 SIMPLEQ_INSERT_HEAD( 1279 &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)], dxs, 1280 dxs_link); 1281 } 1282 } 1283 1284 return (rv); 1285 } 1286 1287 static int 1288 dmac_intr(void *arg) 1289 { 1290 struct pxadmac_softc *sc = arg; 1291 u_int32_t rv, mask; 1292 u_int chan, pri; 1293 1294 rv = dmac_reg_read(sc, DMAC_DINT); 1295 if ((rv & DMAC_DINT_MASK) == 0) 1296 return (0); 1297 1298 /* 1299 * Deal with completed transfers 1300 */ 1301 for (chan = 0, mask = 1u, pri = 0; 1302 chan < DMAC_N_CHANNELS; chan++, mask <<= 1) { 1303 if (rv & mask) 1304 pri |= dmac_channel_intr(sc, chan); 1305 } 1306 1307 /* 1308 * Now try to start any queued transfers 1309 */ 1310 #if (DMAC_N_PRIORITIES > 1) 1311 if (pri & (1u << DMAC_PRIORITY_HIGH)) 1312 dmac_start(sc, DMAC_PRIORITY_HIGH); 1313 if (pri & (1u << DMAC_PRIORITY_MED)) 1314 dmac_start(sc, DMAC_PRIORITY_MED); 1315 if (pri & (1u << DMAC_PRIORITY_LOW)) 1316 dmac_start(sc, DMAC_PRIORITY_LOW); 1317 #else 1318 if (pri) 1319 dmac_start(sc, DMAC_PRIORITY_NORMAL); 1320 #endif 1321 1322 return (1); 1323 } 1324