/*
 * Intel XScale PXA255/270 DMA controller.
 *
 * Copyright (c) 2006 Openedhand Ltd.
 * Copyright (c) 2006 Thorsten Zitterell
 * Written by Andrzej Zaborowski <balrog@zabor.org>
 *
 * This code is licensed under the GPL.
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/arm/pxa.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/module.h"

#define PXA255_DMA_NUM_CHANNELS 16
#define PXA27X_DMA_NUM_CHANNELS 32

#define PXA2XX_DMA_NUM_REQUESTS 75

typedef struct {
    uint32_t descr;
    uint32_t src;
    uint32_t dest;
    uint32_t cmd;
    uint32_t state;
    int request;
} PXA2xxDMAChannel;

#define TYPE_PXA2XX_DMA "pxa2xx-dma"
#define PXA2XX_DMA(obj) OBJECT_CHECK(PXA2xxDMAState, (obj), TYPE_PXA2XX_DMA)

typedef struct PXA2xxDMAState {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
    qemu_irq irq;

    uint32_t stopintr;
    uint32_t eorintr;
    uint32_t rasintr;
    uint32_t startintr;
    uint32_t endintr;

    uint32_t align;
    uint32_t pio;

    int channels;
    PXA2xxDMAChannel *chan;

    uint8_t req[PXA2XX_DMA_NUM_REQUESTS];

    /* Flag to avoid recursive DMA invocations. */
    int running;
} PXA2xxDMAState;

#define DCSR0    0x0000 /* DMA Control / Status register for Channel 0 */
#define DCSR31   0x007c /* DMA Control / Status register for Channel 31 */
#define DALGN    0x00a0 /* DMA Alignment register */
#define DPCSR    0x00a4 /* DMA Programmed I/O Control Status register */
#define DRQSR0   0x00e0 /* DMA DREQ<0> Status register */
#define DRQSR1   0x00e4 /* DMA DREQ<1> Status register */
#define DRQSR2   0x00e8 /* DMA DREQ<2> Status register */
#define DINT     0x00f0 /* DMA Interrupt register */
#define DRCMR0   0x0100 /* Request to Channel Map register 0 */
#define DRCMR63  0x01fc /* Request to Channel Map register 63 */
#define D_CH0    0x0200 /* Channel 0 Descriptor start */
#define DRCMR64  0x1100 /* Request to Channel Map register 64 */
#define DRCMR74  0x1128 /* Request to Channel Map register 74 */

/* Per-channel register */
#define DDADR    0x00
#define DSADR    0x01
#define DTADR    0x02
#define DCMD     0x03

/* Bit-field masks */
#define DRCMR_CHLNUM        0x1f
#define DRCMR_MAPVLD        (1 << 7)
#define DDADR_STOP          (1 << 0)
#define DDADR_BREN          (1 << 1)
#define DCMD_LEN            0x1fff
#define DCMD_WIDTH(x)       (1 << ((((x) >> 14) & 3) - 1))
#define DCMD_SIZE(x)        (4 << (((x) >> 16) & 3))
#define DCMD_FLYBYT         (1 << 19)
#define DCMD_FLYBYS         (1 << 20)
#define DCMD_ENDIRQEN       (1 << 21)
#define DCMD_STARTIRQEN     (1 << 22)
#define DCMD_CMPEN          (1 << 25)
#define DCMD_FLOWTRG        (1 << 28)
#define DCMD_FLOWSRC        (1 << 29)
#define DCMD_INCTRGADDR     (1 << 30)
#define DCMD_INCSRCADDR     (1 << 31)
#define DCSR_BUSERRINTR     (1 << 0)
#define DCSR_STARTINTR      (1 << 1)
#define DCSR_ENDINTR        (1 << 2)
#define DCSR_STOPINTR       (1 << 3)
#define DCSR_RASINTR        (1 << 4)
#define DCSR_REQPEND        (1 << 8)
#define DCSR_EORINT         (1 << 9)
#define DCSR_CMPST          (1 << 10)
#define DCSR_MASKRUN        (1 << 22)
#define DCSR_RASIRQEN       (1 << 23)
#define DCSR_CLRCMPST       (1 << 24)
#define DCSR_SETCMPST       (1 << 25)
#define DCSR_EORSTOPEN      (1 << 26)
#define DCSR_EORJMPEN       (1 << 27)
#define DCSR_EORIRQEN       (1 << 28)
#define DCSR_STOPIRQEN      (1 << 29)
#define DCSR_NODESCFETCH    (1 << 30)
#define DCSR_RUN            (1 << 31)

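/*
 * Recompute the per-channel interrupt summary bits for channel 'ch' from
 * its DCSR state and (re)drive the single DMA interrupt line.  A negative
 * 'ch' only re-evaluates the IRQ line without touching any summary bits.
 */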
static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch)
{
    if (ch >= 0) {
        if ((s->chan[ch].state & DCSR_STOPIRQEN) &&
            (s->chan[ch].state & DCSR_STOPINTR))
            s->stopintr |= 1 << ch;
        else
            s->stopintr &= ~(1 << ch);

        if ((s->chan[ch].state & DCSR_EORIRQEN) &&
            (s->chan[ch].state & DCSR_EORINT))
            s->eorintr |= 1 << ch;
        else
            s->eorintr &= ~(1 << ch);

        if ((s->chan[ch].state & DCSR_RASIRQEN) &&
            (s->chan[ch].state & DCSR_RASINTR))
            s->rasintr |= 1 << ch;
        else
            s->rasintr &= ~(1 << ch);

        if (s->chan[ch].state & DCSR_STARTINTR)
            s->startintr |= 1 << ch;
        else
            s->startintr &= ~(1 << ch);

        if (s->chan[ch].state & DCSR_ENDINTR)
            s->endintr |= 1 << ch;
        else
            s->endintr &= ~(1 << ch);
    }

    if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr)
        qemu_irq_raise(s->irq);
    else
        qemu_irq_lower(s->irq);
}

static inline void pxa2xx_dma_descriptor_fetch(PXA2xxDMAState *s, int ch)
{
    uint32_t desc[4];
    hwaddr daddr = s->chan[ch].descr & ~0xf;
    if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
        daddr += 32;

    cpu_physical_memory_read(daddr, desc, 16);
    s->chan[ch].descr = desc[DDADR];
    s->chan[ch].src = desc[DSADR];
    s->chan[ch].dest = desc[DTADR];
    s->chan[ch].cmd = desc[DCMD];

    if (s->chan[ch].cmd & DCMD_FLOWSRC)
        s->chan[ch].src &= ~3;
    if (s->chan[ch].cmd & DCMD_FLOWTRG)
        s->chan[ch].dest &= ~3;

    if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
        printf("%s: unsupported mode in channel %i\n", __func__, ch);

    if (s->chan[ch].cmd & DCMD_STARTIRQEN)
        s->chan[ch].state |= DCSR_STARTINTR;
}

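/*
 * A descriptor is four consecutive 32-bit words in guest memory
 * (DDADR, DSADR, DTADR, DCMD) on a 16-byte boundary; with branching
 * enabled (DDADR_BREN) and DCSR_CMPST set, the alternative descriptor
 * 32 bytes past the base address is fetched instead.
 *
 * The transfer loop below runs each active channel through a small
 * bounce buffer, one burst of DCMD_SIZE bytes at a time in
 * DCMD_WIDTH-sized accesses, until the descriptor length is exhausted
 * or the channel's request line drops.  Re-entrant calls, e.g. from a
 * request handler invoked while a transfer is in progress, only bump
 * s->running; the outermost caller then rescans all channels.
 */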
static void pxa2xx_dma_run(PXA2xxDMAState *s)
{
    int c, srcinc, destinc;
    uint32_t n, size;
    uint32_t width;
    uint32_t length;
    uint8_t buffer[32];
    PXA2xxDMAChannel *ch;

    if (s->running ++)
        return;

    while (s->running) {
        s->running = 1;
        for (c = 0; c < s->channels; c ++) {
            ch = &s->chan[c];

            while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
                /* Test for pending requests */
                if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
                    break;

                length = ch->cmd & DCMD_LEN;
                size = DCMD_SIZE(ch->cmd);
                width = DCMD_WIDTH(ch->cmd);

                srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
                destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;

                while (length) {
                    size = MIN(length, size);

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_read(ch->src, buffer + n, width);
                        ch->src += srcinc;
                    }

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_write(ch->dest, buffer + n, width);
                        ch->dest += destinc;
                    }

                    length -= size;

                    if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
                        !ch->request) {
                        ch->state |= DCSR_EORINT;
                        if (ch->state & DCSR_EORSTOPEN)
                            ch->state |= DCSR_STOPINTR;
                        if ((ch->state & DCSR_EORJMPEN) &&
                            !(ch->state & DCSR_NODESCFETCH))
                            pxa2xx_dma_descriptor_fetch(s, c);
                        break;
                    }
                }

                ch->cmd = (ch->cmd & ~DCMD_LEN) | length;

                /* Is the transfer complete now? */
                if (!length) {
                    if (ch->cmd & DCMD_ENDIRQEN)
                        ch->state |= DCSR_ENDINTR;

                    if ((ch->state & DCSR_NODESCFETCH) ||
                        (ch->descr & DDADR_STOP) ||
                        (ch->state & DCSR_EORSTOPEN)) {
                        ch->state |= DCSR_STOPINTR;
                        ch->state &= ~DCSR_RUN;

                        break;
                    }

                    ch->state |= DCSR_STOPINTR;
                    break;
                }
            }
        }

        s->running --;
    }
}

static uint64_t pxa2xx_dma_read(void *opaque, hwaddr offset,
                                unsigned size)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    if (size != 4) {
        hw_error("%s: Bad access width\n", __func__);
        return 5;
    }

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;
        return s->req[channel];

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        return 0;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        if (s->chan[channel].request)
            return s->chan[channel].state | DCSR_REQPEND;
        return s->chan[channel].state;

    case DINT:
        return s->stopintr | s->eorintr | s->rasintr |
               s->startintr | s->endintr;

    case DALGN:
        return s->align;

    case DPCSR:
        return s->pio;
    }

    if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
        channel = (offset - D_CH0) >> 4;
        switch ((offset & 0x0f) >> 2) {
        case DDADR:
            return s->chan[channel].descr;
        case DSADR:
            return s->chan[channel].src;
        case DTADR:
            return s->chan[channel].dest;
        case DCMD:
            return s->chan[channel].cmd;
        }
    }

    hw_error("%s: Bad offset 0x" TARGET_FMT_plx "\n", __func__, offset);
    return 7;
}

static void pxa2xx_dma_write(void *opaque, hwaddr offset,
                             uint64_t value, unsigned size)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    if (size != 4) {
        hw_error("%s: Bad access width\n", __func__);
        return;
    }

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;

        if (value & DRCMR_MAPVLD)
            if ((value & DRCMR_CHLNUM) > s->channels)
                hw_error("%s: Bad DMA channel %i\n",
                         __func__, (unsigned)value & DRCMR_CHLNUM);

        s->req[channel] = value;
        break;

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        /* Nothing to do */
        break;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        s->chan[channel].state &= 0x0000071f & ~(value &
                (DCSR_EORINT | DCSR_ENDINTR |
                 DCSR_STARTINTR | DCSR_BUSERRINTR));
        s->chan[channel].state |= value & 0xfc800000;

        if (s->chan[channel].state & DCSR_STOPIRQEN)
            s->chan[channel].state &= ~DCSR_STOPINTR;

        if (value & DCSR_NODESCFETCH) {
            /* No-descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_run(s);
            }
        } else {
            /* Descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_descriptor_fetch(s, channel);
                pxa2xx_dma_run(s);
            }
        }

        /* Shouldn't matter as our DMA is synchronous. */
        if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
            s->chan[channel].state |= DCSR_STOPINTR;

        if (value & DCSR_CLRCMPST)
            s->chan[channel].state &= ~DCSR_CMPST;
        if (value & DCSR_SETCMPST)
            s->chan[channel].state |= DCSR_CMPST;

        pxa2xx_dma_update(s, channel);
        break;

    case DALGN:
        s->align = value;
        break;

    case DPCSR:
        s->pio = value & 0x80000001;
        break;

    default:
        if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
            channel = (offset - D_CH0) >> 4;
            switch ((offset & 0x0f) >> 2) {
            case DDADR:
                s->chan[channel].descr = value;
                break;
            case DSADR:
                s->chan[channel].src = value;
                break;
            case DTADR:
                s->chan[channel].dest = value;
                break;
            case DCMD:
                s->chan[channel].cmd = value;
                break;
            default:
                goto fail;
            }

            break;
        }
    fail:
        hw_error("%s: Bad offset " TARGET_FMT_plx "\n", __func__, offset);
    }
}

static const MemoryRegionOps pxa2xx_dma_ops = {
    .read = pxa2xx_dma_read,
    .write = pxa2xx_dma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

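/*
 * DMA request (DREQ) lines.  Peripheral models raise and lower their
 * request line through the qdev GPIO inputs registered in
 * pxa2xx_dma_init(); the DRCMR registers select which channel, if any,
 * services a given request number.
 */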
static void pxa2xx_dma_request(void *opaque, int req_num, int on)
{
    PXA2xxDMAState *s = opaque;
    int ch;
    if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
        hw_error("%s: Bad DMA request %i\n", __func__, req_num);

    if (!(s->req[req_num] & DRCMR_MAPVLD))
        return;
    ch = s->req[req_num] & DRCMR_CHLNUM;

    if (!s->chan[ch].request && on)
        s->chan[ch].state |= DCSR_RASINTR;
    else
        s->chan[ch].state &= ~DCSR_RASINTR;
    if (s->chan[ch].request && !on)
        s->chan[ch].state |= DCSR_EORINT;

    s->chan[ch].request = on;
    if (on) {
        pxa2xx_dma_run(s);
        pxa2xx_dma_update(s, ch);
    }
}

static void pxa2xx_dma_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    PXA2xxDMAState *s = PXA2XX_DMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);

    qdev_init_gpio_in(dev, pxa2xx_dma_request, PXA2XX_DMA_NUM_REQUESTS);

    memory_region_init_io(&s->iomem, obj, &pxa2xx_dma_ops, s,
                          "pxa2xx.dma", 0x00010000);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq);
}

static void pxa2xx_dma_realize(DeviceState *dev, Error **errp)
{
    PXA2xxDMAState *s = PXA2XX_DMA(dev);
    int i;

    if (s->channels <= 0) {
        error_setg(errp, "channels value invalid");
        return;
    }

    s->chan = g_new0(PXA2xxDMAChannel, s->channels);

    for (i = 0; i < s->channels; i ++)
        s->chan[i].state = DCSR_STOPINTR;
}

DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev;

    dev = qdev_create(NULL, "pxa2xx-dma");
    qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
    qdev_init_nofail(dev);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}

DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev;

    dev = qdev_create(NULL, "pxa2xx-dma");
    /* The PXA255 variant exposes only 16 DMA channels. */
    qdev_prop_set_int32(dev, "channels", PXA255_DMA_NUM_CHANNELS);
    qdev_init_nofail(dev);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}

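/*
 * Typical instantiation from an SoC model (a sketch; the 0x40000000
 * base address and PXA2XX_PIC_DMA interrupt wiring shown here follow
 * hw/arm/pxa2xx.c, but the exact call site may differ):
 *
 *   s->dma = pxa27x_dma_init(0x40000000,
 *                            qdev_get_gpio_in(s->pic, PXA2XX_PIC_DMA));
 */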
static bool is_version_0(void *opaque, int version_id)
{
    return version_id == 0;
}

static VMStateDescription vmstate_pxa2xx_dma_chan = {
    .name = "pxa2xx_dma_chan",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(descr, PXA2xxDMAChannel),
        VMSTATE_UINT32(src, PXA2xxDMAChannel),
        VMSTATE_UINT32(dest, PXA2xxDMAChannel),
        VMSTATE_UINT32(cmd, PXA2xxDMAChannel),
        VMSTATE_UINT32(state, PXA2xxDMAChannel),
        VMSTATE_INT32(request, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};

static VMStateDescription vmstate_pxa2xx_dma = {
    .name = "pxa2xx_dma",
    .version_id = 1,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED_TEST(is_version_0, 4),
        VMSTATE_UINT32(stopintr, PXA2xxDMAState),
        VMSTATE_UINT32(eorintr, PXA2xxDMAState),
        VMSTATE_UINT32(rasintr, PXA2xxDMAState),
        VMSTATE_UINT32(startintr, PXA2xxDMAState),
        VMSTATE_UINT32(endintr, PXA2xxDMAState),
        VMSTATE_UINT32(align, PXA2xxDMAState),
        VMSTATE_UINT32(pio, PXA2xxDMAState),
        VMSTATE_BUFFER(req, PXA2xxDMAState),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels,
                vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};

static Property pxa2xx_dma_properties[] = {
    DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void pxa2xx_dma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "PXA2xx DMA controller";
    dc->vmsd = &vmstate_pxa2xx_dma;
    dc->props = pxa2xx_dma_properties;
    dc->realize = pxa2xx_dma_realize;
}

static const TypeInfo pxa2xx_dma_info = {
    .name          = TYPE_PXA2XX_DMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PXA2xxDMAState),
    .instance_init = pxa2xx_dma_init,
    .class_init    = pxa2xx_dma_class_init,
};

static void pxa2xx_dma_register_types(void)
{
    type_register_static(&pxa2xx_dma_info);
}

type_init(pxa2xx_dma_register_types)