/*
 * Intel XScale PXA255/270 DMA controller.
 *
 * Copyright (c) 2006 Openedhand Ltd.
 * Copyright (c) 2006 Thorsten Zitterell
 * Written by Andrzej Zaborowski <balrog@zabor.org>
 *
 * This code is licensed under the GPL.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/hw.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/arm/pxa.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "qom/object.h"

#define PXA255_DMA_NUM_CHANNELS 16
#define PXA27X_DMA_NUM_CHANNELS 32

#define PXA2XX_DMA_NUM_REQUESTS 75

typedef struct {
    uint32_t descr;
    uint32_t src;
    uint32_t dest;
    uint32_t cmd;
    uint32_t state;
    int request;
} PXA2xxDMAChannel;

#define TYPE_PXA2XX_DMA "pxa2xx-dma"
typedef struct PXA2xxDMAState PXA2xxDMAState;
DECLARE_INSTANCE_CHECKER(PXA2xxDMAState, PXA2XX_DMA,
                         TYPE_PXA2XX_DMA)

struct PXA2xxDMAState {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
    qemu_irq irq;

    uint32_t stopintr;
    uint32_t eorintr;
    uint32_t rasintr;
    uint32_t startintr;
    uint32_t endintr;

    uint32_t align;
    uint32_t pio;

    int channels;
    PXA2xxDMAChannel *chan;

    uint8_t req[PXA2XX_DMA_NUM_REQUESTS];

    /* Flag to avoid recursive DMA invocations. */
    int running;
};

#define DCSR0   0x0000  /* DMA Control / Status register for Channel 0 */
#define DCSR31  0x007c  /* DMA Control / Status register for Channel 31 */
#define DALGN   0x00a0  /* DMA Alignment register */
#define DPCSR   0x00a4  /* DMA Programmed I/O Control Status register */
#define DRQSR0  0x00e0  /* DMA DREQ<0> Status register */
#define DRQSR1  0x00e4  /* DMA DREQ<1> Status register */
#define DRQSR2  0x00e8  /* DMA DREQ<2> Status register */
#define DINT    0x00f0  /* DMA Interrupt register */
#define DRCMR0  0x0100  /* Request to Channel Map register 0 */
#define DRCMR63 0x01fc  /* Request to Channel Map register 63 */
#define D_CH0   0x0200  /* Channel 0 Descriptor start */
#define DRCMR64 0x1100  /* Request to Channel Map register 64 */
#define DRCMR74 0x1128  /* Request to Channel Map register 74 */

/* Per-channel register */
#define DDADR   0x00
#define DSADR   0x01
#define DTADR   0x02
#define DCMD    0x03

/* Bit-field masks */
#define DRCMR_CHLNUM        0x1f
#define DRCMR_MAPVLD        (1 << 7)
#define DDADR_STOP          (1 << 0)
#define DDADR_BREN          (1 << 1)
#define DCMD_LEN            0x1fff
#define DCMD_WIDTH(x)       (1 << ((((x) >> 14) & 3) - 1))
#define DCMD_SIZE(x)        (4 << (((x) >> 16) & 3))
#define DCMD_FLYBYT         (1 << 19)
#define DCMD_FLYBYS         (1 << 20)
#define DCMD_ENDIRQEN       (1 << 21)
#define DCMD_STARTIRQEN     (1 << 22)
#define DCMD_CMPEN          (1 << 25)
#define DCMD_FLOWTRG        (1 << 28)
#define DCMD_FLOWSRC        (1 << 29)
#define DCMD_INCTRGADDR     (1 << 30)
#define DCMD_INCSRCADDR     (1 << 31)
#define DCSR_BUSERRINTR     (1 << 0)
#define DCSR_STARTINTR      (1 << 1)
#define DCSR_ENDINTR        (1 << 2)
#define DCSR_STOPINTR       (1 << 3)
#define DCSR_RASINTR        (1 << 4)
#define DCSR_REQPEND        (1 << 8)
#define DCSR_EORINT         (1 << 9)
#define DCSR_CMPST          (1 << 10)
#define DCSR_MASKRUN        (1 << 22)
#define DCSR_RASIRQEN       (1 << 23)
#define DCSR_CLRCMPST       (1 << 24)
#define DCSR_SETCMPST       (1 << 25)
#define DCSR_EORSTOPEN      (1 << 26)
#define DCSR_EORJMPEN       (1 << 27)
#define DCSR_EORIRQEN       (1 << 28)
#define DCSR_STOPIRQEN      (1 << 29)
#define DCSR_NODESCFETCH    (1 << 30)
#define DCSR_RUN            (1 << 31)

static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch)
{
    if (ch >= 0) {
        if ((s->chan[ch].state & DCSR_STOPIRQEN) &&
                (s->chan[ch].state & DCSR_STOPINTR))
            s->stopintr |= 1 << ch;
        else
            s->stopintr &= ~(1 << ch);

        if ((s->chan[ch].state & DCSR_EORIRQEN) &&
                (s->chan[ch].state & DCSR_EORINT))
            s->eorintr |= 1 << ch;
        else
            s->eorintr &= ~(1 << ch);

        if ((s->chan[ch].state & DCSR_RASIRQEN) &&
                (s->chan[ch].state & DCSR_RASINTR))
            s->rasintr |= 1 << ch;
        else
            s->rasintr &= ~(1 << ch);

        if (s->chan[ch].state & DCSR_STARTINTR)
            s->startintr |= 1 << ch;
        else
            s->startintr &= ~(1 << ch);

        if (s->chan[ch].state & DCSR_ENDINTR)
            s->endintr |= 1 << ch;
        else
            s->endintr &= ~(1 << ch);
    }

    if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr)
        qemu_irq_raise(s->irq);
    else
        qemu_irq_lower(s->irq);
}

static inline void pxa2xx_dma_descriptor_fetch(
                PXA2xxDMAState *s, int ch)
{
    uint32_t desc[4];
    hwaddr daddr = s->chan[ch].descr & ~0xf;
    if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
        daddr += 32;

    cpu_physical_memory_read(daddr, desc, 16);
    s->chan[ch].descr = desc[DDADR];
    s->chan[ch].src = desc[DSADR];
    s->chan[ch].dest = desc[DTADR];
    s->chan[ch].cmd = desc[DCMD];

    if (s->chan[ch].cmd & DCMD_FLOWSRC)
        s->chan[ch].src &= ~3;
    if (s->chan[ch].cmd & DCMD_FLOWTRG)
        s->chan[ch].dest &= ~3;

    if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
        qemu_log_mask(LOG_UNIMP, "%s: unsupported mode in channel %i\n",
                      __func__, ch);

    if (s->chan[ch].cmd & DCMD_STARTIRQEN)
        s->chan[ch].state |= DCSR_STARTINTR;
}

static void pxa2xx_dma_run(PXA2xxDMAState *s)
{
    int c, srcinc, destinc;
    uint32_t n, size;
    uint32_t width;
    uint32_t length;
    uint8_t buffer[32];
    PXA2xxDMAChannel *ch;

    if (s->running ++)
        return;

    while (s->running) {
        s->running = 1;
        for (c = 0; c < s->channels; c ++) {
            ch = &s->chan[c];

            while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
                /* Test for pending requests */
                if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
                    break;

                length = ch->cmd & DCMD_LEN;
                size = DCMD_SIZE(ch->cmd);
                width = DCMD_WIDTH(ch->cmd);

                srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
                destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;

                while (length) {
                    size = MIN(length, size);

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_read(ch->src, buffer + n, width);
                        ch->src += srcinc;
                    }

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_write(ch->dest, buffer + n, width);
                        ch->dest += destinc;
                    }

                    length -= size;

                    if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
                            !ch->request) {
                        ch->state |= DCSR_EORINT;
                        if (ch->state & DCSR_EORSTOPEN)
                            ch->state |= DCSR_STOPINTR;
                        if ((ch->state & DCSR_EORJMPEN) &&
                            !(ch->state & DCSR_NODESCFETCH))
                            pxa2xx_dma_descriptor_fetch(s, c);
                        break;
                    }
                }

                ch->cmd = (ch->cmd & ~DCMD_LEN) | length;

                /* Is the transfer complete now? */
                if (!length) {
                    if (ch->cmd & DCMD_ENDIRQEN)
                        ch->state |= DCSR_ENDINTR;

                    if ((ch->state & DCSR_NODESCFETCH) ||
                        (ch->descr & DDADR_STOP) ||
                        (ch->state & DCSR_EORSTOPEN)) {
                        ch->state |= DCSR_STOPINTR;
                        ch->state &= ~DCSR_RUN;

                        break;
                    }

                    ch->state |= DCSR_STOPINTR;
                    break;
                }
            }
        }

        s->running --;
    }
}

static uint64_t pxa2xx_dma_read(void *opaque, hwaddr offset,
                                unsigned size)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad access width %u\n",
                      __func__, size);
        return 5;
    }

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;
        return s->req[channel];

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        return 0;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        if (s->chan[channel].request)
            return s->chan[channel].state | DCSR_REQPEND;
        return s->chan[channel].state;

    case DINT:
        return s->stopintr | s->eorintr | s->rasintr |
               s->startintr | s->endintr;

    case DALGN:
        return s->align;

    case DPCSR:
        return s->pio;
    }

    if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
        channel = (offset - D_CH0) >> 4;
        switch ((offset & 0x0f) >> 2) {
        case DDADR:
            return s->chan[channel].descr;
        case DSADR:
            return s->chan[channel].src;
        case DTADR:
            return s->chan[channel].dest;
        case DCMD:
            return s->chan[channel].cmd;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
                  __func__, offset);
    return 7;
}

static void pxa2xx_dma_write(void *opaque, hwaddr offset,
                             uint64_t value, unsigned size)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad access width %u\n",
                      __func__, size);
        return;
    }

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;

        if (value & DRCMR_MAPVLD)
            if ((value & DRCMR_CHLNUM) > s->channels)
                hw_error("%s: Bad DMA channel %i\n",
                         __func__, (unsigned)value & DRCMR_CHLNUM);

        s->req[channel] = value;
        break;

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        /* Nothing to do */
        break;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        s->chan[channel].state &= 0x0000071f & ~(value &
                        (DCSR_EORINT | DCSR_ENDINTR |
                         DCSR_STARTINTR | DCSR_BUSERRINTR));
        s->chan[channel].state |= value & 0xfc800000;

        if (s->chan[channel].state & DCSR_STOPIRQEN)
            s->chan[channel].state &= ~DCSR_STOPINTR;

        if (value & DCSR_NODESCFETCH) {
            /* No-descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_run(s);
            }
        } else {
            /* Descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_descriptor_fetch(s, channel);
                pxa2xx_dma_run(s);
            }
        }

        /* Shouldn't matter as our DMA is synchronous. */
        if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
            s->chan[channel].state |= DCSR_STOPINTR;

        if (value & DCSR_CLRCMPST)
            s->chan[channel].state &= ~DCSR_CMPST;
        if (value & DCSR_SETCMPST)
            s->chan[channel].state |= DCSR_CMPST;

        pxa2xx_dma_update(s, channel);
        break;

    case DALGN:
        s->align = value;
        break;

    case DPCSR:
        s->pio = value & 0x80000001;
        break;

    default:
        if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
            channel = (offset - D_CH0) >> 4;
            switch ((offset & 0x0f) >> 2) {
            case DDADR:
                s->chan[channel].descr = value;
                break;
            case DSADR:
                s->chan[channel].src = value;
                break;
            case DTADR:
                s->chan[channel].dest = value;
                break;
            case DCMD:
                s->chan[channel].cmd = value;
                break;
            default:
                goto fail;
            }

            break;
        }
    fail:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
                      __func__, offset);
    }
}

static const MemoryRegionOps pxa2xx_dma_ops = {
    .read = pxa2xx_dma_read,
    .write = pxa2xx_dma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void pxa2xx_dma_request(void *opaque, int req_num, int on)
{
    PXA2xxDMAState *s = opaque;
    int ch;
    if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
        hw_error("%s: Bad DMA request %i\n", __func__, req_num);

    if (!(s->req[req_num] & DRCMR_MAPVLD))
        return;
    ch = s->req[req_num] & DRCMR_CHLNUM;

    if (!s->chan[ch].request && on)
        s->chan[ch].state |= DCSR_RASINTR;
    else
        s->chan[ch].state &= ~DCSR_RASINTR;
    if (s->chan[ch].request && !on)
        s->chan[ch].state |= DCSR_EORINT;

    s->chan[ch].request = on;
    if (on) {
        pxa2xx_dma_run(s);
        pxa2xx_dma_update(s, ch);
    }
}

static void pxa2xx_dma_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    PXA2xxDMAState *s = PXA2XX_DMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);

    qdev_init_gpio_in(dev, pxa2xx_dma_request, PXA2XX_DMA_NUM_REQUESTS);

    memory_region_init_io(&s->iomem, obj, &pxa2xx_dma_ops, s,
                          "pxa2xx.dma", 0x00010000);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq);
}

static void pxa2xx_dma_realize(DeviceState *dev, Error **errp)
{
    PXA2xxDMAState *s = PXA2XX_DMA(dev);
    int i;

    if (s->channels <= 0) {
        error_setg(errp, "channels value invalid");
        return;
    }

    s->chan = g_new0(PXA2xxDMAChannel, s->channels);

    for (i = 0; i < s->channels; i ++)
        s->chan[i].state = DCSR_STOPINTR;
}

DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev;

    dev = qdev_new("pxa2xx-dma");
    qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}

DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev;

    dev = qdev_new("pxa2xx-dma");
    /* The PXA255 controller has 16 channels, not the PXA27x's 32. */
    qdev_prop_set_int32(dev, "channels", PXA255_DMA_NUM_CHANNELS);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}

static bool is_version_0(void *opaque, int version_id)
{
    return version_id == 0;
}

static const VMStateDescription vmstate_pxa2xx_dma_chan = {
    .name = "pxa2xx_dma_chan",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(descr, PXA2xxDMAChannel),
        VMSTATE_UINT32(src, PXA2xxDMAChannel),
        VMSTATE_UINT32(dest, PXA2xxDMAChannel),
        VMSTATE_UINT32(cmd, PXA2xxDMAChannel),
        VMSTATE_UINT32(state, PXA2xxDMAChannel),
        VMSTATE_INT32(request, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_pxa2xx_dma = {
    .name = "pxa2xx_dma",
    .version_id = 1,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED_TEST(is_version_0, 4),
        VMSTATE_UINT32(stopintr, PXA2xxDMAState),
        VMSTATE_UINT32(eorintr, PXA2xxDMAState),
        VMSTATE_UINT32(rasintr, PXA2xxDMAState),
        VMSTATE_UINT32(startintr, PXA2xxDMAState),
        VMSTATE_UINT32(endintr, PXA2xxDMAState),
        VMSTATE_UINT32(align, PXA2xxDMAState),
        VMSTATE_UINT32(pio, PXA2xxDMAState),
        VMSTATE_BUFFER(req, PXA2xxDMAState),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels,
                vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};

static Property pxa2xx_dma_properties[] = {
    DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void pxa2xx_dma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "PXA2xx DMA controller";
    dc->vmsd = &vmstate_pxa2xx_dma;
    device_class_set_props(dc, pxa2xx_dma_properties);
    dc->realize = pxa2xx_dma_realize;
}

static const TypeInfo pxa2xx_dma_info = {
    .name           = TYPE_PXA2XX_DMA,
    .parent         = TYPE_SYS_BUS_DEVICE,
    .instance_size  = sizeof(PXA2xxDMAState),
    .instance_init  = pxa2xx_dma_init,
    .class_init     = pxa2xx_dma_class_init,
};

static void pxa2xx_dma_register_types(void)
{
    type_register_static(&pxa2xx_dma_info);
}

type_init(pxa2xx_dma_register_types)