xref: /qemu/hw/dma/pxa2xx_dma.c (revision 7a4e543d)
1 /*
2  * Intel XScale PXA255/270 DMA controller.
3  *
4  * Copyright (c) 2006 Openedhand Ltd.
5  * Copyright (c) 2006 Thorsten Zitterell
6  * Written by Andrzej Zaborowski <balrog@zabor.org>
7  *
8  * This code is licensed under the GPL.
9  */
10 
11 #include "qemu/osdep.h"
12 #include "hw/hw.h"
13 #include "hw/arm/pxa.h"
14 #include "hw/sysbus.h"
15 
16 #define PXA255_DMA_NUM_CHANNELS 16
17 #define PXA27X_DMA_NUM_CHANNELS 32
18 
19 #define PXA2XX_DMA_NUM_REQUESTS 75
20 
/* Per-channel state: shadow copies of the four descriptor registers
 * plus the control/status word and the current DREQ line level.  */
typedef struct {
    uint32_t descr;     /* DDADR: next descriptor address (+ STOP/BREN bits) */
    uint32_t src;       /* DSADR: source address */
    uint32_t dest;      /* DTADR: target address */
    uint32_t cmd;       /* DCMD: command word, low 13 bits = remaining length */
    uint32_t state;     /* DCSR: control/status bits */
    int request;        /* nonzero while the mapped request line is asserted */
} PXA2xxDMAChannel;
29 
30 #define TYPE_PXA2XX_DMA "pxa2xx-dma"
31 #define PXA2XX_DMA(obj) OBJECT_CHECK(PXA2xxDMAState, (obj), TYPE_PXA2XX_DMA)
32 
typedef struct PXA2xxDMAState {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
    qemu_irq irq;           /* single combined interrupt output */

    /* Per-cause interrupt summary bitmaps; bit n is set when channel n
     * has that interrupt pending (and, where applicable, enabled).  */
    uint32_t stopintr;
    uint32_t eorintr;
    uint32_t rasintr;
    uint32_t startintr;
    uint32_t endintr;

    uint32_t align;         /* DALGN register */
    uint32_t pio;           /* DPCSR register */

    /* Number of channels; set via the "channels" qdev property
     * (16 for PXA255, 32 for PXA27x).  */
    int channels;
    PXA2xxDMAChannel *chan; /* array of `channels' elements */

    /* DRCMR registers: request-to-channel map, one byte per request.  */
    uint8_t req[PXA2XX_DMA_NUM_REQUESTS];

    /* Flag to avoid recursive DMA invocations.  */
    int running;
} PXA2xxDMAState;
56 
57 #define DCSR0	0x0000	/* DMA Control / Status register for Channel 0 */
58 #define DCSR31	0x007c	/* DMA Control / Status register for Channel 31 */
59 #define DALGN	0x00a0	/* DMA Alignment register */
60 #define DPCSR	0x00a4	/* DMA Programmed I/O Control Status register */
61 #define DRQSR0	0x00e0	/* DMA DREQ<0> Status register */
62 #define DRQSR1	0x00e4	/* DMA DREQ<1> Status register */
63 #define DRQSR2	0x00e8	/* DMA DREQ<2> Status register */
64 #define DINT	0x00f0	/* DMA Interrupt register */
65 #define DRCMR0	0x0100	/* Request to Channel Map register 0 */
66 #define DRCMR63	0x01fc	/* Request to Channel Map register 63 */
67 #define D_CH0	0x0200	/* Channel 0 Descriptor start */
68 #define DRCMR64	0x1100	/* Request to Channel Map register 64 */
69 #define DRCMR74	0x1128	/* Request to Channel Map register 74 */
70 
71 /* Per-channel register */
72 #define DDADR	0x00
73 #define DSADR	0x01
74 #define DTADR	0x02
75 #define DCMD	0x03
76 
77 /* Bit-field masks */
78 #define DRCMR_CHLNUM		0x1f
79 #define DRCMR_MAPVLD		(1 << 7)
80 #define DDADR_STOP		(1 << 0)
81 #define DDADR_BREN		(1 << 1)
82 #define DCMD_LEN		0x1fff
83 #define DCMD_WIDTH(x)		(1 << ((((x) >> 14) & 3) - 1))
84 #define DCMD_SIZE(x)		(4 << (((x) >> 16) & 3))
85 #define DCMD_FLYBYT		(1 << 19)
86 #define DCMD_FLYBYS		(1 << 20)
87 #define DCMD_ENDIRQEN		(1 << 21)
88 #define DCMD_STARTIRQEN		(1 << 22)
89 #define DCMD_CMPEN		(1 << 25)
90 #define DCMD_FLOWTRG		(1 << 28)
91 #define DCMD_FLOWSRC		(1 << 29)
92 #define DCMD_INCTRGADDR		(1 << 30)
93 #define DCMD_INCSRCADDR		(1 << 31)
94 #define DCSR_BUSERRINTR		(1 << 0)
95 #define DCSR_STARTINTR		(1 << 1)
96 #define DCSR_ENDINTR		(1 << 2)
97 #define DCSR_STOPINTR		(1 << 3)
98 #define DCSR_RASINTR		(1 << 4)
99 #define DCSR_REQPEND		(1 << 8)
100 #define DCSR_EORINT		(1 << 9)
101 #define DCSR_CMPST		(1 << 10)
102 #define DCSR_MASKRUN		(1 << 22)
103 #define DCSR_RASIRQEN		(1 << 23)
104 #define DCSR_CLRCMPST		(1 << 24)
105 #define DCSR_SETCMPST		(1 << 25)
106 #define DCSR_EORSTOPEN		(1 << 26)
107 #define DCSR_EORJMPEN		(1 << 27)
108 #define DCSR_EORIRQEN		(1 << 28)
109 #define DCSR_STOPIRQEN		(1 << 29)
110 #define DCSR_NODESCFETCH	(1 << 30)
111 #define DCSR_RUN		(1 << 31)
112 
113 static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch)
114 {
115     if (ch >= 0) {
116         if ((s->chan[ch].state & DCSR_STOPIRQEN) &&
117                 (s->chan[ch].state & DCSR_STOPINTR))
118             s->stopintr |= 1 << ch;
119         else
120             s->stopintr &= ~(1 << ch);
121 
122         if ((s->chan[ch].state & DCSR_EORIRQEN) &&
123                 (s->chan[ch].state & DCSR_EORINT))
124             s->eorintr |= 1 << ch;
125         else
126             s->eorintr &= ~(1 << ch);
127 
128         if ((s->chan[ch].state & DCSR_RASIRQEN) &&
129                 (s->chan[ch].state & DCSR_RASINTR))
130             s->rasintr |= 1 << ch;
131         else
132             s->rasintr &= ~(1 << ch);
133 
134         if (s->chan[ch].state & DCSR_STARTINTR)
135             s->startintr |= 1 << ch;
136         else
137             s->startintr &= ~(1 << ch);
138 
139         if (s->chan[ch].state & DCSR_ENDINTR)
140             s->endintr |= 1 << ch;
141         else
142             s->endintr &= ~(1 << ch);
143     }
144 
145     if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr)
146         qemu_irq_raise(s->irq);
147     else
148         qemu_irq_lower(s->irq);
149 }
150 
151 static inline void pxa2xx_dma_descriptor_fetch(
152                 PXA2xxDMAState *s, int ch)
153 {
154     uint32_t desc[4];
155     hwaddr daddr = s->chan[ch].descr & ~0xf;
156     if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
157         daddr += 32;
158 
159     cpu_physical_memory_read(daddr, desc, 16);
160     s->chan[ch].descr = desc[DDADR];
161     s->chan[ch].src = desc[DSADR];
162     s->chan[ch].dest = desc[DTADR];
163     s->chan[ch].cmd = desc[DCMD];
164 
165     if (s->chan[ch].cmd & DCMD_FLOWSRC)
166         s->chan[ch].src &= ~3;
167     if (s->chan[ch].cmd & DCMD_FLOWTRG)
168         s->chan[ch].dest &= ~3;
169 
170     if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
171         printf("%s: unsupported mode in channel %i\n", __FUNCTION__, ch);
172 
173     if (s->chan[ch].cmd & DCMD_STARTIRQEN)
174         s->chan[ch].state |= DCSR_STARTINTR;
175 }
176 
/*
 * Run every enabled channel until it stops or runs out of pending
 * requests.  All transfers complete synchronously inside this call,
 * in bursts of at most sizeof(buffer) (32) bytes.
 */
static void pxa2xx_dma_run(PXA2xxDMAState *s)
{
    int c, srcinc, destinc;
    uint32_t n, size;
    uint32_t width;
    uint32_t length;
    uint8_t buffer[32];
    PXA2xxDMAChannel *ch;

    /* Re-entrancy guard: a nested invocation (e.g. a request raised
     * from within one of the memory accesses below) only bumps the
     * counter and returns; the outer loop notices the counter stayed
     * above zero after the decrement and iterates again.  */
    if (s->running ++)
        return;

    while (s->running) {
        s->running = 1;
        for (c = 0; c < s->channels; c ++) {
            ch = &s->chan[c];

            while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
                /* Test for pending requests */
                if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
                    break;

                /* Decode the command word: remaining byte count, burst
                 * size and per-access width.  */
                length = ch->cmd & DCMD_LEN;
                size = DCMD_SIZE(ch->cmd);
                width = DCMD_WIDTH(ch->cmd);

                /* Addresses only advance when increment is enabled.  */
                srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
                destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;

                /* Copy one burst per iteration through `buffer'.  */
                while (length) {
                    size = MIN(length, size);

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_read(ch->src, buffer + n, width);
                        ch->src += srcinc;
                    }

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_write(ch->dest, buffer + n, width);
                        ch->dest += destinc;
                    }

                    length -= size;

                    /* Flow-controlled transfer whose request dropped
                     * mid-way: flag end-of-receive, optionally stop
                     * and/or jump to the next descriptor.  */
                    if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
                            !ch->request) {
                        ch->state |= DCSR_EORINT;
                        if (ch->state & DCSR_EORSTOPEN)
                            ch->state |= DCSR_STOPINTR;
                        if ((ch->state & DCSR_EORJMPEN) &&
                                        !(ch->state & DCSR_NODESCFETCH))
                            pxa2xx_dma_descriptor_fetch(s, c);
                        break;
                    }
                }

                /* Write the remaining length back into DCMD.  */
                ch->cmd = (ch->cmd & ~DCMD_LEN) | length;

                /* Is the transfer complete now? */
                if (!length) {
                    if (ch->cmd & DCMD_ENDIRQEN)
                        ch->state |= DCSR_ENDINTR;

                    if ((ch->state & DCSR_NODESCFETCH) ||
                                (ch->descr & DDADR_STOP) ||
                                (ch->state & DCSR_EORSTOPEN)) {
                        ch->state |= DCSR_STOPINTR;
                        ch->state &= ~DCSR_RUN;

                        break;
                    }

                    ch->state |= DCSR_STOPINTR;
                    break;
                }
            }
        }

        s->running --;
    }
}
258 
259 static uint64_t pxa2xx_dma_read(void *opaque, hwaddr offset,
260                                 unsigned size)
261 {
262     PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
263     unsigned int channel;
264 
265     if (size != 4) {
266         hw_error("%s: Bad access width\n", __FUNCTION__);
267         return 5;
268     }
269 
270     switch (offset) {
271     case DRCMR64 ... DRCMR74:
272         offset -= DRCMR64 - DRCMR0 - (64 << 2);
273         /* Fall through */
274     case DRCMR0 ... DRCMR63:
275         channel = (offset - DRCMR0) >> 2;
276         return s->req[channel];
277 
278     case DRQSR0:
279     case DRQSR1:
280     case DRQSR2:
281         return 0;
282 
283     case DCSR0 ... DCSR31:
284         channel = offset >> 2;
285 	if (s->chan[channel].request)
286             return s->chan[channel].state | DCSR_REQPEND;
287         return s->chan[channel].state;
288 
289     case DINT:
290         return s->stopintr | s->eorintr | s->rasintr |
291                 s->startintr | s->endintr;
292 
293     case DALGN:
294         return s->align;
295 
296     case DPCSR:
297         return s->pio;
298     }
299 
300     if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
301         channel = (offset - D_CH0) >> 4;
302         switch ((offset & 0x0f) >> 2) {
303         case DDADR:
304             return s->chan[channel].descr;
305         case DSADR:
306             return s->chan[channel].src;
307         case DTADR:
308             return s->chan[channel].dest;
309         case DCMD:
310             return s->chan[channel].cmd;
311         }
312     }
313 
314     hw_error("%s: Bad offset 0x" TARGET_FMT_plx "\n", __FUNCTION__, offset);
315     return 7;
316 }
317 
318 static void pxa2xx_dma_write(void *opaque, hwaddr offset,
319                              uint64_t value, unsigned size)
320 {
321     PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
322     unsigned int channel;
323 
324     if (size != 4) {
325         hw_error("%s: Bad access width\n", __FUNCTION__);
326         return;
327     }
328 
329     switch (offset) {
330     case DRCMR64 ... DRCMR74:
331         offset -= DRCMR64 - DRCMR0 - (64 << 2);
332         /* Fall through */
333     case DRCMR0 ... DRCMR63:
334         channel = (offset - DRCMR0) >> 2;
335 
336         if (value & DRCMR_MAPVLD)
337             if ((value & DRCMR_CHLNUM) > s->channels)
338                 hw_error("%s: Bad DMA channel %i\n",
339                          __FUNCTION__, (unsigned)value & DRCMR_CHLNUM);
340 
341         s->req[channel] = value;
342         break;
343 
344     case DRQSR0:
345     case DRQSR1:
346     case DRQSR2:
347         /* Nothing to do */
348         break;
349 
350     case DCSR0 ... DCSR31:
351         channel = offset >> 2;
352         s->chan[channel].state &= 0x0000071f & ~(value &
353                         (DCSR_EORINT | DCSR_ENDINTR |
354                          DCSR_STARTINTR | DCSR_BUSERRINTR));
355         s->chan[channel].state |= value & 0xfc800000;
356 
357         if (s->chan[channel].state & DCSR_STOPIRQEN)
358             s->chan[channel].state &= ~DCSR_STOPINTR;
359 
360         if (value & DCSR_NODESCFETCH) {
361             /* No-descriptor-fetch mode */
362             if (value & DCSR_RUN) {
363                 s->chan[channel].state &= ~DCSR_STOPINTR;
364                 pxa2xx_dma_run(s);
365             }
366         } else {
367             /* Descriptor-fetch mode */
368             if (value & DCSR_RUN) {
369                 s->chan[channel].state &= ~DCSR_STOPINTR;
370                 pxa2xx_dma_descriptor_fetch(s, channel);
371                 pxa2xx_dma_run(s);
372             }
373         }
374 
375         /* Shouldn't matter as our DMA is synchronous.  */
376         if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
377             s->chan[channel].state |= DCSR_STOPINTR;
378 
379         if (value & DCSR_CLRCMPST)
380             s->chan[channel].state &= ~DCSR_CMPST;
381         if (value & DCSR_SETCMPST)
382             s->chan[channel].state |= DCSR_CMPST;
383 
384         pxa2xx_dma_update(s, channel);
385         break;
386 
387     case DALGN:
388         s->align = value;
389         break;
390 
391     case DPCSR:
392         s->pio = value & 0x80000001;
393         break;
394 
395     default:
396         if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
397             channel = (offset - D_CH0) >> 4;
398             switch ((offset & 0x0f) >> 2) {
399             case DDADR:
400                 s->chan[channel].descr = value;
401                 break;
402             case DSADR:
403                 s->chan[channel].src = value;
404                 break;
405             case DTADR:
406                 s->chan[channel].dest = value;
407                 break;
408             case DCMD:
409                 s->chan[channel].cmd = value;
410                 break;
411             default:
412                 goto fail;
413             }
414 
415             break;
416         }
417     fail:
418         hw_error("%s: Bad offset " TARGET_FMT_plx "\n", __FUNCTION__, offset);
419     }
420 }
421 
/* MMIO access table; both callbacks reject non-32-bit accesses.  */
static const MemoryRegionOps pxa2xx_dma_ops = {
    .read = pxa2xx_dma_read,
    .write = pxa2xx_dma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
427 
428 static void pxa2xx_dma_request(void *opaque, int req_num, int on)
429 {
430     PXA2xxDMAState *s = opaque;
431     int ch;
432     if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
433         hw_error("%s: Bad DMA request %i\n", __FUNCTION__, req_num);
434 
435     if (!(s->req[req_num] & DRCMR_MAPVLD))
436         return;
437     ch = s->req[req_num] & DRCMR_CHLNUM;
438 
439     if (!s->chan[ch].request && on)
440         s->chan[ch].state |= DCSR_RASINTR;
441     else
442         s->chan[ch].state &= ~DCSR_RASINTR;
443     if (s->chan[ch].request && !on)
444         s->chan[ch].state |= DCSR_EORINT;
445 
446     s->chan[ch].request = on;
447     if (on) {
448         pxa2xx_dma_run(s);
449         pxa2xx_dma_update(s, ch);
450     }
451 }
452 
453 static int pxa2xx_dma_init(SysBusDevice *sbd)
454 {
455     DeviceState *dev = DEVICE(sbd);
456     PXA2xxDMAState *s = PXA2XX_DMA(dev);
457     int i;
458 
459     if (s->channels <= 0) {
460         return -1;
461     }
462 
463     s->chan = g_new0(PXA2xxDMAChannel, s->channels);
464 
465     for (i = 0; i < s->channels; i ++)
466         s->chan[i].state = DCSR_STOPINTR;
467 
468     memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);
469 
470     qdev_init_gpio_in(dev, pxa2xx_dma_request, PXA2XX_DMA_NUM_REQUESTS);
471 
472     memory_region_init_io(&s->iomem, OBJECT(s), &pxa2xx_dma_ops, s,
473                           "pxa2xx.dma", 0x00010000);
474     sysbus_init_mmio(sbd, &s->iomem);
475     sysbus_init_irq(sbd, &s->irq);
476 
477     return 0;
478 }
479 
480 DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq)
481 {
482     DeviceState *dev;
483 
484     dev = qdev_create(NULL, "pxa2xx-dma");
485     qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
486     qdev_init_nofail(dev);
487 
488     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
489     sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
490 
491     return dev;
492 }
493 
494 DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq)
495 {
496     DeviceState *dev;
497 
498     dev = qdev_create(NULL, "pxa2xx-dma");
499     qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
500     qdev_init_nofail(dev);
501 
502     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
503     sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
504 
505     return dev;
506 }
507 
/* VMState predicate: true only for the legacy version-0 stream layout. */
static bool is_version_0(void *opaque, int version_id)
{
    if (version_id == 0) {
        return true;
    }
    return false;
}
512 
/* Migration layout for one DMA channel: all five shadow registers plus
 * the current DREQ line level.  */
static VMStateDescription vmstate_pxa2xx_dma_chan = {
    .name = "pxa2xx_dma_chan",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(descr, PXA2xxDMAChannel),
        VMSTATE_UINT32(src, PXA2xxDMAChannel),
        VMSTATE_UINT32(dest, PXA2xxDMAChannel),
        VMSTATE_UINT32(cmd, PXA2xxDMAChannel),
        VMSTATE_UINT32(state, PXA2xxDMAChannel),
        VMSTATE_INT32(request, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};
527 
/* Migration layout for the whole controller.  Version-0 streams carry
 * four extra bytes which are skipped via the is_version_0 test; the
 * channel array length follows the "channels" property.  */
static VMStateDescription vmstate_pxa2xx_dma = {
    .name = "pxa2xx_dma",
    .version_id = 1,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED_TEST(is_version_0, 4),
        VMSTATE_UINT32(stopintr, PXA2xxDMAState),
        VMSTATE_UINT32(eorintr, PXA2xxDMAState),
        VMSTATE_UINT32(rasintr, PXA2xxDMAState),
        VMSTATE_UINT32(startintr, PXA2xxDMAState),
        VMSTATE_UINT32(endintr, PXA2xxDMAState),
        VMSTATE_UINT32(align, PXA2xxDMAState),
        VMSTATE_UINT32(pio, PXA2xxDMAState),
        VMSTATE_BUFFER(req, PXA2xxDMAState),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels,
                vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};
547 
/* "channels" must be set by the board code (16 or 32); the default of
 * -1 makes pxa2xx_dma_init() fail if it was left unset.  */
static Property pxa2xx_dma_properties[] = {
    DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1),
    DEFINE_PROP_END_OF_LIST(),
};
552 
553 static void pxa2xx_dma_class_init(ObjectClass *klass, void *data)
554 {
555     DeviceClass *dc = DEVICE_CLASS(klass);
556     SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
557 
558     k->init = pxa2xx_dma_init;
559     dc->desc = "PXA2xx DMA controller";
560     dc->vmsd = &vmstate_pxa2xx_dma;
561     dc->props = pxa2xx_dma_properties;
562 }
563 
/* QOM type registration record for TYPE_PXA2XX_DMA.  */
static const TypeInfo pxa2xx_dma_info = {
    .name          = TYPE_PXA2XX_DMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PXA2xxDMAState),
    .class_init    = pxa2xx_dma_class_init,
};
570 
/* Register the device model with the QOM type system at startup.  */
static void pxa2xx_dma_register_types(void)
{
    type_register_static(&pxa2xx_dma_info);
}

type_init(pxa2xx_dma_register_types)
577