/*
 * Xilinx Platform CSU Stream DMA emulation
 *
 * This implementation is based on
 * https://github.com/Xilinx/qemu/blob/master/hw/dma/csu_stream_dma.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/hw.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "hw/ptimer.h"
#include "hw/stream.h"
#include "hw/register.h"
#include "hw/dma/xlnx_csu_dma.h"

/*
 * Ref: UG1087 (v1.7) February 8, 2019
 * https://www.xilinx.com/html_docs/registers/ug1087/ug1087-zynq-ultrascale-registers.html
 * CSUDMA Module section
 */
REG32(ADDR, 0x0)
    FIELD(ADDR, ADDR, 2, 30) /* wo */
REG32(SIZE, 0x4)
    FIELD(SIZE, SIZE, 2, 27) /* wo */
    FIELD(SIZE, LAST_WORD, 0, 1) /* rw, only exists in SRC */
REG32(STATUS, 0x8)
    FIELD(STATUS, DONE_CNT, 13, 3) /* wtc */
    FIELD(STATUS, FIFO_LEVEL, 5, 8) /* ro */
    FIELD(STATUS, OUTSTANDING, 1, 4) /* ro */
    FIELD(STATUS, BUSY, 0, 1) /* ro */
REG32(CTRL, 0xc)
    FIELD(CTRL, FIFOTHRESH, 25, 7) /* rw, only exists in DST, reset 0x40 */
    FIELD(CTRL, APB_ERR_RESP, 24, 1) /* rw */
    FIELD(CTRL, ENDIANNESS, 23, 1) /* rw */
    FIELD(CTRL, AXI_BRST_TYPE, 22, 1) /* rw */
    FIELD(CTRL, TIMEOUT_VAL, 10, 12) /* rw, reset: 0xFFE */
    FIELD(CTRL, FIFO_THRESH, 2, 8) /* rw, reset: 0x80 */
    FIELD(CTRL, PAUSE_STRM, 1, 1) /* rw */
    FIELD(CTRL, PAUSE_MEM, 0, 1) /* rw */
REG32(CRC, 0x10)
REG32(INT_STATUS, 0x14)
    FIELD(INT_STATUS, FIFO_OVERFLOW, 7, 1) /* wtc */
    FIELD(INT_STATUS, INVALID_APB, 6, 1) /* wtc */
    FIELD(INT_STATUS, THRESH_HIT, 5, 1) /* wtc */
    FIELD(INT_STATUS, TIMEOUT_MEM, 4, 1) /* wtc */
    FIELD(INT_STATUS, TIMEOUT_STRM, 3, 1) /* wtc */
    FIELD(INT_STATUS, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */
    FIELD(INT_STATUS, DONE, 1, 1) /* wtc */
    FIELD(INT_STATUS, MEM_DONE, 0, 1) /* wtc */
REG32(INT_ENABLE, 0x18)
    FIELD(INT_ENABLE, FIFO_OVERFLOW, 7, 1) /* wtc */
    FIELD(INT_ENABLE, INVALID_APB, 6, 1) /* wtc */
    FIELD(INT_ENABLE, THRESH_HIT, 5, 1) /* wtc */
    FIELD(INT_ENABLE, TIMEOUT_MEM, 4, 1) /* wtc */
    FIELD(INT_ENABLE, TIMEOUT_STRM, 3, 1) /* wtc */
    FIELD(INT_ENABLE, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */
    FIELD(INT_ENABLE, DONE, 1, 1) /* wtc */
    FIELD(INT_ENABLE, MEM_DONE, 0, 1) /* wtc */
REG32(INT_DISABLE, 0x1c)
    FIELD(INT_DISABLE, FIFO_OVERFLOW, 7, 1) /* wtc */
    FIELD(INT_DISABLE, INVALID_APB, 6, 1) /* wtc */
    FIELD(INT_DISABLE, THRESH_HIT, 5, 1) /* wtc */
    FIELD(INT_DISABLE, TIMEOUT_MEM, 4, 1) /* wtc */
    FIELD(INT_DISABLE, TIMEOUT_STRM, 3, 1) /* wtc */
    FIELD(INT_DISABLE, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */
    FIELD(INT_DISABLE, DONE, 1, 1) /* wtc */
    FIELD(INT_DISABLE, MEM_DONE, 0, 1) /* wtc */
REG32(INT_MASK, 0x20)
    FIELD(INT_MASK, FIFO_OVERFLOW, 7, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, INVALID_APB, 6, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, THRESH_HIT, 5, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, TIMEOUT_MEM, 4, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, TIMEOUT_STRM, 3, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, AXI_BRESP_ERR, 2, 1) /* ro, reset: 0x1, SRC: AXI_RDERR */
    FIELD(INT_MASK, DONE, 1, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, MEM_DONE, 0, 1) /* ro, reset: 0x1 */
REG32(CTRL2, 0x24)
    FIELD(CTRL2, ARCACHE, 24, 3) /* rw */
    FIELD(CTRL2, ROUTE_BIT, 23, 1) /* rw */
    FIELD(CTRL2, TIMEOUT_EN, 22, 1) /* rw */
    FIELD(CTRL2, TIMEOUT_PRE, 4, 12) /* rw, reset: 0xFFF */
    FIELD(CTRL2, MAX_OUTS_CMDS, 0, 4) /* rw, reset: 0x8 */
REG32(ADDR_MSB, 0x28)
    FIELD(ADDR_MSB, ADDR_MSB, 0, 17) /* wo */

#define R_CTRL_TIMEOUT_VAL_RESET    (0xFFE)
#define R_CTRL_FIFO_THRESH_RESET    (0x80)
#define R_CTRL_FIFOTHRESH_RESET     (0x40)

#define R_CTRL2_TIMEOUT_PRE_RESET   (0xFFF)
#define R_CTRL2_MAX_OUTS_CMDS_RESET (0x8)

#define XLNX_CSU_DMA_ERR_DEBUG      (0)
#define XLNX_CSU_DMA_INT_R_MASK     (0xff)

/* UG1807: Set the prescaler value for the timeout in clk (~2.5ns) cycles */
#define XLNX_CSU_DMA_TIMER_FREQ     (400 * 1000 * 1000)

static bool xlnx_csu_dma_is_paused(XlnxCSUDMA *s)
{
    bool paused;

    paused = !!(s->regs[R_CTRL] & R_CTRL_PAUSE_STRM_MASK);
    paused |= !!(s->regs[R_CTRL] & R_CTRL_PAUSE_MEM_MASK);

    return paused;
}

static bool xlnx_csu_dma_get_eop(XlnxCSUDMA *s)
{
    return s->r_size_last_word;
}

static bool xlnx_csu_dma_burst_is_fixed(XlnxCSUDMA *s)
{
    return !!(s->regs[R_CTRL] & R_CTRL_AXI_BRST_TYPE_MASK);
}

static bool xlnx_csu_dma_timeout_enabled(XlnxCSUDMA *s)
{
    return !!(s->regs[R_CTRL2] & R_CTRL2_TIMEOUT_EN_MASK);
}

static void xlnx_csu_dma_update_done_cnt(XlnxCSUDMA *s, int a)
{
    int cnt;
    /* Adjust DONE_CNT by 'a' */
    cnt = ARRAY_FIELD_EX32(s->regs, STATUS, DONE_CNT) + a;
    ARRAY_FIELD_DP32(s->regs, STATUS, DONE_CNT, cnt);
}

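/*
 * Per-word processing of a DMA buffer: on the SRC channel every 32-bit
 * word is accumulated into the CRC register (modelled here as a running
 * sum), and when CTRL.ENDIANNESS is set each word is byte-swapped in place.
 */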
static void xlnx_csu_dma_data_process(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
{
    uint32_t bswap;
    uint32_t i;

    bswap = s->regs[R_CTRL] & R_CTRL_ENDIANNESS_MASK;
    if (s->is_dst && !bswap) {
        /* Fast when ENDIANNESS cleared */
        return;
    }

    for (i = 0; i < len; i += 4) {
        uint8_t *b = &buf[i];
        union {
            uint8_t u8[4];
            uint32_t u32;
        } v = {
            .u8 = { b[0], b[1], b[2], b[3] }
        };

        if (!s->is_dst) {
            s->regs[R_CRC] += v.u32;
        }
        if (bswap) {
            /*
             * No point using bswap, we need to writeback
             * into a potentially unaligned pointer.
             */
            b[0] = v.u8[3];
            b[1] = v.u8[2];
            b[2] = v.u8[1];
            b[3] = v.u8[0];
        }
    }
}

static void xlnx_csu_dma_update_irq(XlnxCSUDMA *s)
{
    qemu_set_irq(s->irq, !!(s->regs[R_INT_STATUS] & ~s->regs[R_INT_MASK]));
}

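/*
 * Memory-side read for the SRC channel: fetch data at ADDR/ADDR_MSB into
 * 'buf'.  With a fixed AXI burst type the same address is re-read in
 * 'width'-sized chunks; a failed access raises AXI_BRESP_ERR (AXI_RDERR).
 */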
/* len is in bytes */
static uint32_t xlnx_csu_dma_read(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
{
    hwaddr addr = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];
    MemTxResult result = MEMTX_OK;

    if (xlnx_csu_dma_burst_is_fixed(s)) {
        uint32_t i;

        for (i = 0; i < len && (result == MEMTX_OK); i += s->width) {
            uint32_t mlen = MIN(len - i, s->width);

            result = address_space_rw(s->dma_as, addr, s->attr,
                                      buf + i, mlen, false);
        }
    } else {
        result = address_space_rw(s->dma_as, addr, s->attr, buf, len, false);
    }

    if (result == MEMTX_OK) {
        xlnx_csu_dma_data_process(s, buf, len);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address " TARGET_FMT_plx
                      " for mem read", __func__, addr);
        s->regs[R_INT_STATUS] |= R_INT_STATUS_AXI_BRESP_ERR_MASK;
        xlnx_csu_dma_update_irq(s);
    }
    return len;
}

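/*
 * Memory-side write for the DST channel: apply the endianness conversion
 * and store 'buf' at ADDR/ADDR_MSB, honouring the fixed burst type in the
 * same way as the read path.  A failed access raises AXI_BRESP_ERR.
 */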
/* len is in bytes */
static uint32_t xlnx_csu_dma_write(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
{
    hwaddr addr = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];
    MemTxResult result = MEMTX_OK;

    xlnx_csu_dma_data_process(s, buf, len);
    if (xlnx_csu_dma_burst_is_fixed(s)) {
        uint32_t i;

        for (i = 0; i < len && (result == MEMTX_OK); i += s->width) {
            uint32_t mlen = MIN(len - i, s->width);

            result = address_space_rw(s->dma_as, addr, s->attr,
                                      buf, mlen, true);
            buf += mlen;
        }
    } else {
        result = address_space_rw(s->dma_as, addr, s->attr, buf, len, true);
    }

    if (result != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address " TARGET_FMT_plx
                      " for mem write", __func__, addr);
        s->regs[R_INT_STATUS] |= R_INT_STATUS_AXI_BRESP_ERR_MASK;
        xlnx_csu_dma_update_irq(s);
    }
    return len;
}

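/* Mark the channel idle and raise DONE (plus MEM_DONE on the SRC channel). */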
static void xlnx_csu_dma_done(XlnxCSUDMA *s)
{
    s->regs[R_STATUS] &= ~R_STATUS_BUSY_MASK;
    s->regs[R_INT_STATUS] |= R_INT_STATUS_DONE_MASK;

    if (!s->is_dst) {
        s->regs[R_INT_STATUS] |= R_INT_STATUS_MEM_DONE_MASK;
    }

    xlnx_csu_dma_update_done_cnt(s, 1);
}

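/*
 * Consume 'len' bytes of the current transfer: decrement SIZE, advance the
 * 64-bit address unless a fixed burst is configured, and signal completion
 * once SIZE reaches zero.  Returns the number of bytes still outstanding.
 */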
static uint32_t xlnx_csu_dma_advance(XlnxCSUDMA *s, uint32_t len)
{
    uint32_t size = s->regs[R_SIZE];
    hwaddr dst = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];

    assert(len <= size);

    size -= len;
    s->regs[R_SIZE] = size;

    if (!xlnx_csu_dma_burst_is_fixed(s)) {
        dst += len;
        s->regs[R_ADDR] = (uint32_t) dst;
        s->regs[R_ADDR_MSB] = dst >> 32;
    }

    if (size == 0) {
        xlnx_csu_dma_done(s);
    }

    return size;
}

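/*
 * SRC channel pump: move data from memory to the stream sink until the
 * transfer is exhausted, the channel is paused, or the sink applies
 * backpressure.  If the sink stalls and timeouts are enabled, arm the
 * backpressure timer using TIMEOUT_VAL and the TIMEOUT_PRE prescaler.
 */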
static void xlnx_csu_dma_src_notify(void *opaque)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(opaque);
    unsigned char buf[4 * 1024];
    size_t rlen = 0;

    ptimer_transaction_begin(s->src_timer);
    /* Stop the backpressure timer */
    ptimer_stop(s->src_timer);

    while (s->regs[R_SIZE] && !xlnx_csu_dma_is_paused(s) &&
           stream_can_push(s->tx_dev, xlnx_csu_dma_src_notify, s)) {
        uint32_t plen = MIN(s->regs[R_SIZE], sizeof buf);
        bool eop = false;

        /* Did we fit it all? */
        if (s->regs[R_SIZE] == plen && xlnx_csu_dma_get_eop(s)) {
            eop = true;
        }

        /* DMA transfer */
        xlnx_csu_dma_read(s, buf, plen);
        rlen = stream_push(s->tx_dev, buf, plen, eop);
        xlnx_csu_dma_advance(s, rlen);
    }

    if (xlnx_csu_dma_timeout_enabled(s) && s->regs[R_SIZE] &&
        !stream_can_push(s->tx_dev, xlnx_csu_dma_src_notify, s)) {
        uint32_t timeout = ARRAY_FIELD_EX32(s->regs, CTRL, TIMEOUT_VAL);
        uint32_t div = ARRAY_FIELD_EX32(s->regs, CTRL2, TIMEOUT_PRE) + 1;
        uint32_t freq = XLNX_CSU_DMA_TIMER_FREQ;

        freq /= div;
        ptimer_set_freq(s->src_timer, freq);
        ptimer_set_count(s->src_timer, timeout);
        ptimer_run(s->src_timer, 1);
    }

    ptimer_transaction_commit(s->src_timer);
    xlnx_csu_dma_update_irq(s);
}

static uint64_t addr_pre_write(RegisterInfo *reg, uint64_t val)
{
    /* Address is word aligned */
    return val & R_ADDR_ADDR_MASK;
}

static uint64_t size_pre_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    if (s->regs[R_SIZE] != 0) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Starting DMA while already running.\n", __func__);
    }

    if (!s->is_dst) {
        s->r_size_last_word = !!(val & R_SIZE_LAST_WORD_MASK);
    }

    /* Size is word aligned */
    return val & R_SIZE_SIZE_MASK;
}

static uint64_t size_post_read(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    return val | s->r_size_last_word;
}

static void size_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    s->regs[R_STATUS] |= R_STATUS_BUSY_MASK;

    /*
     * Note that if SIZE is programmed to 0, and the DMA is started,
     * the interrupts DONE and MEM_DONE will be asserted.
     */
    if (s->regs[R_SIZE] == 0) {
        xlnx_csu_dma_done(s);
        xlnx_csu_dma_update_irq(s);
        return;
    }

    /* Writing SIZE is the last step in the transfer configuration */
    if (!s->is_dst) {
        xlnx_csu_dma_src_notify(s);
    } else {
        if (s->notify) {
            s->notify(s->notify_opaque);
        }
    }
}

static uint64_t status_pre_write(RegisterInfo *reg, uint64_t val)
{
    return val & (R_STATUS_DONE_CNT_MASK | R_STATUS_BUSY_MASK);
}

static void ctrl_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    if (!s->is_dst) {
        if (!xlnx_csu_dma_is_paused(s)) {
            xlnx_csu_dma_src_notify(s);
        }
    } else {
        if (!xlnx_csu_dma_is_paused(s) && s->notify) {
            s->notify(s->notify_opaque);
        }
    }
}

static uint64_t int_status_pre_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    /* DMA counter decrements when flag 'DONE' is cleared */
    if ((val & s->regs[R_INT_STATUS] & R_INT_STATUS_DONE_MASK)) {
        xlnx_csu_dma_update_done_cnt(s, -1);
    }

    return s->regs[R_INT_STATUS] & ~val;
}

static void int_status_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    xlnx_csu_dma_update_irq(s);
}

static uint64_t int_enable_pre_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
    uint32_t v32 = val;

    /*
     * R_INT_ENABLE doesn't have its own state.
     * It is used to indirectly modify R_INT_MASK.
     *
     * 1: Enable this interrupt field (the mask bit will be cleared to 0)
     * 0: No effect
     */
    s->regs[R_INT_MASK] &= ~v32;
    return 0;
}

static void int_enable_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    xlnx_csu_dma_update_irq(s);
}

static uint64_t int_disable_pre_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
    uint32_t v32 = val;

    /*
     * R_INT_DISABLE doesn't have its own state.
     * It is used to indirectly modify R_INT_MASK.
     *
     * 1: Disable this interrupt field (the mask bit will be set to 1)
     * 0: No effect
     */
    s->regs[R_INT_MASK] |= v32;
    return 0;
}

static void int_disable_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    xlnx_csu_dma_update_irq(s);
}

static uint64_t addr_msb_pre_write(RegisterInfo *reg, uint64_t val)
{
    return val & R_ADDR_MSB_ADDR_MSB_MASK;
}

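/*
 * The SRC and DST channels share the same register layout; DMACH_REGINFO
 * instantiates it twice, with the 'snd' argument selecting the SRC (send)
 * variant so that the DST-only CTRL.FIFOTHRESH reset value is only applied
 * on the DST channel.
 */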
static const RegisterAccessInfo *xlnx_csu_dma_regs_info[] = {
#define DMACH_REGINFO(NAME, snd)                                              \
    (const RegisterAccessInfo []) {                                           \
        {                                                                     \
            .name = #NAME "_ADDR",                                            \
            .addr = A_ADDR,                                                   \
            .pre_write = addr_pre_write                                       \
        }, {                                                                  \
            .name = #NAME "_SIZE",                                            \
            .addr = A_SIZE,                                                   \
            .pre_write = size_pre_write,                                      \
            .post_write = size_post_write,                                    \
            .post_read = size_post_read                                       \
        }, {                                                                  \
            .name = #NAME "_STATUS",                                          \
            .addr = A_STATUS,                                                 \
            .pre_write = status_pre_write,                                    \
            .w1c = R_STATUS_DONE_CNT_MASK,                                    \
            .ro = (R_STATUS_BUSY_MASK                                         \
                   | R_STATUS_FIFO_LEVEL_MASK                                 \
                   | R_STATUS_OUTSTANDING_MASK)                               \
        }, {                                                                  \
            .name = #NAME "_CTRL",                                            \
            .addr = A_CTRL,                                                   \
            .post_write = ctrl_post_write,                                    \
            .reset = ((R_CTRL_TIMEOUT_VAL_RESET << R_CTRL_TIMEOUT_VAL_SHIFT)  \
                      | (R_CTRL_FIFO_THRESH_RESET << R_CTRL_FIFO_THRESH_SHIFT)\
                      | (snd ? 0 : R_CTRL_FIFOTHRESH_RESET                    \
                         << R_CTRL_FIFOTHRESH_SHIFT))                         \
        }, {                                                                  \
            .name = #NAME "_CRC",                                             \
            .addr = A_CRC,                                                    \
        }, {                                                                  \
            .name =  #NAME "_INT_STATUS",                                     \
            .addr = A_INT_STATUS,                                             \
            .pre_write = int_status_pre_write,                                \
            .post_write = int_status_post_write                               \
        }, {                                                                  \
            .name = #NAME "_INT_ENABLE",                                      \
            .addr = A_INT_ENABLE,                                             \
            .pre_write = int_enable_pre_write,                                \
            .post_write = int_enable_post_write                               \
        }, {                                                                  \
            .name = #NAME "_INT_DISABLE",                                     \
            .addr = A_INT_DISABLE,                                            \
            .pre_write = int_disable_pre_write,                               \
            .post_write = int_disable_post_write                              \
        }, {                                                                  \
            .name = #NAME "_INT_MASK",                                        \
            .addr = A_INT_MASK,                                               \
            .ro = ~0,                                                         \
            .reset = XLNX_CSU_DMA_INT_R_MASK                                  \
        }, {                                                                  \
            .name = #NAME "_CTRL2",                                           \
            .addr = A_CTRL2,                                                  \
            .reset = ((R_CTRL2_TIMEOUT_PRE_RESET                              \
                       << R_CTRL2_TIMEOUT_PRE_SHIFT)                          \
                      | (R_CTRL2_MAX_OUTS_CMDS_RESET                          \
                         << R_CTRL2_MAX_OUTS_CMDS_SHIFT))                     \
        }, {                                                                  \
            .name = #NAME "_ADDR_MSB",                                        \
            .addr = A_ADDR_MSB,                                               \
            .pre_write = addr_msb_pre_write                                   \
        }                                                                     \
    }

    DMACH_REGINFO(DMA_SRC, true),
    DMACH_REGINFO(DMA_DST, false)
};

static const MemoryRegionOps xlnx_csu_dma_ops = {
    .read = register_read_memory,
    .write = register_write_memory,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    }
};

static void xlnx_csu_dma_src_timeout_hit(void *opaque)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(opaque);

    /* Ignore if the timeout is disabled */
    if (!xlnx_csu_dma_timeout_enabled(s)) {
        return;
    }

    s->regs[R_INT_STATUS] |= R_INT_STATUS_TIMEOUT_STRM_MASK;
    xlnx_csu_dma_update_irq(s);
}

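/*
 * DST channel entry point (StreamSink push): write incoming stream data to
 * memory.  Data is dropped (raising FIFO_OVERFLOW) if the channel is paused
 * or less than one full word can be accepted; otherwise the transfer state
 * is advanced by the number of bytes written.
 */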
static size_t xlnx_csu_dma_stream_push(StreamSink *obj, uint8_t *buf,
                                       size_t len, bool eop)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(obj);
    uint32_t size = s->regs[R_SIZE];
    uint32_t mlen = MIN(size, len) & (~3); /* Size is word aligned */

    /* Only called on the DST channel */
    assert(s->is_dst);

    if (size == 0 || len <= 0) {
        return 0;
    }

    if (len && (xlnx_csu_dma_is_paused(s) || mlen == 0)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "csu-dma: DST channel dropping %zd b of data.\n", len);
        s->regs[R_INT_STATUS] |= R_INT_STATUS_FIFO_OVERFLOW_MASK;
        return len;
    }

    if (xlnx_csu_dma_write(s, buf, mlen) != mlen) {
        return 0;
    }

    xlnx_csu_dma_advance(s, mlen);
    xlnx_csu_dma_update_irq(s);

    return mlen;
}

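/*
 * StreamSink can_push: the DST channel accepts data while SIZE is non-zero;
 * otherwise the notify callback is stashed so it can be invoked once a new
 * transfer is programmed (see size_post_write/ctrl_post_write).
 */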
static bool xlnx_csu_dma_stream_can_push(StreamSink *obj,
                                         StreamCanPushNotifyFn notify,
                                         void *notify_opaque)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(obj);

    if (s->regs[R_SIZE] != 0) {
        return true;
    } else {
        s->notify = notify;
        s->notify_opaque = notify_opaque;
        return false;
    }
}

static void xlnx_csu_dma_reset(DeviceState *dev)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(dev);
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
        register_reset(&s->regs_info[i]);
    }
}

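/*
 * Realize: map the register block, require a stream sink on the SRC
 * channel, set up the backpressure timer, and select the DMA address space
 * from the "dma" link if set, falling back to the global system memory.
 */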
static void xlnx_csu_dma_realize(DeviceState *dev, Error **errp)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(dev);
    RegisterInfoArray *reg_array;

    reg_array =
        register_init_block32(dev, xlnx_csu_dma_regs_info[!!s->is_dst],
                              XLNX_CSU_DMA_R_MAX,
                              s->regs_info, s->regs,
                              &xlnx_csu_dma_ops,
                              XLNX_CSU_DMA_ERR_DEBUG,
                              XLNX_CSU_DMA_R_MAX * 4);
    memory_region_add_subregion(&s->iomem,
                                0x0,
                                &reg_array->mem);

    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
    sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq);

    if (!s->is_dst && !s->tx_dev) {
        error_setg(errp, "zynqmp.csu-dma: Stream not connected");
        return;
    }

    s->src_timer = ptimer_init(xlnx_csu_dma_src_timeout_hit,
                               s, PTIMER_POLICY_DEFAULT);

    if (s->dma_mr) {
        s->dma_as = g_malloc0(sizeof(AddressSpace));
        address_space_init(s->dma_as, s->dma_mr, NULL);
    } else {
        s->dma_as = &address_space_memory;
    }

    s->attr = MEMTXATTRS_UNSPECIFIED;

    s->r_size_last_word = 0;
}

static const VMStateDescription vmstate_xlnx_csu_dma = {
    .name = TYPE_XLNX_CSU_DMA,
    .version_id = 0,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .fields = (VMStateField[]) {
        VMSTATE_PTIMER(src_timer, XlnxCSUDMA),
        VMSTATE_UINT16(width, XlnxCSUDMA),
        VMSTATE_BOOL(is_dst, XlnxCSUDMA),
        VMSTATE_BOOL(r_size_last_word, XlnxCSUDMA),
        VMSTATE_UINT32_ARRAY(regs, XlnxCSUDMA, XLNX_CSU_DMA_R_MAX),
        VMSTATE_END_OF_LIST(),
    }
};

static Property xlnx_csu_dma_properties[] = {
    /*
     * Ref PG021, Stream Data Width:
     * Data width in bits of the AXI S2MM AXI4-Stream Data bus.
     * This value must be equal to or less than the Memory Map Data Width.
     * Valid values are 8, 16, 32, 64, 128, 512 and 1024.
     * "dma-width" is the byte value of the "Stream Data Width".
     */
    DEFINE_PROP_UINT16("dma-width", XlnxCSUDMA, width, 4),
    /*
     * The CSU DMA is a two-channel, simple DMA, allowing separate control of
     * the SRC (read) channel and DST (write) channel. "is-dst" is used to mark
     * which channel the device is connected to.
     */
    DEFINE_PROP_BOOL("is-dst", XlnxCSUDMA, is_dst, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void xlnx_csu_dma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    StreamSinkClass *ssc = STREAM_SINK_CLASS(klass);

    dc->reset = xlnx_csu_dma_reset;
    dc->realize = xlnx_csu_dma_realize;
    dc->vmsd = &vmstate_xlnx_csu_dma;
    device_class_set_props(dc, xlnx_csu_dma_properties);

    ssc->push = xlnx_csu_dma_stream_push;
    ssc->can_push = xlnx_csu_dma_stream_can_push;
}

static void xlnx_csu_dma_init(Object *obj)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(obj);

    memory_region_init(&s->iomem, obj, TYPE_XLNX_CSU_DMA,
                       XLNX_CSU_DMA_R_MAX * 4);

    object_property_add_link(obj, "stream-connected-dma", TYPE_STREAM_SINK,
                             (Object **)&s->tx_dev,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_STRONG);
    object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
                             (Object **)&s->dma_mr,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_STRONG);
}

static const TypeInfo xlnx_csu_dma_info = {
    .name          = TYPE_XLNX_CSU_DMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxCSUDMA),
    .class_init    = xlnx_csu_dma_class_init,
    .instance_init = xlnx_csu_dma_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SINK },
        { }
    }
};

static void xlnx_csu_dma_register_types(void)
{
    type_register_static(&xlnx_csu_dma_info);
}

type_init(xlnx_csu_dma_register_types)