1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 */
6 #define pr_fmt(fmt) "udma: " fmt
7
8 #include <common.h>
9 #include <cpu_func.h>
10 #include <log.h>
11 #include <asm/cache.h>
12 #include <asm/io.h>
13 #include <asm/bitops.h>
14 #include <malloc.h>
15 #include <linux/bitops.h>
16 #include <linux/dma-mapping.h>
17 #include <dm.h>
18 #include <dm/device_compat.h>
19 #include <dm/devres.h>
20 #include <dm/read.h>
21 #include <dm/of_access.h>
22 #include <dma.h>
23 #include <dma-uclass.h>
24 #include <linux/delay.h>
25 #include <linux/bitmap.h>
26 #include <linux/err.h>
27 #include <linux/soc/ti/k3-navss-ringacc.h>
28 #include <linux/soc/ti/cppi5.h>
29 #include <linux/soc/ti/ti-udma.h>
30 #include <linux/soc/ti/ti_sci_protocol.h>
32
33 #include "k3-udma-hwdef.h"
34 #include "k3-psil-priv.h"
35
36 #define K3_UDMA_MAX_RFLOWS 1024
37
38 struct udma_chan;
39
40 enum k3_dma_type {
41 DMA_TYPE_UDMA = 0,
42 DMA_TYPE_BCDMA,
43 DMA_TYPE_PKTDMA,
44 };
45
46 enum udma_mmr {
47 MMR_GCFG = 0,
48 MMR_BCHANRT,
49 MMR_RCHANRT,
50 MMR_TCHANRT,
51 MMR_LAST,
52 };
53
54 static const char * const mmr_names[] = {
55 [MMR_GCFG] = "gcfg",
56 [MMR_BCHANRT] = "bchanrt",
57 [MMR_RCHANRT] = "rchanrt",
58 [MMR_TCHANRT] = "tchanrt",
59 };
60
61 struct udma_tchan {
62 void __iomem *reg_rt;
63
64 int id;
65 struct k3_nav_ring *t_ring; /* Transmit ring */
66 struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
67 int tflow_id; /* applicable only for PKTDMA */
68
69 };
70
71 #define udma_bchan udma_tchan
72
73 struct udma_rflow {
74 int id;
75 struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
76 struct k3_nav_ring *r_ring; /* Receive ring */
77 };
78
79 struct udma_rchan {
80 void __iomem *reg_rt;
81
82 int id;
83 };
84
85 struct udma_oes_offsets {
86 /* K3 UDMA Output Event Offset */
87 u32 udma_rchan;
88
89 /* BCDMA Output Event Offsets */
90 u32 bcdma_bchan_data;
91 u32 bcdma_bchan_ring;
92 u32 bcdma_tchan_data;
93 u32 bcdma_tchan_ring;
94 u32 bcdma_rchan_data;
95 u32 bcdma_rchan_ring;
96
97 /* PKTDMA Output Event Offsets */
98 u32 pktdma_tchan_flow;
99 u32 pktdma_rchan_flow;
100 };
101
102 #define UDMA_FLAG_PDMA_ACC32 BIT(0)
103 #define UDMA_FLAG_PDMA_BURST BIT(1)
104 #define UDMA_FLAG_TDTYPE BIT(2)
105
106 struct udma_match_data {
107 enum k3_dma_type type;
108 u32 psil_base;
109 bool enable_memcpy_support;
110 u32 flags;
111 u32 statictr_z_mask;
112 struct udma_oes_offsets oes;
113
114 u8 tpl_levels;
115 u32 level_start_idx[];
116 };
117
118 enum udma_rm_range {
119 RM_RANGE_BCHAN = 0,
120 RM_RANGE_TCHAN,
121 RM_RANGE_RCHAN,
122 RM_RANGE_RFLOW,
123 RM_RANGE_TFLOW,
124 RM_RANGE_LAST,
125 };
126
127 struct udma_tisci_rm {
128 const struct ti_sci_handle *tisci;
129 const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
130 u32 tisci_dev_id;
131
132 /* tisci information for PSI-L thread pairing/unpairing */
133 const struct ti_sci_rm_psil_ops *tisci_psil_ops;
134 u32 tisci_navss_dev_id;
135
136 struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
137 };
138
139 struct udma_dev {
140 struct udevice *dev;
141 void __iomem *mmrs[MMR_LAST];
142
143 struct udma_tisci_rm tisci_rm;
144 struct k3_nav_ringacc *ringacc;
145
146 u32 features;
147
148 int bchan_cnt;
149 int tchan_cnt;
150 int echan_cnt;
151 int rchan_cnt;
152 int rflow_cnt;
153 int tflow_cnt;
154 unsigned long *bchan_map;
155 unsigned long *tchan_map;
156 unsigned long *rchan_map;
157 unsigned long *rflow_map;
158 unsigned long *rflow_map_reserved;
159 unsigned long *rflow_in_use;
160 unsigned long *tflow_map;
161
162 struct udma_bchan *bchans;
163 struct udma_tchan *tchans;
164 struct udma_rchan *rchans;
165 struct udma_rflow *rflows;
166
167 struct udma_match_data *match_data;
168
169 struct udma_chan *channels;
170 u32 psil_base;
171
172 u32 ch_count;
173 };
174
175 struct udma_chan_config {
176 u32 psd_size; /* size of Protocol Specific Data */
177 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
178 u32 hdesc_size; /* Size of a packet descriptor in packet mode */
179 int remote_thread_id;
180 u32 atype;
181 u32 src_thread;
182 u32 dst_thread;
183 enum psil_endpoint_type ep_type;
184 enum udma_tp_level channel_tpl; /* Channel Throughput Level */
185
186 /* PKTDMA mapped channel */
187 int mapped_channel_id;
188 /* PKTDMA default tflow or rflow for mapped channel */
189 int default_flow_id;
190
191 enum dma_direction dir;
192
193 unsigned int pkt_mode:1; /* TR or packet */
194 unsigned int needs_epib:1; /* EPIB is needed for the communication or not */
195 unsigned int enable_acc32:1;
196 unsigned int enable_burst:1;
197 unsigned int notdpkt:1; /* Suppress sending TDC packet */
198 };
199
200 struct udma_chan {
201 struct udma_dev *ud;
202 char name[20];
203
204 struct udma_bchan *bchan;
205 struct udma_tchan *tchan;
206 struct udma_rchan *rchan;
207 struct udma_rflow *rflow;
208
209 struct ti_udma_drv_chan_cfg_data cfg_data;
210
211 u32 bcnt; /* number of bytes completed since the start of the channel */
212
213 struct udma_chan_config config;
214
215 u32 id;
216
217 struct cppi5_host_desc_t *desc_tx;
218 bool in_use;
219 void *desc_rx;
220 u32 num_rx_bufs;
221 u32 desc_rx_cur;
222
223 };
224
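/*
 * Helpers to compute a channel's register offset from its index, given the
 * per-channel register block stride (0x1000, 0x100 or 0x40 bytes).
 */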
225 #define UDMA_CH_1000(ch) (ch * 0x1000)
226 #define UDMA_CH_100(ch) (ch * 0x100)
227 #define UDMA_CH_40(ch) (ch * 0x40)
228
229 #ifdef PKTBUFSRX
230 #define UDMA_RX_DESC_NUM PKTBUFSRX
231 #else
232 #define UDMA_RX_DESC_NUM 4
233 #endif
234
235 /* Generic register access functions */
236 static inline u32 udma_read(void __iomem *base, int reg)
237 {
238 u32 v;
239
240 v = __raw_readl(base + reg);
241 pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
242 return v;
243 }
244
245 static inline void udma_write(void __iomem *base, int reg, u32 val)
246 {
247 pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
248 __raw_writel(val, base + reg);
249 }
250
251 static inline void udma_update_bits(void __iomem *base, int reg,
252 u32 mask, u32 val)
253 {
254 u32 tmp, orig;
255
256 orig = udma_read(base, reg);
257 tmp = orig & ~mask;
258 tmp |= (val & mask);
259
260 if (tmp != orig)
261 udma_write(base, reg, tmp);
262 }
263
264 /* TCHANRT */
265 static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
266 {
267 if (!tchan)
268 return 0;
269 return udma_read(tchan->reg_rt, reg);
270 }
271
272 static inline void udma_tchanrt_write(struct udma_tchan *tchan,
273 int reg, u32 val)
274 {
275 if (!tchan)
276 return;
277 udma_write(tchan->reg_rt, reg, val);
278 }
279
280 /* RCHANRT */
281 static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
282 {
283 if (!rchan)
284 return 0;
285 return udma_read(rchan->reg_rt, reg);
286 }
287
288 static inline void udma_rchanrt_write(struct udma_rchan *rchan,
289 int reg, u32 val)
290 {
291 if (!rchan)
292 return;
293 udma_write(rchan->reg_rt, reg, val);
294 }
295
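/*
 * PSI-L thread pairing/unpairing through TI-SCI. The destination thread id
 * must carry the destination bit (UDMA_PSIL_DST_THREAD_ID_OFFSET) before the
 * request is sent to system firmware.
 */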
296 static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
297 u32 dst_thread)
298 {
299 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
300
301 dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
302
303 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
304 tisci_rm->tisci_navss_dev_id,
305 src_thread, dst_thread);
306 }
307
308 static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
309 u32 dst_thread)
310 {
311 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
312
313 dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
314
315 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
316 tisci_rm->tisci_navss_dev_id,
317 src_thread, dst_thread);
318 }
319
320 static inline char *udma_get_dir_text(enum dma_direction dir)
321 {
322 switch (dir) {
323 case DMA_DEV_TO_MEM:
324 return "DEV_TO_MEM";
325 case DMA_MEM_TO_DEV:
326 return "MEM_TO_DEV";
327 case DMA_MEM_TO_MEM:
328 return "MEM_TO_MEM";
329 case DMA_DEV_TO_DEV:
330 return "DEV_TO_DEV";
331 default:
332 break;
333 }
334
335 return "invalid";
336 }
337
338 static void udma_reset_uchan(struct udma_chan *uc)
339 {
340 memset(&uc->config, 0, sizeof(uc->config));
341 uc->config.remote_thread_id = -1;
342 uc->config.mapped_channel_id = -1;
343 uc->config.default_flow_id = -1;
344 }
345
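/*
 * A channel counts as running when the EN bit is set in the realtime CTL
 * register of the rchan and/or tchan used by its direction.
 */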
346 static inline bool udma_is_chan_running(struct udma_chan *uc)
347 {
348 u32 trt_ctl = 0;
349 u32 rrt_ctl = 0;
350
351 switch (uc->config.dir) {
352 case DMA_DEV_TO_MEM:
353 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
354 pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
355 __func__, rrt_ctl,
356 udma_rchanrt_read(uc->rchan,
357 UDMA_RCHAN_RT_PEER_RT_EN_REG));
358 break;
359 case DMA_MEM_TO_DEV:
360 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
361 pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
362 __func__, trt_ctl,
363 udma_tchanrt_read(uc->tchan,
364 UDMA_TCHAN_RT_PEER_RT_EN_REG));
365 break;
366 case DMA_MEM_TO_MEM:
367 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
368 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
369 break;
370 default:
371 break;
372 }
373
374 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
375 return true;
376
377 return false;
378 }
379
380 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
381 {
382 struct k3_nav_ring *ring = NULL;
383 int ret = -ENOENT;
384
385 switch (uc->config.dir) {
386 case DMA_DEV_TO_MEM:
387 ring = uc->rflow->r_ring;
388 break;
389 case DMA_MEM_TO_DEV:
390 ring = uc->tchan->tc_ring;
391 break;
392 case DMA_MEM_TO_MEM:
393 ring = uc->tchan->tc_ring;
394 break;
395 default:
396 break;
397 }
398
399 if (ring && k3_nav_ringacc_ring_get_occ(ring))
400 ret = k3_nav_ringacc_ring_pop(ring, addr);
401
402 return ret;
403 }
404
405 static void udma_reset_rings(struct udma_chan *uc)
406 {
407 struct k3_nav_ring *ring1 = NULL;
408 struct k3_nav_ring *ring2 = NULL;
409
410 switch (uc->config.dir) {
411 case DMA_DEV_TO_MEM:
412 ring1 = uc->rflow->fd_ring;
413 ring2 = uc->rflow->r_ring;
414 break;
415 case DMA_MEM_TO_DEV:
416 ring1 = uc->tchan->t_ring;
417 ring2 = uc->tchan->tc_ring;
418 break;
419 case DMA_MEM_TO_MEM:
420 ring1 = uc->tchan->t_ring;
421 ring2 = uc->tchan->tc_ring;
422 break;
423 default:
424 break;
425 }
426
427 if (ring1)
428 k3_nav_ringacc_ring_reset_dma(ring1, k3_nav_ringacc_ring_get_occ(ring1));
429 if (ring2)
430 k3_nav_ringacc_ring_reset(ring2);
431 }
432
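/*
 * Clear the realtime counters: these registers decrement by the value
 * written, so writing back the value just read zeroes them.
 */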
433 static void udma_reset_counters(struct udma_chan *uc)
434 {
435 u32 val;
436
437 if (uc->tchan) {
438 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
439 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
440
441 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
442 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
443
444 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
445 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
446
447 if (!uc->bchan) {
448 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
449 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
450 }
451 }
452
453 if (uc->rchan) {
454 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
455 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
456
457 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
458 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
459
460 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
461 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
462
463 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
464 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
465 }
466
467 uc->bcnt = 0;
468 }
469
470 static inline int udma_stop_hard(struct udma_chan *uc)
471 {
472 pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);
473
474 switch (uc->config.dir) {
475 case DMA_DEV_TO_MEM:
476 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
477 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
478 break;
479 case DMA_MEM_TO_DEV:
480 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
481 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
482 break;
483 case DMA_MEM_TO_MEM:
484 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
485 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
486 break;
487 default:
488 return -EINVAL;
489 }
490
491 return 0;
492 }
493
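/*
 * Start a channel: clear any pending teardown, zero the counters, then set
 * the EN bits. For TX the peer (remote) side is enabled before the channel,
 * for RX the channel is enabled before the peer.
 */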
494 static int udma_start(struct udma_chan *uc)
495 {
496 /* Channel is already running, no need to proceed further */
497 if (udma_is_chan_running(uc))
498 goto out;
499
500 pr_debug("%s: chan:%d dir:%s\n",
501 __func__, uc->id, udma_get_dir_text(uc->config.dir));
502
503 /* Make sure that we clear the teardown bit, if it is set */
504 udma_stop_hard(uc);
505
506 /* Reset all counters */
507 udma_reset_counters(uc);
508
509 switch (uc->config.dir) {
510 case DMA_DEV_TO_MEM:
511 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
512 UDMA_CHAN_RT_CTL_EN);
513
514 /* Enable remote */
515 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
516 UDMA_PEER_RT_EN_ENABLE);
517
518 pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
519 __func__,
520 udma_rchanrt_read(uc->rchan,
521 UDMA_RCHAN_RT_CTL_REG),
522 udma_rchanrt_read(uc->rchan,
523 UDMA_RCHAN_RT_PEER_RT_EN_REG));
524 break;
525 case DMA_MEM_TO_DEV:
526 /* Enable remote */
527 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
528 UDMA_PEER_RT_EN_ENABLE);
529
530 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
531 UDMA_CHAN_RT_CTL_EN);
532
533 pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
534 __func__,
535 udma_tchanrt_read(uc->tchan,
536 UDMA_TCHAN_RT_CTL_REG),
537 udma_tchanrt_read(uc->tchan,
538 UDMA_TCHAN_RT_PEER_RT_EN_REG));
539 break;
540 case DMA_MEM_TO_MEM:
541 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
542 UDMA_CHAN_RT_CTL_EN);
543 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
544 UDMA_CHAN_RT_CTL_EN);
545
546 break;
547 default:
548 return -EINVAL;
549 }
550
551 pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
552 out:
553 return 0;
554 }
555
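/*
 * TX teardown: request teardown by setting EN | TDOWN, optionally poll
 * (roughly 1000 x 1us) for the EN bit to clear, then verify that the peer
 * side has stopped as well.
 */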
556 static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
557 {
558 int i = 0;
559 u32 val;
560
561 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
562 UDMA_CHAN_RT_CTL_EN |
563 UDMA_CHAN_RT_CTL_TDOWN);
564
565 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
566
567 while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
568 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
569 udelay(1);
570 if (i > 1000) {
571 printf(" %s TIMEOUT !\n", __func__);
572 break;
573 }
574 i++;
575 }
576
577 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
578 if (val & UDMA_PEER_RT_EN_ENABLE)
579 printf("%s: peer not stopped TIMEOUT !\n", __func__);
580 }
581
582 static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
583 {
584 int i = 0;
585 u32 val;
586
587 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
588 UDMA_PEER_RT_EN_ENABLE |
589 UDMA_PEER_RT_EN_TEARDOWN);
590
591 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
592
593 while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
594 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
595 udelay(1);
596 if (i > 1000) {
597 printf("%s TIMEOUT !\n", __func__);
598 break;
599 }
600 i++;
601 }
602
603 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
604 if (val & UDMA_PEER_RT_EN_ENABLE)
605 printf("%s: peer not stopped TIMEOUT !\n", __func__);
606 }
607
608 static inline int udma_stop(struct udma_chan *uc)
609 {
610 pr_debug("%s: chan:%d dir:%s\n",
611 __func__, uc->id, udma_get_dir_text(uc->config.dir));
612
613 udma_reset_counters(uc);
614 switch (uc->config.dir) {
615 case DMA_DEV_TO_MEM:
616 udma_stop_dev2mem(uc, true);
617 break;
618 case DMA_MEM_TO_DEV:
619 udma_stop_mem2dev(uc, true);
620 break;
621 case DMA_MEM_TO_MEM:
622 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
623 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
624 break;
625 default:
626 return -EINVAL;
627 }
628
629 return 0;
630 }
631
632 static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
633 {
634 int i = 1;
635
636 while (udma_pop_from_ring(uc, paddr)) {
637 udelay(1);
638 if (!(i % 1000000))
639 printf(".");
640 i++;
641 }
642 }
643
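/*
 * Reserve an rflow. An explicit id can pick any free flow; automatic
 * allocation starts searching above rchan_cnt so that the flows matching the
 * rchan ids stay available as default flows.
 */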
644 static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
645 {
646 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
647
648 if (id >= 0) {
649 if (test_bit(id, ud->rflow_map)) {
650 dev_err(ud->dev, "rflow%d is in use\n", id);
651 return ERR_PTR(-ENOENT);
652 }
653 } else {
654 bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
655 ud->rflow_cnt);
656
657 id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
658 if (id >= ud->rflow_cnt)
659 return ERR_PTR(-ENOENT);
660 }
661
662 __set_bit(id, ud->rflow_map);
663 return &ud->rflows[id];
664 }
665
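/*
 * Generate __udma_reserve_tchan()/__udma_reserve_rchan(): reserve a specific
 * channel id if requested, otherwise the first free one from the bitmap.
 */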
666 #define UDMA_RESERVE_RESOURCE(res) \
667 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
668 int id) \
669 { \
670 if (id >= 0) { \
671 if (test_bit(id, ud->res##_map)) { \
672 dev_err(ud->dev, "res##%d is in use\n", id); \
673 return ERR_PTR(-ENOENT); \
674 } \
675 } else { \
676 id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
677 if (id == ud->res##_cnt) { \
678 return ERR_PTR(-ENOENT); \
679 } \
680 } \
681 \
682 __set_bit(id, ud->res##_map); \
683 return &ud->res##s[id]; \
684 }
685
686 UDMA_RESERVE_RESOURCE(tchan);
687 UDMA_RESERVE_RESOURCE(rchan);
688
689 static int udma_get_tchan(struct udma_chan *uc)
690 {
691 struct udma_dev *ud = uc->ud;
692
693 if (uc->tchan) {
694 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
695 uc->id, uc->tchan->id);
696 return 0;
697 }
698
699 uc->tchan = __udma_reserve_tchan(ud, uc->config.mapped_channel_id);
700 if (IS_ERR(uc->tchan))
701 return PTR_ERR(uc->tchan);
702
703 if (ud->tflow_cnt) {
704 int tflow_id;
705
706 /* Only PKTDMA has support for tx flows */
707 if (uc->config.default_flow_id >= 0)
708 tflow_id = uc->config.default_flow_id;
709 else
710 tflow_id = uc->tchan->id;
711
712 if (test_bit(tflow_id, ud->tflow_map)) {
713 dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
714 __clear_bit(uc->tchan->id, ud->tchan_map);
715 uc->tchan = NULL;
716 return -ENOENT;
717 }
718
719 uc->tchan->tflow_id = tflow_id;
720 __set_bit(tflow_id, ud->tflow_map);
721 } else {
722 uc->tchan->tflow_id = -1;
723 }
724
725 pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);
726
727 return 0;
728 }
729
730 static int udma_get_rchan(struct udma_chan *uc)
731 {
732 struct udma_dev *ud = uc->ud;
733
734 if (uc->rchan) {
735 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
736 uc->id, uc->rchan->id);
737 return 0;
738 }
739
740 uc->rchan = __udma_reserve_rchan(ud, uc->config.mapped_channel_id);
741 if (IS_ERR(uc->rchan))
742 return PTR_ERR(uc->rchan);
743
744 pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);
745
746 return 0;
747 }
748
749 static int udma_get_chan_pair(struct udma_chan *uc)
750 {
751 struct udma_dev *ud = uc->ud;
752 int chan_id, end;
753
754 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
755 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
756 uc->id, uc->tchan->id);
757 return 0;
758 }
759
760 if (uc->tchan) {
761 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
762 uc->id, uc->tchan->id);
763 return -EBUSY;
764 } else if (uc->rchan) {
765 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
766 uc->id, uc->rchan->id);
767 return -EBUSY;
768 }
769
770 /* Can be optimized, but let's have it like this for now */
771 end = min(ud->tchan_cnt, ud->rchan_cnt);
772 for (chan_id = 0; chan_id < end; chan_id++) {
773 if (!test_bit(chan_id, ud->tchan_map) &&
774 !test_bit(chan_id, ud->rchan_map))
775 break;
776 }
777
778 if (chan_id == end)
779 return -ENOENT;
780
781 __set_bit(chan_id, ud->tchan_map);
782 __set_bit(chan_id, ud->rchan_map);
783 uc->tchan = &ud->tchans[chan_id];
784 uc->rchan = &ud->rchans[chan_id];
785
786 pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);
787
788 return 0;
789 }
790
791 static int udma_get_rflow(struct udma_chan *uc, int flow_id)
792 {
793 struct udma_dev *ud = uc->ud;
794
795 if (uc->rflow) {
796 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
797 uc->id, uc->rflow->id);
798 return 0;
799 }
800
801 if (!uc->rchan)
802 dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);
803
804 uc->rflow = __udma_reserve_rflow(ud, flow_id);
805 if (IS_ERR(uc->rflow))
806 return PTR_ERR(uc->rflow);
807
808 pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
809 return 0;
810 }
811
812 static void udma_put_rchan(struct udma_chan *uc)
813 {
814 struct udma_dev *ud = uc->ud;
815
816 if (uc->rchan) {
817 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
818 uc->rchan->id);
819 __clear_bit(uc->rchan->id, ud->rchan_map);
820 uc->rchan = NULL;
821 }
822 }
823
824 static void udma_put_tchan(struct udma_chan *uc)
825 {
826 struct udma_dev *ud = uc->ud;
827
828 if (uc->tchan) {
829 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
830 uc->tchan->id);
831 __clear_bit(uc->tchan->id, ud->tchan_map);
832 if (uc->tchan->tflow_id >= 0)
833 __clear_bit(uc->tchan->tflow_id, ud->tflow_map);
834 uc->tchan = NULL;
835 }
836 }
837
838 static void udma_put_rflow(struct udma_chan *uc)
839 {
840 struct udma_dev *ud = uc->ud;
841
842 if (uc->rflow) {
843 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
844 uc->rflow->id);
845 __clear_bit(uc->rflow->id, ud->rflow_map);
846 uc->rflow = NULL;
847 }
848 }
849
850 static void udma_free_tx_resources(struct udma_chan *uc)
851 {
852 if (!uc->tchan)
853 return;
854
855 k3_nav_ringacc_ring_free(uc->tchan->t_ring);
856 k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
857 uc->tchan->t_ring = NULL;
858 uc->tchan->tc_ring = NULL;
859
860 udma_put_tchan(uc);
861 }
862
863 static int udma_alloc_tx_resources(struct udma_chan *uc)
864 {
865 struct k3_nav_ring_cfg ring_cfg;
866 struct udma_dev *ud = uc->ud;
867 int ret;
868
869 ret = udma_get_tchan(uc);
870 if (ret)
871 return ret;
872
873 ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
874 &uc->tchan->t_ring,
875 &uc->tchan->tc_ring);
876 if (ret) {
877 ret = -EBUSY;
878 goto err_tx_ring;
879 }
880
881 memset(&ring_cfg, 0, sizeof(ring_cfg));
882 ring_cfg.size = 16;
883 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
884 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
885
886 ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
887 ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
888
889 if (ret)
890 goto err_ringcfg;
891
892 return 0;
893
894 err_ringcfg:
895 k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
896 uc->tchan->tc_ring = NULL;
897 k3_nav_ringacc_ring_free(uc->tchan->t_ring);
898 uc->tchan->t_ring = NULL;
899 err_tx_ring:
900 udma_put_tchan(uc);
901
902 return ret;
903 }
904
905 static void udma_free_rx_resources(struct udma_chan *uc)
906 {
907 if (!uc->rchan)
908 return;
909
910 if (uc->rflow) {
911 k3_nav_ringacc_ring_free(uc->rflow->fd_ring);
912 k3_nav_ringacc_ring_free(uc->rflow->r_ring);
913 uc->rflow->fd_ring = NULL;
914 uc->rflow->r_ring = NULL;
915
916 udma_put_rflow(uc);
917 }
918
919 udma_put_rchan(uc);
920 }
921
922 static int udma_alloc_rx_resources(struct udma_chan *uc)
923 {
924 struct k3_nav_ring_cfg ring_cfg;
925 struct udma_dev *ud = uc->ud;
926 struct udma_rflow *rflow;
927 int fd_ring_id;
928 int ret;
929
930 ret = udma_get_rchan(uc);
931 if (ret)
932 return ret;
933
934 /* For MEM_TO_MEM we don't need rflow or rings */
935 if (uc->config.dir == DMA_MEM_TO_MEM)
936 return 0;
937
938 if (uc->config.default_flow_id >= 0)
939 ret = udma_get_rflow(uc, uc->config.default_flow_id);
940 else
941 ret = udma_get_rflow(uc, uc->rchan->id);
942
943 if (ret) {
944 ret = -EBUSY;
945 goto err_rflow;
946 }
947
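/*
 * Pick the free-descriptor ring index: for PKTDMA the rings follow the
 * tflow rings, otherwise they come after the bchan/tchan/echan ring blocks.
 */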
948 rflow = uc->rflow;
949 if (ud->tflow_cnt) {
950 fd_ring_id = ud->tflow_cnt + rflow->id;
951 } else {
952 fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
953 uc->rchan->id;
954 }
955
956 ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
957 &rflow->fd_ring, &rflow->r_ring);
958 if (ret) {
959 ret = -EBUSY;
960 goto err_rx_ring;
961 }
962
963 memset(&ring_cfg, 0, sizeof(ring_cfg));
964 ring_cfg.size = 16;
965 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
966 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
967
968 ret = k3_nav_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
969 ret |= k3_nav_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
970 if (ret)
971 goto err_ringcfg;
972
973 return 0;
974
975 err_ringcfg:
976 k3_nav_ringacc_ring_free(rflow->r_ring);
977 rflow->r_ring = NULL;
978 k3_nav_ringacc_ring_free(rflow->fd_ring);
979 rflow->fd_ring = NULL;
980 err_rx_ring:
981 udma_put_rflow(uc);
982 err_rflow:
983 udma_put_rchan(uc);
984
985 return ret;
986 }
987
988 static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
989 {
990 struct udma_dev *ud = uc->ud;
991 int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
992 struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
993 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
994 u32 mode;
995 int ret;
996
997 if (uc->config.pkt_mode)
998 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
999 else
1000 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1001
1002 req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
1003 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
1004 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
1005 req.nav_id = tisci_rm->tisci_dev_id;
1006 req.index = uc->tchan->id;
1007 req.tx_chan_type = mode;
1008 if (uc->config.dir == DMA_MEM_TO_MEM)
1009 req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1010 else
1011 req.tx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1012 uc->config.psd_size,
1013 0) >> 2;
1014 req.txcq_qnum = tc_ring;
1015
1016 ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
1017 if (ret)
1018 dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);
1019
1020 return ret;
1021 }
1022
1023 static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
1024 {
1025 struct udma_dev *ud = uc->ud;
1026 int fd_ring = k3_nav_ringacc_get_ring_id(uc->rflow->fd_ring);
1027 int rx_ring = k3_nav_ringacc_get_ring_id(uc->rflow->r_ring);
1028 int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1029 struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
1030 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
1031 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1032 u32 mode;
1033 int ret;
1034
1035 if (uc->config.pkt_mode)
1036 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1037 else
1038 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1039
1040 req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
1041 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
1042 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
1043 req.nav_id = tisci_rm->tisci_dev_id;
1044 req.index = uc->rchan->id;
1045 req.rx_chan_type = mode;
1046 if (uc->config.dir == DMA_MEM_TO_MEM) {
1047 req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1048 req.rxcq_qnum = tc_ring;
1049 } else {
1050 req.rx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1051 uc->config.psd_size,
1052 0) >> 2;
1053 req.rxcq_qnum = rx_ring;
1054 }
1055 if (ud->match_data->type == DMA_TYPE_UDMA &&
1056 uc->rflow->id != uc->rchan->id &&
1057 uc->config.dir != DMA_MEM_TO_MEM) {
1058 req.flowid_start = uc->rflow->id;
1059 req.flowid_cnt = 1;
1060 req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
1061 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
1062 }
1063
1064 ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
1065 if (ret) {
1066 dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
1067 uc->rchan->id, ret);
1068 return ret;
1069 }
1070 if (uc->config.dir == DMA_MEM_TO_MEM)
1071 return ret;
1072
1073 flow_req.valid_params =
1074 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
1075 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
1076 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
1077 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
1078 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1079 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
1080 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
1081 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
1082 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
1083 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1084 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1085 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1086 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
1087 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;
1088
1089 flow_req.nav_id = tisci_rm->tisci_dev_id;
1090 flow_req.flow_index = uc->rflow->id;
1091
1092 if (uc->config.needs_epib)
1093 flow_req.rx_einfo_present = 1;
1094 else
1095 flow_req.rx_einfo_present = 0;
1096
1097 if (uc->config.psd_size)
1098 flow_req.rx_psinfo_present = 1;
1099 else
1100 flow_req.rx_psinfo_present = 0;
1101
1102 flow_req.rx_error_handling = 0;
1103 flow_req.rx_desc_type = 0;
1104 flow_req.rx_dest_qnum = rx_ring;
1105 flow_req.rx_src_tag_hi_sel = 2;
1106 flow_req.rx_src_tag_lo_sel = 4;
1107 flow_req.rx_dest_tag_hi_sel = 5;
1108 flow_req.rx_dest_tag_lo_sel = 4;
1109 flow_req.rx_fdq0_sz0_qnum = fd_ring;
1110 flow_req.rx_fdq1_qnum = fd_ring;
1111 flow_req.rx_fdq2_qnum = fd_ring;
1112 flow_req.rx_fdq3_qnum = fd_ring;
1113 flow_req.rx_ps_location = 0;
1114
1115 ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
1116 &flow_req);
1117 if (ret)
1118 dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
1119 uc->rchan->id, uc->rflow->id, ret);
1120
1121 return ret;
1122 }
1123
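/*
 * Allocate everything a channel needs for its direction: reserve the
 * tchan/rchan (or a matching pair for MEM_TO_MEM) and their rings, derive
 * the PSI-L source/destination thread ids, configure the channel(s) through
 * TI-SCI, and finally pair the PSI-L threads.
 */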
1124 static int udma_alloc_chan_resources(struct udma_chan *uc)
1125 {
1126 struct udma_dev *ud = uc->ud;
1127 int ret;
1128
1129 pr_debug("%s: chan:%d as %s\n",
1130 __func__, uc->id, udma_get_dir_text(uc->config.dir));
1131
1132 switch (uc->config.dir) {
1133 case DMA_MEM_TO_MEM:
1134 /* Non synchronized - mem to mem type of transfer */
1135 uc->config.pkt_mode = false;
1136 ret = udma_get_chan_pair(uc);
1137 if (ret)
1138 return ret;
1139
1140 ret = udma_alloc_tx_resources(uc);
1141 if (ret)
1142 goto err_free_res;
1143
1144 ret = udma_alloc_rx_resources(uc);
1145 if (ret)
1146 goto err_free_res;
1147
1148 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1149 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
1150 break;
1151 case DMA_MEM_TO_DEV:
1152 /* Slave transfer synchronized - mem to dev (TX) transfer */
1153 ret = udma_alloc_tx_resources(uc);
1154 if (ret)
1155 goto err_free_res;
1156
1157 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1158 uc->config.dst_thread = uc->config.remote_thread_id;
1159 uc->config.dst_thread |= 0x8000;
1160
1161 break;
1162 case DMA_DEV_TO_MEM:
1163 /* Slave transfer synchronized - dev to mem (RX) transfer */
1164 ret = udma_alloc_rx_resources(uc);
1165 if (ret)
1166 goto err_free_res;
1167
1168 uc->config.src_thread = uc->config.remote_thread_id;
1169 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
1170
1171 break;
1172 default:
1173 /* Can not happen */
1174 pr_debug("%s: chan:%d invalid direction (%u)\n",
1175 __func__, uc->id, uc->config.dir);
1176 return -EINVAL;
1177 }
1178
1179 /* We have channel indexes and rings */
1180 if (uc->config.dir == DMA_MEM_TO_MEM) {
1181 ret = udma_alloc_tchan_sci_req(uc);
1182 if (ret)
1183 goto err_free_res;
1184
1185 ret = udma_alloc_rchan_sci_req(uc);
1186 if (ret)
1187 goto err_free_res;
1188 } else {
1189 /* Slave transfer */
1190 if (uc->config.dir == DMA_MEM_TO_DEV) {
1191 ret = udma_alloc_tchan_sci_req(uc);
1192 if (ret)
1193 goto err_free_res;
1194 } else {
1195 ret = udma_alloc_rchan_sci_req(uc);
1196 if (ret)
1197 goto err_free_res;
1198 }
1199 }
1200
1201 if (udma_is_chan_running(uc)) {
1202 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1203 udma_stop(uc);
1204 if (udma_is_chan_running(uc)) {
1205 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1206 goto err_free_res;
1207 }
1208 }
1209
1210 /* PSI-L pairing */
1211 ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
1212 if (ret) {
1213 dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
1214 goto err_free_res;
1215 }
1216
1217 return 0;
1218
1219 err_free_res:
1220 udma_free_tx_resources(uc);
1221 udma_free_rx_resources(uc);
1222 uc->config.remote_thread_id = -1;
1223 return ret;
1224 }
1225
1226 static void udma_free_chan_resources(struct udma_chan *uc)
1227 {
1228 /* Hard reset UDMA channel */
1229 udma_stop_hard(uc);
1230 udma_reset_counters(uc);
1231
1232 /* Release PSI-L pairing */
1233 udma_navss_psil_unpair(uc->ud, uc->config.src_thread, uc->config.dst_thread);
1234
1235 /* Reset the rings for a new start */
1236 udma_reset_rings(uc);
1237 udma_free_tx_resources(uc);
1238 udma_free_rx_resources(uc);
1239
1240 uc->config.remote_thread_id = -1;
1241 uc->config.dir = DMA_MEM_TO_MEM;
1242 }
1243
1244 static const char * const range_names[] = {
1245 [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
1246 [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
1247 [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
1248 [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
1249 [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
1250 };
1251
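/*
 * Map the MMR regions and read the capability registers (CAP2/CAP3 at
 * 0x28/0x2c, CAP4 at 0x30 for PKTDMA) to discover how many channels and
 * flows this instance implements.
 */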
1252 static int udma_get_mmrs(struct udevice *dev)
1253 {
1254 struct udma_dev *ud = dev_get_priv(dev);
1255 u32 cap2, cap3, cap4;
1256 int i;
1257
1258 ud->mmrs[MMR_GCFG] = (uint32_t *)devfdt_get_addr_name(dev, mmr_names[MMR_GCFG]);
1259 if (!ud->mmrs[MMR_GCFG])
1260 return -EINVAL;
1261
1262 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
1263 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
1264
1265 switch (ud->match_data->type) {
1266 case DMA_TYPE_UDMA:
1267 ud->rflow_cnt = cap3 & 0x3fff;
1268 ud->tchan_cnt = cap2 & 0x1ff;
1269 ud->echan_cnt = (cap2 >> 9) & 0x1ff;
1270 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1271 break;
1272 case DMA_TYPE_BCDMA:
1273 ud->bchan_cnt = cap2 & 0x1ff;
1274 ud->tchan_cnt = (cap2 >> 9) & 0x1ff;
1275 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1276 break;
1277 case DMA_TYPE_PKTDMA:
1278 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
1279 ud->tchan_cnt = cap2 & 0x1ff;
1280 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1281 ud->rflow_cnt = cap3 & 0x3fff;
1282 ud->tflow_cnt = cap4 & 0x3fff;
1283 break;
1284 default:
1285 return -EINVAL;
1286 }
1287
1288 for (i = 1; i < MMR_LAST; i++) {
1289 if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
1290 continue;
1291 if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
1292 continue;
1293 if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
1294 continue;
1295
1296 ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
1297 mmr_names[i]);
1298 if (!ud->mmrs[i])
1299 return -EINVAL;
1300 }
1301
1302 return 0;
1303 }
1304
1305 static int udma_setup_resources(struct udma_dev *ud)
1306 {
1307 struct udevice *dev = ud->dev;
1308 int i;
1309 struct ti_sci_resource_desc *rm_desc;
1310 struct ti_sci_resource *rm_res;
1311 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1312
1313 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1314 sizeof(unsigned long), GFP_KERNEL);
1315 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1316 GFP_KERNEL);
1317 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1318 sizeof(unsigned long), GFP_KERNEL);
1319 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1320 GFP_KERNEL);
1321 ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
1322 sizeof(unsigned long), GFP_KERNEL);
1323 ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
1324 sizeof(unsigned long),
1325 GFP_KERNEL);
1326 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
1327 GFP_KERNEL);
1328
1329 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
1330 !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
1331 !ud->rflows)
1332 return -ENOMEM;
1333
1334 /*
1335 * RX flows with the same Ids as RX channels are reserved to be used
1336 * as default flows if remote HW can't generate flow_ids. Those
1337 * RX flows can be requested only explicitly by id.
1338 */
1339 bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);
1340
1341 /* Get resource ranges from tisci */
1342 for (i = 0; i < RM_RANGE_LAST; i++) {
1343 if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
1344 continue;
1345
1346 tisci_rm->rm_ranges[i] =
1347 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1348 tisci_rm->tisci_dev_id,
1349 (char *)range_names[i]);
1350 }
1351
1352 /* tchan ranges */
1353 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1354 if (IS_ERR(rm_res)) {
1355 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1356 } else {
1357 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1358 for (i = 0; i < rm_res->sets; i++) {
1359 rm_desc = &rm_res->desc[i];
1360 bitmap_clear(ud->tchan_map, rm_desc->start,
1361 rm_desc->num);
1362 }
1363 }
1364
1365 /* rchan and matching default flow ranges */
1366 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1367 if (IS_ERR(rm_res)) {
1368 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1369 bitmap_zero(ud->rflow_map, ud->rchan_cnt);
1370 } else {
1371 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1372 bitmap_fill(ud->rflow_map, ud->rchan_cnt);
1373 for (i = 0; i < rm_res->sets; i++) {
1374 rm_desc = &rm_res->desc[i];
1375 bitmap_clear(ud->rchan_map, rm_desc->start,
1376 rm_desc->num);
1377 bitmap_clear(ud->rflow_map, rm_desc->start,
1378 rm_desc->num);
1379 }
1380 }
1381
1382 /* GP rflow ranges */
1383 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
1384 if (IS_ERR(rm_res)) {
1385 bitmap_clear(ud->rflow_map, ud->rchan_cnt,
1386 ud->rflow_cnt - ud->rchan_cnt);
1387 } else {
1388 bitmap_set(ud->rflow_map, ud->rchan_cnt,
1389 ud->rflow_cnt - ud->rchan_cnt);
1390 for (i = 0; i < rm_res->sets; i++) {
1391 rm_desc = &rm_res->desc[i];
1392 bitmap_clear(ud->rflow_map, rm_desc->start,
1393 rm_desc->num);
1394 }
1395 }
1396
1397 return 0;
1398 }
1399
1400 static int bcdma_setup_resources(struct udma_dev *ud)
1401 {
1402 int i;
1403 struct udevice *dev = ud->dev;
1404 struct ti_sci_resource_desc *rm_desc;
1405 struct ti_sci_resource *rm_res;
1406 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1407
1408 ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
1409 sizeof(unsigned long), GFP_KERNEL);
1410 ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
1411 GFP_KERNEL);
1412 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1413 sizeof(unsigned long), GFP_KERNEL);
1414 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1415 GFP_KERNEL);
1416 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1417 sizeof(unsigned long), GFP_KERNEL);
1418 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1419 GFP_KERNEL);
1420 /* BCDMA does not really have flows, but the driver expects them */
1421 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
1422 sizeof(unsigned long),
1423 GFP_KERNEL);
1424 ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
1425 GFP_KERNEL);
1426
1427 if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
1428 !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
1429 !ud->rflows)
1430 return -ENOMEM;
1431
1432 /* Get resource ranges from tisci */
1433 for (i = 0; i < RM_RANGE_LAST; i++) {
1434 if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
1435 continue;
1436
1437 tisci_rm->rm_ranges[i] =
1438 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1439 tisci_rm->tisci_dev_id,
1440 (char *)range_names[i]);
1441 }
1442
1443 /* bchan ranges */
1444 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
1445 if (IS_ERR(rm_res)) {
1446 bitmap_zero(ud->bchan_map, ud->bchan_cnt);
1447 } else {
1448 bitmap_fill(ud->bchan_map, ud->bchan_cnt);
1449 for (i = 0; i < rm_res->sets; i++) {
1450 rm_desc = &rm_res->desc[i];
1451 bitmap_clear(ud->bchan_map, rm_desc->start,
1452 rm_desc->num);
1453 dev_dbg(dev, "ti-sci-res: bchan: %d:%d\n",
1454 rm_desc->start, rm_desc->num);
1455 }
1456 }
1457
1458 /* tchan ranges */
1459 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1460 if (IS_ERR(rm_res)) {
1461 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1462 } else {
1463 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1464 for (i = 0; i < rm_res->sets; i++) {
1465 rm_desc = &rm_res->desc[i];
1466 bitmap_clear(ud->tchan_map, rm_desc->start,
1467 rm_desc->num);
1468 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
1469 rm_desc->start, rm_desc->num);
1470 }
1471 }
1472
1473 /* rchan ranges */
1474 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1475 if (IS_ERR(rm_res)) {
1476 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1477 } else {
1478 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1479 for (i = 0; i < rm_res->sets; i++) {
1480 rm_desc = &rm_res->desc[i];
1481 bitmap_clear(ud->rchan_map, rm_desc->start,
1482 rm_desc->num);
1483 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
1484 rm_desc->start, rm_desc->num);
1485 }
1486 }
1487
1488 return 0;
1489 }
1490
1491 static int pktdma_setup_resources(struct udma_dev *ud)
1492 {
1493 int i;
1494 struct udevice *dev = ud->dev;
1495 struct ti_sci_resource *rm_res;
1496 struct ti_sci_resource_desc *rm_desc;
1497 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1498
1499 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1500 sizeof(unsigned long), GFP_KERNEL);
1501 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1502 GFP_KERNEL);
1503 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1504 sizeof(unsigned long), GFP_KERNEL);
1505 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1506 GFP_KERNEL);
1507 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
1508 sizeof(unsigned long),
1509 GFP_KERNEL);
1510 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
1511 GFP_KERNEL);
1512 ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
1513 sizeof(unsigned long), GFP_KERNEL);
1514
1515 if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
1516 !ud->rchans || !ud->rflows || !ud->rflow_in_use)
1517 return -ENOMEM;
1518
1519 /* Get resource ranges from tisci */
1520 for (i = 0; i < RM_RANGE_LAST; i++) {
1521 if (i == RM_RANGE_BCHAN)
1522 continue;
1523
1524 tisci_rm->rm_ranges[i] =
1525 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1526 tisci_rm->tisci_dev_id,
1527 (char *)range_names[i]);
1528 }
1529
1530 /* tchan ranges */
1531 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1532 if (IS_ERR(rm_res)) {
1533 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1534 } else {
1535 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1536 for (i = 0; i < rm_res->sets; i++) {
1537 rm_desc = &rm_res->desc[i];
1538 bitmap_clear(ud->tchan_map, rm_desc->start,
1539 rm_desc->num);
1540 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
1541 rm_desc->start, rm_desc->num);
1542 }
1543 }
1544
1545 /* rchan ranges */
1546 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1547 if (IS_ERR(rm_res)) {
1548 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1549 } else {
1550 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1551 for (i = 0; i < rm_res->sets; i++) {
1552 rm_desc = &rm_res->desc[i];
1553 bitmap_clear(ud->rchan_map, rm_desc->start,
1554 rm_desc->num);
1555 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
1556 rm_desc->start, rm_desc->num);
1557 }
1558 }
1559
1560 /* rflow ranges */
1561 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
1562 if (IS_ERR(rm_res)) {
1563 /* all rflows are assigned exclusively to Linux */
1564 bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
1565 } else {
1566 bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
1567 for (i = 0; i < rm_res->sets; i++) {
1568 rm_desc = &rm_res->desc[i];
1569 bitmap_clear(ud->rflow_in_use, rm_desc->start,
1570 rm_desc->num);
1571 dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
1572 rm_desc->start, rm_desc->num);
1573 }
1574 }
1575
1576 /* tflow ranges */
1577 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
1578 if (IS_ERR(rm_res)) {
1579 /* all tflows are assigned exclusively to Linux */
1580 bitmap_zero(ud->tflow_map, ud->tflow_cnt);
1581 } else {
1582 bitmap_fill(ud->tflow_map, ud->tflow_cnt);
1583 for (i = 0; i < rm_res->sets; i++) {
1584 rm_desc = &rm_res->desc[i];
1585 bitmap_clear(ud->tflow_map, rm_desc->start,
1586 rm_desc->num);
1587 dev_dbg(dev, "ti-sci-res: tflow: %d:%d\n",
1588 rm_desc->start, rm_desc->num);
1589 }
1590 }
1591
1592 return 0;
1593 }
1594
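/*
 * Set up the per-type resource maps, then count the channels this host can
 * use: the total channel count minus the bits still set in the maps (those
 * are not assigned to this host by TI-SCI).
 */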
1595 static int setup_resources(struct udma_dev *ud)
1596 {
1597 struct udevice *dev = ud->dev;
1598 int ch_count, ret;
1599
1600 switch (ud->match_data->type) {
1601 case DMA_TYPE_UDMA:
1602 ret = udma_setup_resources(ud);
1603 break;
1604 case DMA_TYPE_BCDMA:
1605 ret = bcdma_setup_resources(ud);
1606 break;
1607 case DMA_TYPE_PKTDMA:
1608 ret = pktdma_setup_resources(ud);
1609 break;
1610 default:
1611 return -EINVAL;
1612 }
1613
1614 if (ret)
1615 return ret;
1616
1617 ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
1618 if (ud->bchan_cnt)
1619 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
1620 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
1621 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
1622 if (!ch_count)
1623 return -ENODEV;
1624
1625 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
1626 GFP_KERNEL);
1627 if (!ud->channels)
1628 return -ENOMEM;
1629
1630 switch (ud->match_data->type) {
1631 case DMA_TYPE_UDMA:
1632 dev_dbg(dev,
1633 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
1634 ch_count,
1635 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1636 ud->tchan_cnt),
1637 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1638 ud->rchan_cnt),
1639 ud->rflow_cnt - bitmap_weight(ud->rflow_map,
1640 ud->rflow_cnt));
1641 break;
1642 case DMA_TYPE_BCDMA:
1643 dev_dbg(dev,
1644 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
1645 ch_count,
1646 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
1647 ud->bchan_cnt),
1648 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1649 ud->tchan_cnt),
1650 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1651 ud->rchan_cnt));
1652 break;
1653 case DMA_TYPE_PKTDMA:
1654 dev_dbg(dev,
1655 "Channels: %d (tchan: %u, rchan: %u)\n",
1656 ch_count,
1657 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1658 ud->tchan_cnt),
1659 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1660 ud->rchan_cnt));
1661 break;
1662 default:
1663 break;
1664 }
1665
1666 return ch_count;
1667 }
1668
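/*
 * Probe: read the match data and MMRs, look up the TI-SCI handle and device
 * ids, get or create the ring accelerator, set up the resource maps and
 * initialise the per-channel and per-flow bookkeeping structures.
 */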
1669 static int udma_probe(struct udevice *dev)
1670 {
1671 struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
1672 struct udma_dev *ud = dev_get_priv(dev);
1673 int i, ret;
1674 struct udevice *tmp;
1675 struct udevice *tisci_dev = NULL;
1676 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1677 ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));
1678
1679
1680 ud->match_data = (void *)dev_get_driver_data(dev);
1681 ret = udma_get_mmrs(dev);
1682 if (ret)
1683 return ret;
1684
1685 ud->psil_base = ud->match_data->psil_base;
1686
1687 ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
1688 "ti,sci", &tisci_dev);
1689 if (ret) {
1690 debug("Failed to get TISCI phandle (%d)\n", ret);
1691 tisci_rm->tisci = NULL;
1692 return -EINVAL;
1693 }
1694 tisci_rm->tisci = (struct ti_sci_handle *)
1695 (ti_sci_get_handle_from_sysfw(tisci_dev));
1696
1697 tisci_rm->tisci_dev_id = -1;
1698 ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
1699 if (ret) {
1700 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
1701 return ret;
1702 }
1703
1704 tisci_rm->tisci_navss_dev_id = -1;
1705 ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
1706 &tisci_rm->tisci_navss_dev_id);
1707 if (ret) {
1708 dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
1709 return ret;
1710 }
1711
1712 tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
1713 tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;
1714
1715 if (ud->match_data->type == DMA_TYPE_UDMA) {
1716 ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
1717 "ti,ringacc", &tmp);
1718 ud->ringacc = dev_get_priv(tmp);
1719 } else {
1720 struct k3_ringacc_init_data ring_init_data;
1721
1722 ring_init_data.tisci = ud->tisci_rm.tisci;
1723 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
1724 if (ud->match_data->type == DMA_TYPE_BCDMA) {
1725 ring_init_data.num_rings = ud->bchan_cnt +
1726 ud->tchan_cnt +
1727 ud->rchan_cnt;
1728 } else {
1729 ring_init_data.num_rings = ud->rflow_cnt +
1730 ud->tflow_cnt;
1731 }
1732
1733 ud->ringacc = k3_ringacc_dmarings_init(dev, &ring_init_data);
1734 }
1735 if (IS_ERR(ud->ringacc))
1736 return PTR_ERR(ud->ringacc);
1737
1738 ud->dev = dev;
1739 ud->ch_count = setup_resources(ud);
1740 if (ud->ch_count <= 0)
1741 return ud->ch_count;
1742
1743 for (i = 0; i < ud->bchan_cnt; i++) {
1744 struct udma_bchan *bchan = &ud->bchans[i];
1745
1746 bchan->id = i;
1747 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
1748 }
1749
1750 for (i = 0; i < ud->tchan_cnt; i++) {
1751 struct udma_tchan *tchan = &ud->tchans[i];
1752
1753 tchan->id = i;
1754 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
1755 }
1756
1757 for (i = 0; i < ud->rchan_cnt; i++) {
1758 struct udma_rchan *rchan = &ud->rchans[i];
1759
1760 rchan->id = i;
1761 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
1762 }
1763
1764 for (i = 0; i < ud->rflow_cnt; i++) {
1765 struct udma_rflow *rflow = &ud->rflows[i];
1766
1767 rflow->id = i;
1768 }
1769
1770 for (i = 0; i < ud->ch_count; i++) {
1771 struct udma_chan *uc = &ud->channels[i];
1772
1773 uc->ud = ud;
1774 uc->id = i;
1775 uc->config.remote_thread_id = -1;
1776 uc->bchan = NULL;
1777 uc->tchan = NULL;
1778 uc->rchan = NULL;
1779 uc->config.mapped_channel_id = -1;
1780 uc->config.default_flow_id = -1;
1781 uc->config.dir = DMA_MEM_TO_MEM;
1782 sprintf(uc->name, "UDMA chan%d\n", i);
1783 if (!i)
1784 uc->in_use = true;
1785 }
1786
1787 pr_debug("%s(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
1788 dev->name,
1789 udma_read(ud->mmrs[MMR_GCFG], 0),
1790 udma_read(ud->mmrs[MMR_GCFG], 0x20),
1791 udma_read(ud->mmrs[MMR_GCFG], 0x24),
1792 udma_read(ud->mmrs[MMR_GCFG], 0x28),
1793 udma_read(ud->mmrs[MMR_GCFG], 0x2c));
1794
1795 uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;
1796
1797 return ret;
1798 }
1799
1800 static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
1801 {
1802 u64 addr = 0;
1803
1804 memcpy(&addr, &elem, sizeof(elem));
1805 return k3_nav_ringacc_ring_push(ring, &addr);
1806 }
1807
1808 static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
1809 dma_addr_t src, size_t len)
1810 {
1811 u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1812 struct cppi5_tr_type15_t *tr_req;
1813 int num_tr;
1814 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
1815 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
1816 unsigned long dummy;
1817 void *tr_desc;
1818 size_t desc_size;
1819
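/*
 * Split the copy into TRs. Below 64K a single TR covers the whole length.
 * Otherwise tr0 moves tr0_cnt1 blocks of tr0_cnt0 bytes (64K minus the
 * common src/dest alignment, capped at 8 bytes) and tr1 moves the remainder.
 * Illustrative numbers (not from the source): len = 200000 with 8-byte
 * alignment gives tr0_cnt0 = 65528, tr0_cnt1 = 3 and tr1_cnt0 = 3416.
 */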
1820 if (len < SZ_64K) {
1821 num_tr = 1;
1822 tr0_cnt0 = len;
1823 tr0_cnt1 = 1;
1824 } else {
1825 unsigned long align_to = __ffs(src | dest);
1826
1827 if (align_to > 3)
1828 align_to = 3;
1829 /*
1830 * Keep simple: tr0: SZ_64K-alignment blocks,
1831 * tr1: the remaining
1832 */
1833 num_tr = 2;
1834 tr0_cnt0 = (SZ_64K - BIT(align_to));
1835 if (len / tr0_cnt0 >= SZ_64K) {
1836 dev_err(uc->ud->dev, "size %zu is not supported\n",
1837 len);
1838 return NULL;
1839 }
1840
1841 tr0_cnt1 = len / tr0_cnt0;
1842 tr1_cnt0 = len % tr0_cnt0;
1843 }
1844
1845 desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
1846 tr_desc = dma_alloc_coherent(desc_size, &dummy);
1847 if (!tr_desc)
1848 return NULL;
1849 memset(tr_desc, 0, desc_size);
1850
1851 cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
1852 cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
1853 cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);
1854
1855 tr_req = tr_desc + tr_size;
1856
1857 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
1858 CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
1859 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
1860
1861 tr_req[0].addr = src;
1862 tr_req[0].icnt0 = tr0_cnt0;
1863 tr_req[0].icnt1 = tr0_cnt1;
1864 tr_req[0].icnt2 = 1;
1865 tr_req[0].icnt3 = 1;
1866 tr_req[0].dim1 = tr0_cnt0;
1867
1868 tr_req[0].daddr = dest;
1869 tr_req[0].dicnt0 = tr0_cnt0;
1870 tr_req[0].dicnt1 = tr0_cnt1;
1871 tr_req[0].dicnt2 = 1;
1872 tr_req[0].dicnt3 = 1;
1873 tr_req[0].ddim1 = tr0_cnt0;
1874
1875 if (num_tr == 2) {
1876 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
1877 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
1878 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
1879
1880 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
1881 tr_req[1].icnt0 = tr1_cnt0;
1882 tr_req[1].icnt1 = 1;
1883 tr_req[1].icnt2 = 1;
1884 tr_req[1].icnt3 = 1;
1885
1886 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
1887 tr_req[1].dicnt0 = tr1_cnt0;
1888 tr_req[1].dicnt1 = 1;
1889 tr_req[1].dicnt2 = 1;
1890 tr_req[1].dicnt3 = 1;
1891 }
1892
1893 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
1894
1895 flush_dcache_range((unsigned long)tr_desc,
1896 ALIGN((unsigned long)tr_desc + desc_size,
1897 ARCH_DMA_MINALIGN));
1898
1899 udma_push_to_ring(uc->tchan->t_ring, tr_desc);
1900
1901 return 0;
1902 }
1903
1904 #define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \
1905 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1906 TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1907
1908 #define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \
1909 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1910 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1911
1912 #define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \
1913 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1914
1915 #define TISCI_UDMA_TCHAN_VALID_PARAMS ( \
1916 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1917 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1918 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1919 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1920 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1921 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1922 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1923 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1924
1925 #define TISCI_UDMA_RCHAN_VALID_PARAMS ( \
1926 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1927 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1928 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1929 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1930 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1931 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1932 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1933 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1934 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1935
1936 static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
1937 {
1938 struct udma_dev *ud = uc->ud;
1939 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1940 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1941 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1942 struct udma_bchan *bchan = uc->bchan;
1943 int ret = 0;
1944
1945 req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
1946 req_tx.nav_id = tisci_rm->tisci_dev_id;
1947 req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
1948 req_tx.index = bchan->id;
1949
1950 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1951 if (ret)
1952 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1953
1954 return ret;
1955 }
1956
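/*
 * Reserve a bchan in the device bitmap. A specific id can be requested;
 * passing a negative id picks the first free one.
 */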
static struct udma_bchan *__bcdma_reserve_bchan(struct udma_dev *ud, int id)
{
if (id >= 0) {
if (test_bit(id, ud->bchan_map)) {
dev_err(ud->dev, "bchan%d is in use\n", id);
return ERR_PTR(-ENOENT);
}
} else {
id = find_next_zero_bit(ud->bchan_map, ud->bchan_cnt, 0);
if (id == ud->bchan_cnt)
return ERR_PTR(-ENOENT);
}
__set_bit(id, ud->bchan_map);
return &ud->bchans[id];
}

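/* Attach a free bchan to the channel; the tchan pointer is reused for it. */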
static int bcdma_get_bchan(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;

if (uc->bchan) {
dev_err(ud->dev, "chan%d: already have bchan%d allocated\n",
uc->id, uc->bchan->id);
return 0;
}

uc->bchan = __bcdma_reserve_bchan(ud, -1);
if (IS_ERR(uc->bchan))
return PTR_ERR(uc->bchan);

uc->tchan = uc->bchan;

return 0;
}

static void bcdma_put_bchan(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;

if (uc->bchan) {
dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
uc->bchan->id);
__clear_bit(uc->bchan->id, ud->bchan_map);
uc->bchan = NULL;
uc->tchan = NULL;
}
}

static void bcdma_free_bchan_resources(struct udma_chan *uc)
{
if (!uc->bchan)
return;

k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
k3_nav_ringacc_ring_free(uc->bchan->t_ring);
uc->bchan->tc_ring = NULL;
uc->bchan->t_ring = NULL;

bcdma_put_bchan(uc);
}

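/*
 * Allocate a bchan plus its ring pair and configure the transmit ring
 * (16 elements, 8-byte element size, ring mode). On failure the rings and
 * the bchan reservation are released again.
 */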
static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
{
struct k3_nav_ring_cfg ring_cfg;
struct udma_dev *ud = uc->ud;
int ret;

ret = bcdma_get_bchan(uc);
if (ret)
return ret;

ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
&uc->bchan->t_ring,
&uc->bchan->tc_ring);
if (ret) {
ret = -EBUSY;
goto err_ring;
}

memset(&ring_cfg, 0, sizeof(ring_cfg));
ring_cfg.size = 16;
ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

ret = k3_nav_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
if (ret)
goto err_ringcfg;

return 0;

err_ringcfg:
k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
uc->bchan->tc_ring = NULL;
k3_nav_ringacc_ring_free(uc->bchan->t_ring);
uc->bchan->t_ring = NULL;
err_ring:
bcdma_put_bchan(uc);

return ret;
}

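/*
 * TISCI configuration of a BCDMA tchan (also used for PKTDMA tchans via the
 * alias below). For PDMA endpoints on devices with UDMA_FLAG_TDTYPE the
 * teardown type is set so the channel waits for the peer to complete the
 * teardown.
 */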
static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
struct udma_tchan *tchan = uc->tchan;
struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
int ret = 0;

req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
req_tx.nav_id = tisci_rm->tisci_dev_id;
req_tx.index = tchan->id;
req_tx.tx_supr_tdpkt = uc->config.notdpkt;
if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
ud->match_data->flags & UDMA_FLAG_TDTYPE) {
/* wait for peer to complete the teardown for PDMAs */
req_tx.valid_params |=
TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
req_tx.tx_tdtype = 1;
}

ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
if (ret)
dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);

return ret;
}

#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config

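/*
 * TISCI configuration of a PKTDMA rchan and its default flow. The channel
 * itself only needs the pause-on-error parameter; the flow is configured
 * for optional EPIB/psdata presence with error handling enabled.
 */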
static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
int ret = 0;

req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
req_rx.nav_id = tisci_rm->tisci_dev_id;
req_rx.index = uc->rchan->id;

ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
if (ret) {
dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
return ret;
}

flow_req.valid_params =
TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;

flow_req.nav_id = tisci_rm->tisci_dev_id;
flow_req.flow_index = uc->rflow->id;

if (uc->config.needs_epib)
flow_req.rx_einfo_present = 1;
else
flow_req.rx_einfo_present = 0;
if (uc->config.psd_size)
flow_req.rx_psinfo_present = 1;
else
flow_req.rx_psinfo_present = 0;
flow_req.rx_error_handling = 1;

ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);

if (ret)
dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
ret);

return ret;
}

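/*
 * Allocate BCDMA channel resources. Only the MEM_TO_MEM (bchan) case is
 * handled here; packet mode is forced off since block-copy transfers use
 * TR descriptors.
 */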
static int bcdma_alloc_chan_resources(struct udma_chan *uc)
{
int ret;

uc->config.pkt_mode = false;

switch (uc->config.dir) {
case DMA_MEM_TO_MEM:
/* Non synchronized - mem to mem type of transfer */
dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
uc->id);

ret = bcdma_alloc_bchan_resources(uc);
if (ret)
return ret;

ret = bcdma_tisci_m2m_channel_config(uc);
break;
default:
/* Can not happen */
dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
__func__, uc->id, uc->config.dir);
return -EINVAL;
}

/* check if the channel configuration was successful */
if (ret)
goto err_res_free;

if (udma_is_chan_running(uc)) {
dev_warn(uc->ud->dev, "chan%d: is running!\n", uc->id);
udma_stop(uc);
if (udma_is_chan_running(uc)) {
dev_err(uc->ud->dev, "chan%d: won't stop!\n", uc->id);
goto err_res_free;
}
}

udma_reset_rings(uc);

return 0;

err_res_free:
bcdma_free_bchan_resources(uc);
udma_free_tx_resources(uc);
udma_free_rx_resources(uc);

udma_reset_uchan(uc);

return ret;
}

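/*
 * Allocate PKTDMA channel resources for a slave transfer: reserve the
 * tchan or rchan (plus flow), derive the PSI-L source/destination threads,
 * configure the channel via TISCI and pair the PSI-L threads.
 */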
static int pktdma_alloc_chan_resources(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
int ret;

switch (uc->config.dir) {
case DMA_MEM_TO_DEV:
/* Slave transfer synchronized - mem to dev (TX) transfer */
dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
uc->id);

ret = udma_alloc_tx_resources(uc);
if (ret) {
uc->config.remote_thread_id = -1;
return ret;
}

uc->config.src_thread = ud->psil_base + uc->tchan->id;
uc->config.dst_thread = uc->config.remote_thread_id;
uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

ret = pktdma_tisci_tx_channel_config(uc);
break;
case DMA_DEV_TO_MEM:
/* Slave transfer synchronized - dev to mem (RX) transfer */
dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
uc->id);

ret = udma_alloc_rx_resources(uc);
if (ret) {
uc->config.remote_thread_id = -1;
return ret;
}

uc->config.src_thread = uc->config.remote_thread_id;
uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
K3_PSIL_DST_THREAD_ID_OFFSET;

ret = pktdma_tisci_rx_channel_config(uc);
break;
default:
/* Can not happen */
dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
__func__, uc->id, uc->config.dir);
return -EINVAL;
}

/* check if the channel configuration was successful */
if (ret)
goto err_res_free;

/* PSI-L pairing */
ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
if (ret) {
dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
uc->config.src_thread, uc->config.dst_thread);
goto err_res_free;
}

if (udma_is_chan_running(uc)) {
dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
udma_stop(uc);
if (udma_is_chan_running(uc)) {
dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
goto err_res_free;
}
}

udma_reset_rings(uc);

if (uc->tchan)
dev_dbg(ud->dev,
"chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
uc->id, uc->tchan->id, uc->tchan->tflow_id,
uc->config.remote_thread_id);
else if (uc->rchan)
dev_dbg(ud->dev,
"chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
uc->id, uc->rchan->id, uc->rflow->id,
uc->config.remote_thread_id);
return 0;

err_res_free:
udma_free_tx_resources(uc);
udma_free_rx_resources(uc);

udma_reset_uchan(uc);

return ret;
}

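/*
 * DMA uclass .transfer callback: one-shot memcpy on channel 0. The channel
 * is set up, a single TR descriptor is pushed, completion is polled and the
 * channel is torn down again. Only the UDMA and BCDMA types are accepted,
 * since PKTDMA has no memcpy support.
 */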
static int udma_transfer(struct udevice *dev, int direction,
void *dst, void *src, size_t len)
{
struct udma_dev *ud = dev_get_priv(dev);
/* Channel0 is reserved for memcpy */
struct udma_chan *uc = &ud->channels[0];
dma_addr_t paddr = 0;
int ret;

switch (ud->match_data->type) {
case DMA_TYPE_UDMA:
ret = udma_alloc_chan_resources(uc);
break;
case DMA_TYPE_BCDMA:
ret = bcdma_alloc_chan_resources(uc);
break;
default:
return -EINVAL;
};
if (ret)
return ret;

udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
udma_start(uc);
udma_poll_completion(uc, &paddr);
udma_stop(uc);

switch (ud->match_data->type) {
case DMA_TYPE_UDMA:
udma_free_chan_resources(uc);
break;
case DMA_TYPE_BCDMA:
bcdma_free_bchan_resources(uc);
break;
default:
return -EINVAL;
};

return 0;
}

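/*
 * DMA uclass .request callback: allocate hardware resources for the channel
 * selected by udma_of_xlate() and the coherent descriptor memory (a single
 * TX descriptor or UDMA_RX_DESC_NUM RX descriptors, depending on direction).
 */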
static int udma_request(struct dma *dma)
{
struct udma_dev *ud = dev_get_priv(dma->dev);
struct udma_chan_config *ucc;
struct udma_chan *uc;
unsigned long dummy;
int ret;

if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
return -EINVAL;
}

uc = &ud->channels[dma->id];
ucc = &uc->config;
switch (ud->match_data->type) {
case DMA_TYPE_UDMA:
ret = udma_alloc_chan_resources(uc);
break;
case DMA_TYPE_BCDMA:
ret = bcdma_alloc_chan_resources(uc);
break;
case DMA_TYPE_PKTDMA:
ret = pktdma_alloc_chan_resources(uc);
break;
default:
return -EINVAL;
}
if (ret) {
dev_err(dma->dev, "alloc dma res failed %d\n", ret);
return -EINVAL;
}

if (uc->config.dir == DMA_MEM_TO_DEV) {
uc->desc_tx = dma_alloc_coherent(ucc->hdesc_size, &dummy);
memset(uc->desc_tx, 0, ucc->hdesc_size);
} else {
uc->desc_rx = dma_alloc_coherent(
ucc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
memset(uc->desc_rx, 0, ucc->hdesc_size * UDMA_RX_DESC_NUM);
}

uc->in_use = true;
uc->desc_rx_cur = 0;
uc->num_rx_bufs = 0;

if (uc->config.dir == DMA_DEV_TO_MEM) {
uc->cfg_data.flow_id_base = uc->rflow->id;
uc->cfg_data.flow_id_cnt = 1;
}

return 0;
}

static int udma_rfree(struct dma *dma)
{
struct udma_dev *ud = dev_get_priv(dma->dev);
struct udma_chan *uc;

if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
return -EINVAL;
}
uc = &ud->channels[dma->id];

if (udma_is_chan_running(uc))
udma_stop(uc);

udma_navss_psil_unpair(ud, uc->config.src_thread,
uc->config.dst_thread);

bcdma_free_bchan_resources(uc);
udma_free_tx_resources(uc);
udma_free_rx_resources(uc);
udma_reset_uchan(uc);

uc->in_use = false;

return 0;
}

static int udma_enable(struct dma *dma)
{
struct udma_dev *ud = dev_get_priv(dma->dev);
struct udma_chan *uc;
int ret;

if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
return -EINVAL;
}
uc = &ud->channels[dma->id];

ret = udma_start(uc);

return ret;
}

static int udma_disable(struct dma *dma)
{
struct udma_dev *ud = dev_get_priv(dma->dev);
struct udma_chan *uc;
int ret = 0;

if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
return -EINVAL;
}
uc = &ud->channels[dma->id];

if (udma_is_chan_running(uc))
ret = udma_stop(uc);
else
dev_err(dma->dev, "%s not running\n", __func__);

return ret;
}

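/*
 * DMA uclass .send callback: build a host descriptor for the buffer, flush
 * buffer and descriptor from the data cache, push the descriptor to the TX
 * ring and poll for completion.
 */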
static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
struct udma_dev *ud = dev_get_priv(dma->dev);
struct cppi5_host_desc_t *desc_tx;
dma_addr_t dma_src = (dma_addr_t)src;
struct ti_udma_drv_packet_data packet_data = { 0 };
dma_addr_t paddr;
struct udma_chan *uc;
u32 tc_ring_id;
int ret;

if (metadata)
packet_data = *((struct ti_udma_drv_packet_data *)metadata);

if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
return -EINVAL;
}
uc = &ud->channels[dma->id];

if (uc->config.dir != DMA_MEM_TO_DEV)
return -EINVAL;

tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);

desc_tx = uc->desc_tx;

cppi5_hdesc_reset_hbdesc(desc_tx);

cppi5_hdesc_init(desc_tx,
uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
uc->config.psd_size);
cppi5_hdesc_set_pktlen(desc_tx, len);
cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
/* pass below information from caller */
cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);

flush_dcache_range((unsigned long)dma_src,
ALIGN((unsigned long)dma_src + len,
ARCH_DMA_MINALIGN));
flush_dcache_range((unsigned long)desc_tx,
ALIGN((unsigned long)desc_tx + uc->config.hdesc_size,
ARCH_DMA_MINALIGN));

ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
if (ret) {
dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
dma->id, ret);
return ret;
}

udma_poll_completion(uc, &paddr);

return 0;
}

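/*
 * DMA uclass .receive callback: pop a completed descriptor from the RX ring,
 * invalidate the cached descriptor and buffer, and return the packet length
 * (0 if no packet is pending).
 */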
static int udma_receive(struct dma *dma, void **dst, void *metadata)
{
struct udma_dev *ud = dev_get_priv(dma->dev);
struct udma_chan_config *ucc;
struct cppi5_host_desc_t *desc_rx;
dma_addr_t buf_dma;
struct udma_chan *uc;
u32 buf_dma_len, pkt_len;
u32 port_id = 0;
int ret;

if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
return -EINVAL;
}
uc = &ud->channels[dma->id];
ucc = &uc->config;

if (uc->config.dir != DMA_DEV_TO_MEM)
return -EINVAL;
if (!uc->num_rx_bufs)
return -EINVAL;

ret = k3_nav_ringacc_ring_pop(uc->rflow->r_ring, &desc_rx);
if (ret && ret != -ENODATA) {
dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
return ret;
} else if (ret == -ENODATA) {
return 0;
}

/* invalidate cache data */
invalidate_dcache_range((ulong)desc_rx,
(ulong)(desc_rx + ucc->hdesc_size));

cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);

/* invalidate cache data */
invalidate_dcache_range((ulong)buf_dma,
(ulong)(buf_dma + buf_dma_len));

cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

*dst = (void *)buf_dma;
uc->num_rx_bufs--;

return pkt_len;
}

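/*
 * DMA uclass .of_xlate callback: pick the first unused channel and configure
 * it from the PSI-L endpoint of the remote thread given as DT argument. The
 * direction follows from the thread id: destination threads carry
 * K3_PSIL_DST_THREAD_ID_OFFSET, so they map to MEM_TO_DEV.
 */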
static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
{
struct udma_chan_config *ucc;
struct udma_dev *ud = dev_get_priv(dma->dev);
struct udma_chan *uc = &ud->channels[0];
struct psil_endpoint_config *ep_config;
u32 val;

for (val = 0; val < ud->ch_count; val++) {
uc = &ud->channels[val];
if (!uc->in_use)
break;
}

if (val == ud->ch_count)
return -EBUSY;

ucc = &uc->config;
ucc->remote_thread_id = args->args[0];
if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
ucc->dir = DMA_MEM_TO_DEV;
else
ucc->dir = DMA_DEV_TO_MEM;

ep_config = psil_get_ep_config(ucc->remote_thread_id);
if (IS_ERR(ep_config)) {
dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
uc->config.remote_thread_id);
ucc->dir = DMA_MEM_TO_MEM;
ucc->remote_thread_id = -1;
return false;
}

ucc->pkt_mode = ep_config->pkt_mode;
ucc->channel_tpl = ep_config->channel_tpl;
ucc->notdpkt = ep_config->notdpkt;
ucc->ep_type = ep_config->ep_type;

if (ud->match_data->type == DMA_TYPE_PKTDMA &&
ep_config->mapped_channel_id >= 0) {
ucc->mapped_channel_id = ep_config->mapped_channel_id;
ucc->default_flow_id = ep_config->default_flow_id;
} else {
ucc->mapped_channel_id = -1;
ucc->default_flow_id = -1;
}

ucc->needs_epib = ep_config->needs_epib;
ucc->psd_size = ep_config->psd_size;
ucc->metadata_size = (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + ucc->psd_size;

ucc->hdesc_size = cppi5_hdesc_calc_size(ucc->needs_epib,
ucc->psd_size, 0);
ucc->hdesc_size = ALIGN(ucc->hdesc_size, ARCH_DMA_MINALIGN);

dma->id = uc->id;
pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
dma->id, ucc->needs_epib,
ucc->psd_size, ucc->metadata_size,
ucc->remote_thread_id);

return 0;
}

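/*
 * Queue a receive buffer: initialize the next free host descriptor for the
 * buffer, flush it from the data cache and push it to the free descriptor
 * ring. Up to UDMA_RX_DESC_NUM buffers can be outstanding.
 */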
int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
struct udma_dev *ud = dev_get_priv(dma->dev);
struct cppi5_host_desc_t *desc_rx;
dma_addr_t dma_dst;
struct udma_chan *uc;
u32 desc_num;

if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
return -EINVAL;
}
uc = &ud->channels[dma->id];

if (uc->config.dir != DMA_DEV_TO_MEM)
return -EINVAL;

if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
return -EINVAL;

desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
desc_rx = uc->desc_rx + (desc_num * uc->config.hdesc_size);
dma_dst = (dma_addr_t)dst;

cppi5_hdesc_reset_hbdesc(desc_rx);

cppi5_hdesc_init(desc_rx,
uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
uc->config.psd_size);
cppi5_hdesc_set_pktlen(desc_rx, size);
cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);

flush_dcache_range((unsigned long)desc_rx,
ALIGN((unsigned long)desc_rx + uc->config.hdesc_size,
ARCH_DMA_MINALIGN));

udma_push_to_ring(uc->rflow->fd_ring, desc_rx);

uc->num_rx_bufs++;
uc->desc_rx_cur++;

return 0;
}

static int udma_get_cfg(struct dma *dma, u32 id, void **data)
{
struct udma_dev *ud = dev_get_priv(dma->dev);
struct udma_chan *uc;

if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
return -EINVAL;
}

switch (id) {
case TI_UDMA_CHAN_PRIV_INFO:
uc = &ud->channels[dma->id];
*data = &uc->cfg_data;
return 0;
}

return -EINVAL;
}

static const struct dma_ops udma_ops = {
.transfer = udma_transfer,
.of_xlate = udma_of_xlate,
.request = udma_request,
.rfree = udma_rfree,
.enable = udma_enable,
.disable = udma_disable,
.send = udma_send,
.receive = udma_receive,
.prepare_rcv_buf = udma_prepare_rcv_buf,
.get_cfg = udma_get_cfg,
};

static struct udma_match_data am654_main_data = {
.type = DMA_TYPE_UDMA,
.psil_base = 0x1000,
.enable_memcpy_support = true,
.statictr_z_mask = GENMASK(11, 0),
.oes = {
.udma_rchan = 0x200,
},
.tpl_levels = 2,
.level_start_idx = {
[0] = 8, /* Normal channels */
[1] = 0, /* High Throughput channels */
},
};

static struct udma_match_data am654_mcu_data = {
.type = DMA_TYPE_UDMA,
.psil_base = 0x6000,
.enable_memcpy_support = true,
.statictr_z_mask = GENMASK(11, 0),
.oes = {
.udma_rchan = 0x200,
},
.tpl_levels = 2,
.level_start_idx = {
[0] = 2, /* Normal channels */
[1] = 0, /* High Throughput channels */
},
};

static struct udma_match_data j721e_main_data = {
.type = DMA_TYPE_UDMA,
.psil_base = 0x1000,
.enable_memcpy_support = true,
.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
.statictr_z_mask = GENMASK(23, 0),
.oes = {
.udma_rchan = 0x400,
},
.tpl_levels = 3,
.level_start_idx = {
[0] = 16, /* Normal channels */
[1] = 4, /* High Throughput channels */
[2] = 0, /* Ultra High Throughput channels */
},
};

static struct udma_match_data j721e_mcu_data = {
.type = DMA_TYPE_UDMA,
.psil_base = 0x6000,
.enable_memcpy_support = true,
.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
.statictr_z_mask = GENMASK(23, 0),
.oes = {
.udma_rchan = 0x400,
},
.tpl_levels = 2,
.level_start_idx = {
[0] = 2, /* Normal channels */
[1] = 0, /* High Throughput channels */
},
};

static struct udma_match_data am64_bcdma_data = {
.type = DMA_TYPE_BCDMA,
.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
.enable_memcpy_support = true, /* Supported via bchan */
.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
.statictr_z_mask = GENMASK(23, 0),
.oes = {
.bcdma_bchan_data = 0x2200,
.bcdma_bchan_ring = 0x2400,
.bcdma_tchan_data = 0x2800,
.bcdma_tchan_ring = 0x2a00,
.bcdma_rchan_data = 0x2e00,
.bcdma_rchan_ring = 0x3000,
},
/* No throughput levels */
};

static struct udma_match_data am64_pktdma_data = {
.type = DMA_TYPE_PKTDMA,
.psil_base = 0x1000,
.enable_memcpy_support = false,
.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
.statictr_z_mask = GENMASK(23, 0),
.oes = {
.pktdma_tchan_flow = 0x1200,
.pktdma_rchan_flow = 0x1600,
},
/* No throughput levels */
};

static const struct udevice_id udma_ids[] = {
{
.compatible = "ti,am654-navss-main-udmap",
.data = (ulong)&am654_main_data,
},
{
.compatible = "ti,am654-navss-mcu-udmap",
.data = (ulong)&am654_mcu_data,
}, {
.compatible = "ti,j721e-navss-main-udmap",
.data = (ulong)&j721e_main_data,
}, {
.compatible = "ti,j721e-navss-mcu-udmap",
.data = (ulong)&j721e_mcu_data,
},
{
.compatible = "ti,am64-dmss-bcdma",
.data = (ulong)&am64_bcdma_data,
},
{
.compatible = "ti,am64-dmss-pktdma",
.data = (ulong)&am64_pktdma_data,
},
{ /* Sentinel */ },
};

U_BOOT_DRIVER(ti_edma3) = {
.name = "ti-udma",
.id = UCLASS_DMA,
.of_match = udma_ids,
.ops = &udma_ops,
.probe = udma_probe,
.priv_auto = sizeof(struct udma_dev),
};