xref: /linux/drivers/dma/ti/k3-udma.c (revision d6fd48ef)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
4  *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/delay.h>
10 #include <linux/dmaengine.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmapool.h>
13 #include <linux/err.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/list.h>
17 #include <linux/platform_device.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
20 #include <linux/sys_soc.h>
21 #include <linux/of.h>
22 #include <linux/of_dma.h>
23 #include <linux/of_device.h>
24 #include <linux/of_irq.h>
25 #include <linux/workqueue.h>
26 #include <linux/completion.h>
27 #include <linux/soc/ti/k3-ringacc.h>
28 #include <linux/soc/ti/ti_sci_protocol.h>
29 #include <linux/soc/ti/ti_sci_inta_msi.h>
30 #include <linux/dma/k3-event-router.h>
31 #include <linux/dma/ti-cppi5.h>
32 
33 #include "../virt-dma.h"
34 #include "k3-udma.h"
35 #include "k3-psil-priv.h"
36 
37 struct udma_static_tr {
38 	u8 elsize; /* RPSTR0 */
39 	u16 elcnt; /* RPSTR0 */
40 	u16 bstcnt; /* RPSTR1 */
41 };
42 
43 #define K3_UDMA_MAX_RFLOWS		1024
44 #define K3_UDMA_DEFAULT_RING_SIZE	16
45 
46 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
47 #define UDMA_RFLOW_SRCTAG_NONE		0
48 #define UDMA_RFLOW_SRCTAG_CFG_TAG	1
49 #define UDMA_RFLOW_SRCTAG_FLOW_ID	2
50 #define UDMA_RFLOW_SRCTAG_SRC_TAG	4
51 
52 #define UDMA_RFLOW_DSTTAG_NONE		0
53 #define UDMA_RFLOW_DSTTAG_CFG_TAG	1
54 #define UDMA_RFLOW_DSTTAG_FLOW_ID	2
55 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO	4
56 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI	5
57 
58 struct udma_chan;
59 
60 enum k3_dma_type {
61 	DMA_TYPE_UDMA = 0,
62 	DMA_TYPE_BCDMA,
63 	DMA_TYPE_PKTDMA,
64 };
65 
66 enum udma_mmr {
67 	MMR_GCFG = 0,
68 	MMR_BCHANRT,
69 	MMR_RCHANRT,
70 	MMR_TCHANRT,
71 	MMR_LAST,
72 };
73 
74 static const char * const mmr_names[] = {
75 	[MMR_GCFG] = "gcfg",
76 	[MMR_BCHANRT] = "bchanrt",
77 	[MMR_RCHANRT] = "rchanrt",
78 	[MMR_TCHANRT] = "tchanrt",
79 };
80 
81 struct udma_tchan {
82 	void __iomem *reg_rt;
83 
84 	int id;
85 	struct k3_ring *t_ring; /* Transmit ring */
86 	struct k3_ring *tc_ring; /* Transmit Completion ring */
87 	int tflow_id; /* applicable only for PKTDMA */
88 
89 };
90 
91 #define udma_bchan udma_tchan
92 
93 struct udma_rflow {
94 	int id;
95 	struct k3_ring *fd_ring; /* Free Descriptor ring */
96 	struct k3_ring *r_ring; /* Receive ring */
97 };
98 
99 struct udma_rchan {
100 	void __iomem *reg_rt;
101 
102 	int id;
103 };
104 
105 struct udma_oes_offsets {
106 	/* K3 UDMA Output Event Offset */
107 	u32 udma_rchan;
108 
109 	/* BCDMA Output Event Offsets */
110 	u32 bcdma_bchan_data;
111 	u32 bcdma_bchan_ring;
112 	u32 bcdma_tchan_data;
113 	u32 bcdma_tchan_ring;
114 	u32 bcdma_rchan_data;
115 	u32 bcdma_rchan_ring;
116 
117 	/* PKTDMA Output Event Offsets */
118 	u32 pktdma_tchan_flow;
119 	u32 pktdma_rchan_flow;
120 };
121 
122 #define UDMA_FLAG_PDMA_ACC32		BIT(0)
123 #define UDMA_FLAG_PDMA_BURST		BIT(1)
124 #define UDMA_FLAG_TDTYPE		BIT(2)
125 #define UDMA_FLAG_BURST_SIZE		BIT(3)
126 #define UDMA_FLAGS_J7_CLASS		(UDMA_FLAG_PDMA_ACC32 | \
127 					 UDMA_FLAG_PDMA_BURST | \
128 					 UDMA_FLAG_TDTYPE | \
129 					 UDMA_FLAG_BURST_SIZE)
130 
131 struct udma_match_data {
132 	enum k3_dma_type type;
133 	u32 psil_base;
134 	bool enable_memcpy_support;
135 	u32 flags;
136 	u32 statictr_z_mask;
137 	u8 burst_size[3];
138 	struct udma_soc_data *soc_data;
139 };
140 
141 struct udma_soc_data {
142 	struct udma_oes_offsets oes;
143 	u32 bcdma_trigger_event_offset;
144 };
145 
146 struct udma_hwdesc {
147 	size_t cppi5_desc_size;
148 	void *cppi5_desc_vaddr;
149 	dma_addr_t cppi5_desc_paddr;
150 
151 	/* TR descriptor internal pointers */
152 	void *tr_req_base;
153 	struct cppi5_tr_resp_t *tr_resp_base;
154 };
155 
156 struct udma_rx_flush {
157 	struct udma_hwdesc hwdescs[2];
158 
159 	size_t buffer_size;
160 	void *buffer_vaddr;
161 	dma_addr_t buffer_paddr;
162 };
163 
164 struct udma_tpl {
165 	u8 levels;
166 	u32 start_idx[3];
167 };
168 
169 struct udma_dev {
170 	struct dma_device ddev;
171 	struct device *dev;
172 	void __iomem *mmrs[MMR_LAST];
173 	const struct udma_match_data *match_data;
174 	const struct udma_soc_data *soc_data;
175 
176 	struct udma_tpl bchan_tpl;
177 	struct udma_tpl tchan_tpl;
178 	struct udma_tpl rchan_tpl;
179 
180 	size_t desc_align; /* alignment to use for descriptors */
181 
182 	struct udma_tisci_rm tisci_rm;
183 
184 	struct k3_ringacc *ringacc;
185 
186 	struct work_struct purge_work;
187 	struct list_head desc_to_purge;
188 	spinlock_t lock;
189 
190 	struct udma_rx_flush rx_flush;
191 
192 	int bchan_cnt;
193 	int tchan_cnt;
194 	int echan_cnt;
195 	int rchan_cnt;
196 	int rflow_cnt;
197 	int tflow_cnt;
198 	unsigned long *bchan_map;
199 	unsigned long *tchan_map;
200 	unsigned long *rchan_map;
201 	unsigned long *rflow_gp_map;
202 	unsigned long *rflow_gp_map_allocated;
203 	unsigned long *rflow_in_use;
204 	unsigned long *tflow_map;
205 
206 	struct udma_bchan *bchans;
207 	struct udma_tchan *tchans;
208 	struct udma_rchan *rchans;
209 	struct udma_rflow *rflows;
210 
211 	struct udma_chan *channels;
212 	u32 psil_base;
213 	u32 atype;
214 	u32 asel;
215 };
216 
217 struct udma_desc {
218 	struct virt_dma_desc vd;
219 
220 	bool terminated;
221 
222 	enum dma_transfer_direction dir;
223 
224 	struct udma_static_tr static_tr;
225 	u32 residue;
226 
227 	unsigned int sglen;
228 	unsigned int desc_idx; /* Only used for cyclic in packet mode */
229 	unsigned int tr_idx;
230 
231 	u32 metadata_size;
232 	void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
233 
234 	unsigned int hwdesc_count;
235 	struct udma_hwdesc hwdesc[];
236 };
237 
238 enum udma_chan_state {
239 	UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
240 	UDMA_CHAN_IS_ACTIVE, /* Normal operation */
241 	UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
242 };
243 
244 struct udma_tx_drain {
245 	struct delayed_work work;
246 	ktime_t tstamp;
247 	u32 residue;
248 };
249 
250 struct udma_chan_config {
251 	bool pkt_mode; /* TR or packet */
252 	bool needs_epib; /* whether EPIB is needed for the communication */
253 	u32 psd_size; /* size of Protocol Specific Data */
254 	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
255 	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
256 	bool notdpkt; /* Suppress sending TDC packet */
257 	int remote_thread_id;
258 	u32 atype;
259 	u32 asel;
260 	u32 src_thread;
261 	u32 dst_thread;
262 	enum psil_endpoint_type ep_type;
263 	bool enable_acc32;
264 	bool enable_burst;
265 	enum udma_tp_level channel_tpl; /* Channel Throughput Level */
266 
267 	u32 tr_trigger_type;
268 	unsigned long tx_flags;
269 
270 	/* PKDMA mapped channel */
271 	int mapped_channel_id;
272 	/* PKTDMA default tflow or rflow for mapped channel */
273 	int default_flow_id;
274 
275 	enum dma_transfer_direction dir;
276 };
277 
278 struct udma_chan {
279 	struct virt_dma_chan vc;
280 	struct dma_slave_config	cfg;
281 	struct udma_dev *ud;
282 	struct device *dma_dev;
283 	struct udma_desc *desc;
284 	struct udma_desc *terminated_desc;
285 	struct udma_static_tr static_tr;
286 	char *name;
287 
288 	struct udma_bchan *bchan;
289 	struct udma_tchan *tchan;
290 	struct udma_rchan *rchan;
291 	struct udma_rflow *rflow;
292 
293 	bool psil_paired;
294 
295 	int irq_num_ring;
296 	int irq_num_udma;
297 
298 	bool cyclic;
299 	bool paused;
300 
301 	enum udma_chan_state state;
302 	struct completion teardown_completed;
303 
304 	struct udma_tx_drain tx_drain;
305 
306 	/* Channel configuration parameters */
307 	struct udma_chan_config config;
308 
309 	/* dmapool for packet mode descriptors */
310 	bool use_dma_pool;
311 	struct dma_pool *hdesc_pool;
312 
313 	u32 id;
314 };
315 
316 static inline struct udma_dev *to_udma_dev(struct dma_device *d)
317 {
318 	return container_of(d, struct udma_dev, ddev);
319 }
320 
321 static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
322 {
323 	return container_of(c, struct udma_chan, vc.chan);
324 }
325 
326 static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
327 {
328 	return container_of(t, struct udma_desc, vd.tx);
329 }
330 
331 /* Generic register access functions */
332 static inline u32 udma_read(void __iomem *base, int reg)
333 {
334 	return readl(base + reg);
335 }
336 
337 static inline void udma_write(void __iomem *base, int reg, u32 val)
338 {
339 	writel(val, base + reg);
340 }
341 
342 static inline void udma_update_bits(void __iomem *base, int reg,
343 				    u32 mask, u32 val)
344 {
345 	u32 tmp, orig;
346 
347 	orig = readl(base + reg);
348 	tmp = orig & ~mask;
349 	tmp |= (val & mask);
350 
351 	if (tmp != orig)
352 		writel(tmp, base + reg);
353 }
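
/*
 * Example (illustrative only, not a call made at this point):
 * udma_update_bits() is a read-modify-write helper that skips the writel()
 * when the masked value is unchanged, e.g.
 *
 *	udma_update_bits(uc->tchan->reg_rt, UDMA_CHAN_RT_CTL_REG,
 *			 UDMA_CHAN_RT_CTL_PAUSE, UDMA_CHAN_RT_CTL_PAUSE);
 *
 * sets only the PAUSE bit and leaves the rest of the register untouched.
 */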
354 
355 /* TCHANRT */
356 static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
357 {
358 	if (!uc->tchan)
359 		return 0;
360 	return udma_read(uc->tchan->reg_rt, reg);
361 }
362 
363 static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
364 {
365 	if (!uc->tchan)
366 		return;
367 	udma_write(uc->tchan->reg_rt, reg, val);
368 }
369 
370 static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
371 					    u32 mask, u32 val)
372 {
373 	if (!uc->tchan)
374 		return;
375 	udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
376 }
377 
378 /* RCHANRT */
379 static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
380 {
381 	if (!uc->rchan)
382 		return 0;
383 	return udma_read(uc->rchan->reg_rt, reg);
384 }
385 
386 static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
387 {
388 	if (!uc->rchan)
389 		return;
390 	udma_write(uc->rchan->reg_rt, reg, val);
391 }
392 
393 static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
394 					    u32 mask, u32 val)
395 {
396 	if (!uc->rchan)
397 		return;
398 	udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
399 }
400 
401 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
402 {
403 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
404 
405 	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
406 	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
407 					      tisci_rm->tisci_navss_dev_id,
408 					      src_thread, dst_thread);
409 }
410 
411 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
412 			     u32 dst_thread)
413 {
414 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
415 
416 	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
417 	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
418 						tisci_rm->tisci_navss_dev_id,
419 						src_thread, dst_thread);
420 }
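
/*
 * PSI-L pairing sketch: the source and destination threads of a channel are
 * paired through TI-SCI, with the destination thread marked by OR-ing in
 * K3_PSIL_DST_THREAD_ID_OFFSET before the request goes out. An illustrative
 * call using the fields of struct udma_chan_config (not made at this point):
 *
 *	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
 *	if (ret)
 *		dev_err(ud->dev, "PSI-L pairing failed: %d\n", ret);
 */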
421 
422 static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
423 {
424 	struct device *chan_dev = &chan->dev->device;
425 
426 	if (asel == 0) {
427 		/* No special handling for the channel */
428 		chan->dev->chan_dma_dev = false;
429 
430 		chan_dev->dma_coherent = false;
431 		chan_dev->dma_parms = NULL;
432 	} else if (asel == 14 || asel == 15) {
433 		chan->dev->chan_dma_dev = true;
434 
435 		chan_dev->dma_coherent = true;
436 		dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
437 		chan_dev->dma_parms = chan_dev->parent->dma_parms;
438 	} else {
439 		dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);
440 
441 		chan_dev->dma_coherent = false;
442 		chan_dev->dma_parms = NULL;
443 	}
444 }
445 
446 static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id)
447 {
448 	int i;
449 
450 	for (i = 0; i < tpl_map->levels; i++) {
451 		if (chan_id >= tpl_map->start_idx[i])
452 			return i;
453 	}
454 
455 	return 0;
456 }
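
/*
 * Worked example with illustrative values: for tpl_map->levels = 2 and
 * start_idx = { 8, 0 }, a chan_id of 10 maps to TPL index 0 (it is at or
 * above the first start index), while a chan_id of 3 falls through to TPL
 * index 1. The final "return 0" is only reached when chan_id is below every
 * start index.
 */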
457 
458 static void udma_reset_uchan(struct udma_chan *uc)
459 {
460 	memset(&uc->config, 0, sizeof(uc->config));
461 	uc->config.remote_thread_id = -1;
462 	uc->config.mapped_channel_id = -1;
463 	uc->config.default_flow_id = -1;
464 	uc->state = UDMA_CHAN_IS_IDLE;
465 }
466 
467 static void udma_dump_chan_stdata(struct udma_chan *uc)
468 {
469 	struct device *dev = uc->ud->dev;
470 	u32 offset;
471 	int i;
472 
473 	if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
474 		dev_dbg(dev, "TCHAN State data:\n");
475 		for (i = 0; i < 32; i++) {
476 			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
477 			dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
478 				udma_tchanrt_read(uc, offset));
479 		}
480 	}
481 
482 	if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
483 		dev_dbg(dev, "RCHAN State data:\n");
484 		for (i = 0; i < 32; i++) {
485 			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
486 			dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
487 				udma_rchanrt_read(uc, offset));
488 		}
489 	}
490 }
491 
492 static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
493 						    int idx)
494 {
495 	return d->hwdesc[idx].cppi5_desc_paddr;
496 }
497 
498 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
499 {
500 	return d->hwdesc[idx].cppi5_desc_vaddr;
501 }
502 
503 static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
504 						   dma_addr_t paddr)
505 {
506 	struct udma_desc *d = uc->terminated_desc;
507 
508 	if (d) {
509 		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
510 								   d->desc_idx);
511 
512 		if (desc_paddr != paddr)
513 			d = NULL;
514 	}
515 
516 	if (!d) {
517 		d = uc->desc;
518 		if (d) {
519 			dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
520 								d->desc_idx);
521 
522 			if (desc_paddr != paddr)
523 				d = NULL;
524 		}
525 	}
526 
527 	return d;
528 }
529 
530 static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
531 {
532 	if (uc->use_dma_pool) {
533 		int i;
534 
535 		for (i = 0; i < d->hwdesc_count; i++) {
536 			if (!d->hwdesc[i].cppi5_desc_vaddr)
537 				continue;
538 
539 			dma_pool_free(uc->hdesc_pool,
540 				      d->hwdesc[i].cppi5_desc_vaddr,
541 				      d->hwdesc[i].cppi5_desc_paddr);
542 
543 			d->hwdesc[i].cppi5_desc_vaddr = NULL;
544 		}
545 	} else if (d->hwdesc[0].cppi5_desc_vaddr) {
546 		dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
547 				  d->hwdesc[0].cppi5_desc_vaddr,
548 				  d->hwdesc[0].cppi5_desc_paddr);
549 
550 		d->hwdesc[0].cppi5_desc_vaddr = NULL;
551 	}
552 }
553 
554 static void udma_purge_desc_work(struct work_struct *work)
555 {
556 	struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
557 	struct virt_dma_desc *vd, *_vd;
558 	unsigned long flags;
559 	LIST_HEAD(head);
560 
561 	spin_lock_irqsave(&ud->lock, flags);
562 	list_splice_tail_init(&ud->desc_to_purge, &head);
563 	spin_unlock_irqrestore(&ud->lock, flags);
564 
565 	list_for_each_entry_safe(vd, _vd, &head, node) {
566 		struct udma_chan *uc = to_udma_chan(vd->tx.chan);
567 		struct udma_desc *d = to_udma_desc(&vd->tx);
568 
569 		udma_free_hwdesc(uc, d);
570 		list_del(&vd->node);
571 		kfree(d);
572 	}
573 
574 	/* If more to purge, schedule the work again */
575 	if (!list_empty(&ud->desc_to_purge))
576 		schedule_work(&ud->purge_work);
577 }
578 
579 static void udma_desc_free(struct virt_dma_desc *vd)
580 {
581 	struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
582 	struct udma_chan *uc = to_udma_chan(vd->tx.chan);
583 	struct udma_desc *d = to_udma_desc(&vd->tx);
584 	unsigned long flags;
585 
586 	if (uc->terminated_desc == d)
587 		uc->terminated_desc = NULL;
588 
589 	if (uc->use_dma_pool) {
590 		udma_free_hwdesc(uc, d);
591 		kfree(d);
592 		return;
593 	}
594 
595 	spin_lock_irqsave(&ud->lock, flags);
596 	list_add_tail(&vd->node, &ud->desc_to_purge);
597 	spin_unlock_irqrestore(&ud->lock, flags);
598 
599 	schedule_work(&ud->purge_work);
600 }
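
/*
 * Note on the split free path above: descriptors that came from the
 * per-channel dma_pool are released directly, while dma_alloc_coherent()
 * backed descriptors are queued on ud->desc_to_purge and released from
 * udma_purge_desc_work(), since udma_desc_free() may run in atomic context
 * (the virt-dma tasklet) where dma_free_coherent() is not safe to call.
 */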
601 
602 static bool udma_is_chan_running(struct udma_chan *uc)
603 {
604 	u32 trt_ctl = 0;
605 	u32 rrt_ctl = 0;
606 
607 	if (uc->tchan)
608 		trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
609 	if (uc->rchan)
610 		rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
611 
612 	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
613 		return true;
614 
615 	return false;
616 }
617 
618 static bool udma_is_chan_paused(struct udma_chan *uc)
619 {
620 	u32 val, pause_mask;
621 
622 	switch (uc->config.dir) {
623 	case DMA_DEV_TO_MEM:
624 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
625 		pause_mask = UDMA_PEER_RT_EN_PAUSE;
626 		break;
627 	case DMA_MEM_TO_DEV:
628 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
629 		pause_mask = UDMA_PEER_RT_EN_PAUSE;
630 		break;
631 	case DMA_MEM_TO_MEM:
632 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
633 		pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
634 		break;
635 	default:
636 		return false;
637 	}
638 
639 	if (val & pause_mask)
640 		return true;
641 
642 	return false;
643 }
644 
645 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
646 {
647 	return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
648 }
649 
650 static int udma_push_to_ring(struct udma_chan *uc, int idx)
651 {
652 	struct udma_desc *d = uc->desc;
653 	struct k3_ring *ring = NULL;
654 	dma_addr_t paddr;
655 
656 	switch (uc->config.dir) {
657 	case DMA_DEV_TO_MEM:
658 		ring = uc->rflow->fd_ring;
659 		break;
660 	case DMA_MEM_TO_DEV:
661 	case DMA_MEM_TO_MEM:
662 		ring = uc->tchan->t_ring;
663 		break;
664 	default:
665 		return -EINVAL;
666 	}
667 
668 	/* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
669 	if (idx == -1) {
670 		paddr = udma_get_rx_flush_hwdesc_paddr(uc);
671 	} else {
672 		paddr = udma_curr_cppi5_desc_paddr(d, idx);
673 
674 		wmb(); /* Ensure that writes are not moved over this point */
675 	}
676 
677 	return k3_ringacc_ring_push(ring, &paddr);
678 }
679 
680 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
681 {
682 	if (uc->config.dir != DMA_DEV_TO_MEM)
683 		return false;
684 
685 	if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
686 		return true;
687 
688 	return false;
689 }
690 
691 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
692 {
693 	struct k3_ring *ring = NULL;
694 	int ret;
695 
696 	switch (uc->config.dir) {
697 	case DMA_DEV_TO_MEM:
698 		ring = uc->rflow->r_ring;
699 		break;
700 	case DMA_MEM_TO_DEV:
701 	case DMA_MEM_TO_MEM:
702 		ring = uc->tchan->tc_ring;
703 		break;
704 	default:
705 		return -ENOENT;
706 	}
707 
708 	ret = k3_ringacc_ring_pop(ring, addr);
709 	if (ret)
710 		return ret;
711 
712 	rmb(); /* Ensure that reads are not moved before this point */
713 
714 	/* Teardown completion */
715 	if (cppi5_desc_is_tdcm(*addr))
716 		return 0;
717 
718 	/* Check for flush descriptor */
719 	if (udma_desc_is_rx_flush(uc, *addr))
720 		return -ENOENT;
721 
722 	return 0;
723 }
724 
725 static void udma_reset_rings(struct udma_chan *uc)
726 {
727 	struct k3_ring *ring1 = NULL;
728 	struct k3_ring *ring2 = NULL;
729 
730 	switch (uc->config.dir) {
731 	case DMA_DEV_TO_MEM:
732 		if (uc->rchan) {
733 			ring1 = uc->rflow->fd_ring;
734 			ring2 = uc->rflow->r_ring;
735 		}
736 		break;
737 	case DMA_MEM_TO_DEV:
738 	case DMA_MEM_TO_MEM:
739 		if (uc->tchan) {
740 			ring1 = uc->tchan->t_ring;
741 			ring2 = uc->tchan->tc_ring;
742 		}
743 		break;
744 	default:
745 		break;
746 	}
747 
748 	if (ring1)
749 		k3_ringacc_ring_reset_dma(ring1,
750 					  k3_ringacc_ring_get_occ(ring1));
751 	if (ring2)
752 		k3_ringacc_ring_reset(ring2);
753 
754 	/* make sure we are not leaking memory via a stalled descriptor */
755 	if (uc->terminated_desc) {
756 		udma_desc_free(&uc->terminated_desc->vd);
757 		uc->terminated_desc = NULL;
758 	}
759 }
760 
761 static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
762 {
763 	if (uc->desc->dir == DMA_DEV_TO_MEM) {
764 		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
765 		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
766 		if (uc->config.ep_type != PSIL_EP_NATIVE)
767 			udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
768 	} else {
769 		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
770 		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
771 		if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
772 			udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
773 	}
774 }
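
/*
 * The CHAN_RT byte counter registers behave as write-to-decrement: writing
 * a value subtracts it from the counter. Writing back the byte count of a
 * completed descriptor (as above) removes exactly that transfer from the
 * running counters, and writing back the full read value (as
 * udma_reset_counters() below does) clears them.
 */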
775 
776 static void udma_reset_counters(struct udma_chan *uc)
777 {
778 	u32 val;
779 
780 	if (uc->tchan) {
781 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
782 		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
783 
784 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
785 		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
786 
787 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
788 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
789 
790 		if (!uc->bchan) {
791 			val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
792 			udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
793 		}
794 	}
795 
796 	if (uc->rchan) {
797 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
798 		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
799 
800 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
801 		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
802 
803 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
804 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
805 
806 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
807 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
808 	}
809 }
810 
811 static int udma_reset_chan(struct udma_chan *uc, bool hard)
812 {
813 	switch (uc->config.dir) {
814 	case DMA_DEV_TO_MEM:
815 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
816 		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
817 		break;
818 	case DMA_MEM_TO_DEV:
819 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
820 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
821 		break;
822 	case DMA_MEM_TO_MEM:
823 		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
824 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
825 		break;
826 	default:
827 		return -EINVAL;
828 	}
829 
830 	/* Reset all counters */
831 	udma_reset_counters(uc);
832 
833 	/* Hard reset: re-initialize the channel to reset it */
834 	if (hard) {
835 		struct udma_chan_config ucc_backup;
836 		int ret;
837 
838 		memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
839 		uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
840 
841 		/* restore the channel configuration */
842 		memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
843 		ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
844 		if (ret)
845 			return ret;
846 
847 		/*
848 		 * Setting forced teardown after a forced reset helps recover
849 		 * the rchan.
850 		 */
851 		if (uc->config.dir == DMA_DEV_TO_MEM)
852 			udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
853 					   UDMA_CHAN_RT_CTL_EN |
854 					   UDMA_CHAN_RT_CTL_TDOWN |
855 					   UDMA_CHAN_RT_CTL_FTDOWN);
856 	}
857 	uc->state = UDMA_CHAN_IS_IDLE;
858 
859 	return 0;
860 }
861 
862 static void udma_start_desc(struct udma_chan *uc)
863 {
864 	struct udma_chan_config *ucc = &uc->config;
865 
866 	if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
867 	    (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
868 		int i;
869 
870 		/*
871 		 * UDMA only: push all descriptors to the ring for packet mode
872 		 * cyclic or RX transfers.
873 		 * PKTDMA supports pre-linked descriptors and does not support
874 		 * cyclic mode.
875 		 */
876 		for (i = 0; i < uc->desc->sglen; i++)
877 			udma_push_to_ring(uc, i);
878 	} else {
879 		udma_push_to_ring(uc, 0);
880 	}
881 }
882 
883 static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
884 {
885 	/* Only PDMAs have staticTR */
886 	if (uc->config.ep_type == PSIL_EP_NATIVE)
887 		return false;
888 
889 	/* Check if the staticTR configuration has changed for TX */
890 	if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
891 		return true;
892 
893 	return false;
894 }
895 
896 static int udma_start(struct udma_chan *uc)
897 {
898 	struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
899 
900 	if (!vd) {
901 		uc->desc = NULL;
902 		return -ENOENT;
903 	}
904 
905 	list_del(&vd->node);
906 
907 	uc->desc = to_udma_desc(&vd->tx);
908 
909 	/* Channel is already running and does not need reconfiguration */
910 	if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
911 		udma_start_desc(uc);
912 		goto out;
913 	}
914 
915 	/* Make sure that we clear the teardown bit, if it is set */
916 	udma_reset_chan(uc, false);
917 
918 	/* Push descriptors before we start the channel */
919 	udma_start_desc(uc);
920 
921 	switch (uc->desc->dir) {
922 	case DMA_DEV_TO_MEM:
923 		/* Config remote TR */
924 		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
925 			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
926 				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
927 			const struct udma_match_data *match_data =
928 							uc->ud->match_data;
929 
930 			if (uc->config.enable_acc32)
931 				val |= PDMA_STATIC_TR_XY_ACC32;
932 			if (uc->config.enable_burst)
933 				val |= PDMA_STATIC_TR_XY_BURST;
934 
935 			udma_rchanrt_write(uc,
936 					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
937 					   val);
938 
939 			udma_rchanrt_write(uc,
940 				UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
941 				PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
942 						 match_data->statictr_z_mask));
943 
944 			/* save the current staticTR configuration */
945 			memcpy(&uc->static_tr, &uc->desc->static_tr,
946 			       sizeof(uc->static_tr));
947 		}
948 
949 		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
950 				   UDMA_CHAN_RT_CTL_EN);
951 
952 		/* Enable remote */
953 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
954 				   UDMA_PEER_RT_EN_ENABLE);
955 
956 		break;
957 	case DMA_MEM_TO_DEV:
958 		/* Config remote TR */
959 		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
960 			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
961 				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
962 
963 			if (uc->config.enable_acc32)
964 				val |= PDMA_STATIC_TR_XY_ACC32;
965 			if (uc->config.enable_burst)
966 				val |= PDMA_STATIC_TR_XY_BURST;
967 
968 			udma_tchanrt_write(uc,
969 					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
970 					   val);
971 
972 			/* save the current staticTR configuration */
973 			memcpy(&uc->static_tr, &uc->desc->static_tr,
974 			       sizeof(uc->static_tr));
975 		}
976 
977 		/* Enable remote */
978 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
979 				   UDMA_PEER_RT_EN_ENABLE);
980 
981 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
982 				   UDMA_CHAN_RT_CTL_EN);
983 
984 		break;
985 	case DMA_MEM_TO_MEM:
986 		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
987 				   UDMA_CHAN_RT_CTL_EN);
988 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
989 				   UDMA_CHAN_RT_CTL_EN);
990 
991 		break;
992 	default:
993 		return -EINVAL;
994 	}
995 
996 	uc->state = UDMA_CHAN_IS_ACTIVE;
997 out:
998 
999 	return 0;
1000 }
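
/*
 * Note on the ordering in udma_start(): descriptors are pushed to the ring
 * before the channel is (re)enabled, and the receiving side is brought up
 * before the transmitting side (for DEV_TO_MEM the local rchan is enabled
 * before the remote peer, for MEM_TO_DEV the remote peer is enabled before
 * the local tchan), presumably so that data never arrives at an endpoint
 * that is not yet enabled.
 */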
1001 
1002 static int udma_stop(struct udma_chan *uc)
1003 {
1004 	enum udma_chan_state old_state = uc->state;
1005 
1006 	uc->state = UDMA_CHAN_IS_TERMINATING;
1007 	reinit_completion(&uc->teardown_completed);
1008 
1009 	switch (uc->config.dir) {
1010 	case DMA_DEV_TO_MEM:
1011 		if (!uc->cyclic && !uc->desc)
1012 			udma_push_to_ring(uc, -1);
1013 
1014 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
1015 				   UDMA_PEER_RT_EN_ENABLE |
1016 				   UDMA_PEER_RT_EN_TEARDOWN);
1017 		break;
1018 	case DMA_MEM_TO_DEV:
1019 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
1020 				   UDMA_PEER_RT_EN_ENABLE |
1021 				   UDMA_PEER_RT_EN_FLUSH);
1022 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
1023 				   UDMA_CHAN_RT_CTL_EN |
1024 				   UDMA_CHAN_RT_CTL_TDOWN);
1025 		break;
1026 	case DMA_MEM_TO_MEM:
1027 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
1028 				   UDMA_CHAN_RT_CTL_EN |
1029 				   UDMA_CHAN_RT_CTL_TDOWN);
1030 		break;
1031 	default:
1032 		uc->state = old_state;
1033 		complete_all(&uc->teardown_completed);
1034 		return -EINVAL;
1035 	}
1036 
1037 	return 0;
1038 }
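
/*
 * Teardown sketch: udma_stop() only initiates the teardown. For DEV_TO_MEM
 * with no descriptor queued, a dummy RX flush descriptor is pushed first
 * (udma_push_to_ring(uc, -1)) so that any data still in flight can be
 * drained into the rx_flush buffer. Completion is reported asynchronously:
 * a teardown completion message (TDCM) is popped from the completion ring
 * in udma_ring_irq_handler(), which completes uc->teardown_completed.
 */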
1039 
1040 static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
1041 {
1042 	struct udma_desc *d = uc->desc;
1043 	struct cppi5_host_desc_t *h_desc;
1044 
1045 	h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
1046 	cppi5_hdesc_reset_to_original(h_desc);
1047 	udma_push_to_ring(uc, d->desc_idx);
1048 	d->desc_idx = (d->desc_idx + 1) % d->sglen;
1049 }
1050 
1051 static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
1052 {
1053 	struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
1054 
1055 	memcpy(d->metadata, h_desc->epib, d->metadata_size);
1056 }
1057 
1058 static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
1059 {
1060 	u32 peer_bcnt, bcnt;
1061 
1062 	/*
1063 	 * Only TX towards PDMA is affected.
1064 	 * If DMA_PREP_INTERRUPT is not set by the consumer then skip the
1065 	 * transfer completion calculation; in that case the consumer must
1066 	 * ensure that there is no stale data in the DMA fabric.
1067 	 */
1068 	if (uc->config.ep_type == PSIL_EP_NATIVE ||
1069 	    uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT))
1070 		return true;
1071 
1072 	peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
1073 	bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
1074 
1075 	/* Transfer is incomplete, store current residue and time stamp */
1076 	if (peer_bcnt < bcnt) {
1077 		uc->tx_drain.residue = bcnt - peer_bcnt;
1078 		uc->tx_drain.tstamp = ktime_get();
1079 		return false;
1080 	}
1081 
1082 	return true;
1083 }
1084 
1085 static void udma_check_tx_completion(struct work_struct *work)
1086 {
1087 	struct udma_chan *uc = container_of(work, typeof(*uc),
1088 					    tx_drain.work.work);
1089 	bool desc_done = true;
1090 	u32 residue_diff;
1091 	ktime_t time_diff;
1092 	unsigned long delay;
1093 
1094 	while (1) {
1095 		if (uc->desc) {
1096 			/* Get previous residue and time stamp */
1097 			residue_diff = uc->tx_drain.residue;
1098 			time_diff = uc->tx_drain.tstamp;
1099 			/*
1100 			 * Get current residue and time stamp or see if
1101 			 * transfer is complete
1102 			 */
1103 			desc_done = udma_is_desc_really_done(uc, uc->desc);
1104 		}
1105 
1106 		if (!desc_done) {
1107 			/*
1108 			 * Find the time delta and residue delta w.r.t
1109 			 * previous poll
1110 			 */
1111 			time_diff = ktime_sub(uc->tx_drain.tstamp,
1112 					      time_diff) + 1;
1113 			residue_diff -= uc->tx_drain.residue;
1114 			if (residue_diff) {
1115 				/*
1116 				 * Try to guess when we should check
1117 				 * next time by calculating rate at
1118 				 * which data is being drained at the
1119 				 * peer device
1120 				 */
1121 				delay = (time_diff / residue_diff) *
1122 					uc->tx_drain.residue;
1123 			} else {
1124 				/* No progress, check again in 1 second  */
1125 				schedule_delayed_work(&uc->tx_drain.work, HZ);
1126 				break;
1127 			}
1128 
1129 			usleep_range(ktime_to_us(delay),
1130 				     ktime_to_us(delay) + 10);
1131 			continue;
1132 		}
1133 
1134 		if (uc->desc) {
1135 			struct udma_desc *d = uc->desc;
1136 
1137 			udma_decrement_byte_counters(uc, d->residue);
1138 			udma_start(uc);
1139 			vchan_cookie_complete(&d->vd);
1140 			break;
1141 		}
1142 
1143 		break;
1144 	}
1145 }
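
/*
 * Example of the delay estimate above: if the previous poll saw 1500 bytes
 * of residue, the current poll sees 500 bytes and 2 ms have elapsed, then
 * 1000 bytes were drained in 2 ms and the remaining 500 bytes are expected
 * to take roughly (2 ms / 1000) * 500 = 1 ms, which is used as the sleep
 * time before the next check.
 */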
1146 
1147 static irqreturn_t udma_ring_irq_handler(int irq, void *data)
1148 {
1149 	struct udma_chan *uc = data;
1150 	struct udma_desc *d;
1151 	dma_addr_t paddr = 0;
1152 
1153 	if (udma_pop_from_ring(uc, &paddr) || !paddr)
1154 		return IRQ_HANDLED;
1155 
1156 	spin_lock(&uc->vc.lock);
1157 
1158 	/* Teardown completion message */
1159 	if (cppi5_desc_is_tdcm(paddr)) {
1160 		complete_all(&uc->teardown_completed);
1161 
1162 		if (uc->terminated_desc) {
1163 			udma_desc_free(&uc->terminated_desc->vd);
1164 			uc->terminated_desc = NULL;
1165 		}
1166 
1167 		if (!uc->desc)
1168 			udma_start(uc);
1169 
1170 		goto out;
1171 	}
1172 
1173 	d = udma_udma_desc_from_paddr(uc, paddr);
1174 
1175 	if (d) {
1176 		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
1177 								   d->desc_idx);
1178 		if (desc_paddr != paddr) {
1179 			dev_err(uc->ud->dev, "not matching descriptors!\n");
1180 			goto out;
1181 		}
1182 
1183 		if (d == uc->desc) {
1184 			/* active descriptor */
1185 			if (uc->cyclic) {
1186 				udma_cyclic_packet_elapsed(uc);
1187 				vchan_cyclic_callback(&d->vd);
1188 			} else {
1189 				if (udma_is_desc_really_done(uc, d)) {
1190 					udma_decrement_byte_counters(uc, d->residue);
1191 					udma_start(uc);
1192 					vchan_cookie_complete(&d->vd);
1193 				} else {
1194 					schedule_delayed_work(&uc->tx_drain.work,
1195 							      0);
1196 				}
1197 			}
1198 		} else {
1199 			/*
1200 			 * terminated descriptor, mark the descriptor as
1201 			 * completed to update the channel's cookie marker
1202 			 */
1203 			dma_cookie_complete(&d->vd.tx);
1204 		}
1205 	}
1206 out:
1207 	spin_unlock(&uc->vc.lock);
1208 
1209 	return IRQ_HANDLED;
1210 }
1211 
1212 static irqreturn_t udma_udma_irq_handler(int irq, void *data)
1213 {
1214 	struct udma_chan *uc = data;
1215 	struct udma_desc *d;
1216 
1217 	spin_lock(&uc->vc.lock);
1218 	d = uc->desc;
1219 	if (d) {
1220 		d->tr_idx = (d->tr_idx + 1) % d->sglen;
1221 
1222 		if (uc->cyclic) {
1223 			vchan_cyclic_callback(&d->vd);
1224 		} else {
1225 			/* TODO: figure out the real amount of data */
1226 			udma_decrement_byte_counters(uc, d->residue);
1227 			udma_start(uc);
1228 			vchan_cookie_complete(&d->vd);
1229 		}
1230 	}
1231 
1232 	spin_unlock(&uc->vc.lock);
1233 
1234 	return IRQ_HANDLED;
1235 }
1236 
1237 /**
1238  * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1239  * @ud: UDMA device
1240  * @from: Start the search from this flow id number
1241  * @cnt: Number of consecutive flow ids to allocate
1242  *
1243  * Allocate range of RX flow ids for future use, those flows can be requested
1244  * only using an explicit flow id number. If @from is set to -1 it will try to
1245  * find the first free range. If @from is a positive value it will force
1246  * allocation only of the specified range of flows.
1247  *
1248  * Returns -ENOMEM if it can't find a free range.
1249  * -EEXIST if the requested range is busy.
1250  * -EINVAL if wrong input values are passed.
1251  * Returns flow id on success.
1252  */
1253 static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1254 {
1255 	int start, tmp_from;
1256 	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1257 
1258 	tmp_from = from;
1259 	if (tmp_from < 0)
1260 		tmp_from = ud->rchan_cnt;
1261 	/* default flows can't be allocated; they are accessible only by id */
1262 	if (tmp_from < ud->rchan_cnt)
1263 		return -EINVAL;
1264 
1265 	if (tmp_from + cnt > ud->rflow_cnt)
1266 		return -EINVAL;
1267 
1268 	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1269 		  ud->rflow_cnt);
1270 
1271 	start = bitmap_find_next_zero_area(tmp,
1272 					   ud->rflow_cnt,
1273 					   tmp_from, cnt, 0);
1274 	if (start >= ud->rflow_cnt)
1275 		return -ENOMEM;
1276 
1277 	if (from >= 0 && start != from)
1278 		return -EEXIST;
1279 
1280 	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1281 	return start;
1282 }
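
/*
 * Worked example with illustrative counts: with ud->rchan_cnt = 32 and
 * ud->rflow_cnt = 48, __udma_alloc_gp_rflow_range(ud, -1, 4) looks for four
 * consecutive ids in 32..47 that are clear in both rflow_gp_map and
 * rflow_gp_map_allocated, marks them allocated and returns the first id.
 * With from = 40 it returns -EEXIST unless the first such run at or after
 * 40 starts exactly at 40.
 */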
1283 
1284 static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1285 {
1286 	if (from < ud->rchan_cnt)
1287 		return -EINVAL;
1288 	if (from + cnt > ud->rflow_cnt)
1289 		return -EINVAL;
1290 
1291 	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1292 	return 0;
1293 }
1294 
1295 static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1296 {
1297 	/*
1298 	 * An attempt to request an rflow by ID can be made for any rflow that
1299 	 * is not in use, with the assumption that the caller knows what it is
1300 	 * doing. TI-SCI FW will perform an additional permission check anyway,
1301 	 * so it's safe.
1302 	 */
1303 
1304 	if (id < 0 || id >= ud->rflow_cnt)
1305 		return ERR_PTR(-ENOENT);
1306 
1307 	if (test_bit(id, ud->rflow_in_use))
1308 		return ERR_PTR(-ENOENT);
1309 
1310 	if (ud->rflow_gp_map) {
1311 		/* GP rflow has to be allocated first */
1312 		if (!test_bit(id, ud->rflow_gp_map) &&
1313 		    !test_bit(id, ud->rflow_gp_map_allocated))
1314 			return ERR_PTR(-EINVAL);
1315 	}
1316 
1317 	dev_dbg(ud->dev, "get rflow%d\n", id);
1318 	set_bit(id, ud->rflow_in_use);
1319 	return &ud->rflows[id];
1320 }
1321 
1322 static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1323 {
1324 	if (!test_bit(rflow->id, ud->rflow_in_use)) {
1325 		dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1326 		return;
1327 	}
1328 
1329 	dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1330 	clear_bit(rflow->id, ud->rflow_in_use);
1331 }
1332 
1333 #define UDMA_RESERVE_RESOURCE(res)					\
1334 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
1335 					       enum udma_tp_level tpl,	\
1336 					       int id)			\
1337 {									\
1338 	if (id >= 0) {							\
1339 		if (test_bit(id, ud->res##_map)) {			\
1340 			dev_err(ud->dev, "%s%d is in use\n", #res, id);	\
1341 			return ERR_PTR(-ENOENT);			\
1342 		}							\
1343 	} else {							\
1344 		int start;						\
1345 									\
1346 		if (tpl >= ud->res##_tpl.levels)			\
1347 			tpl = ud->res##_tpl.levels - 1;			\
1348 									\
1349 		start = ud->res##_tpl.start_idx[tpl];			\
1350 									\
1351 		id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,	\
1352 					start);				\
1353 		if (id == ud->res##_cnt) {				\
1354 			return ERR_PTR(-ENOENT);			\
1355 		}							\
1356 	}								\
1357 									\
1358 	set_bit(id, ud->res##_map);					\
1359 	return &ud->res##s[id];						\
1360 }
1361 
1362 UDMA_RESERVE_RESOURCE(bchan);
1363 UDMA_RESERVE_RESOURCE(tchan);
1364 UDMA_RESERVE_RESOURCE(rchan);
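
/*
 * The macro above expands to __udma_reserve_bchan(), __udma_reserve_tchan()
 * and __udma_reserve_rchan(). For tchan the expansion is roughly:
 *
 *	static struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
 *						       enum udma_tp_level tpl,
 *						       int id)
 *	{
 *		...
 *		id = find_next_zero_bit(ud->tchan_map, ud->tchan_cnt, start);
 *		...
 *		set_bit(id, ud->tchan_map);
 *		return &ud->tchans[id];
 *	}
 *
 * i.e. either the explicitly requested id is claimed if free, or the first
 * free channel at or above the start index of the requested TPL is picked.
 */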
1365 
1366 static int bcdma_get_bchan(struct udma_chan *uc)
1367 {
1368 	struct udma_dev *ud = uc->ud;
1369 	enum udma_tp_level tpl;
1370 	int ret;
1371 
1372 	if (uc->bchan) {
1373 		dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
1374 			uc->id, uc->bchan->id);
1375 		return 0;
1376 	}
1377 
1378 	/*
1379 	 * Use normal channels for peripherals, and highest TPL channel for
1380 	 * mem2mem
1381 	 */
1382 	if (uc->config.tr_trigger_type)
1383 		tpl = 0;
1384 	else
1385 		tpl = ud->bchan_tpl.levels - 1;
1386 
1387 	uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
1388 	if (IS_ERR(uc->bchan)) {
1389 		ret = PTR_ERR(uc->bchan);
1390 		uc->bchan = NULL;
1391 		return ret;
1392 	}
1393 
1394 	uc->tchan = uc->bchan;
1395 
1396 	return 0;
1397 }
1398 
1399 static int udma_get_tchan(struct udma_chan *uc)
1400 {
1401 	struct udma_dev *ud = uc->ud;
1402 	int ret;
1403 
1404 	if (uc->tchan) {
1405 		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1406 			uc->id, uc->tchan->id);
1407 		return 0;
1408 	}
1409 
1410 	/*
1411 	 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1412 	 * For PKTDMA mapped channels it is configured to a channel which must
1413 	 * be used to service the peripheral.
1414 	 */
1415 	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
1416 					 uc->config.mapped_channel_id);
1417 	if (IS_ERR(uc->tchan)) {
1418 		ret = PTR_ERR(uc->tchan);
1419 		uc->tchan = NULL;
1420 		return ret;
1421 	}
1422 
1423 	if (ud->tflow_cnt) {
1424 		int tflow_id;
1425 
1426 		/* Only PKTDMA has support for tx flows */
1427 		if (uc->config.default_flow_id >= 0)
1428 			tflow_id = uc->config.default_flow_id;
1429 		else
1430 			tflow_id = uc->tchan->id;
1431 
1432 		if (test_bit(tflow_id, ud->tflow_map)) {
1433 			dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
1434 			clear_bit(uc->tchan->id, ud->tchan_map);
1435 			uc->tchan = NULL;
1436 			return -ENOENT;
1437 		}
1438 
1439 		uc->tchan->tflow_id = tflow_id;
1440 		set_bit(tflow_id, ud->tflow_map);
1441 	} else {
1442 		uc->tchan->tflow_id = -1;
1443 	}
1444 
1445 	return 0;
1446 }
1447 
1448 static int udma_get_rchan(struct udma_chan *uc)
1449 {
1450 	struct udma_dev *ud = uc->ud;
1451 	int ret;
1452 
1453 	if (uc->rchan) {
1454 		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1455 			uc->id, uc->rchan->id);
1456 		return 0;
1457 	}
1458 
1459 	/*
1460 	 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1461 	 * For PKTDMA mapped channels it is configured to a channel which must
1462 	 * be used to service the peripheral.
1463 	 */
1464 	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
1465 					 uc->config.mapped_channel_id);
1466 	if (IS_ERR(uc->rchan)) {
1467 		ret = PTR_ERR(uc->rchan);
1468 		uc->rchan = NULL;
1469 		return ret;
1470 	}
1471 
1472 	return 0;
1473 }
1474 
1475 static int udma_get_chan_pair(struct udma_chan *uc)
1476 {
1477 	struct udma_dev *ud = uc->ud;
1478 	int chan_id, end;
1479 
1480 	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1481 		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1482 			 uc->id, uc->tchan->id);
1483 		return 0;
1484 	}
1485 
1486 	if (uc->tchan) {
1487 		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1488 			uc->id, uc->tchan->id);
1489 		return -EBUSY;
1490 	} else if (uc->rchan) {
1491 		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1492 			uc->id, uc->rchan->id);
1493 		return -EBUSY;
1494 	}
1495 
1496 	/* Can be optimized, but let's have it like this for now */
1497 	end = min(ud->tchan_cnt, ud->rchan_cnt);
1498 	/*
1499 	 * Try to use the highest TPL channel pair for MEM_TO_MEM channels
1500 	 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
1501 	 */
1502 	chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
1503 	for (; chan_id < end; chan_id++) {
1504 		if (!test_bit(chan_id, ud->tchan_map) &&
1505 		    !test_bit(chan_id, ud->rchan_map))
1506 			break;
1507 	}
1508 
1509 	if (chan_id == end)
1510 		return -ENOENT;
1511 
1512 	set_bit(chan_id, ud->tchan_map);
1513 	set_bit(chan_id, ud->rchan_map);
1514 	uc->tchan = &ud->tchans[chan_id];
1515 	uc->rchan = &ud->rchans[chan_id];
1516 
1517 	/* UDMA does not use tx flows */
1518 	uc->tchan->tflow_id = -1;
1519 
1520 	return 0;
1521 }
1522 
1523 static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1524 {
1525 	struct udma_dev *ud = uc->ud;
1526 	int ret;
1527 
1528 	if (!uc->rchan) {
1529 		dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1530 		return -EINVAL;
1531 	}
1532 
1533 	if (uc->rflow) {
1534 		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1535 			uc->id, uc->rflow->id);
1536 		return 0;
1537 	}
1538 
1539 	uc->rflow = __udma_get_rflow(ud, flow_id);
1540 	if (IS_ERR(uc->rflow)) {
1541 		ret = PTR_ERR(uc->rflow);
1542 		uc->rflow = NULL;
1543 		return ret;
1544 	}
1545 
1546 	return 0;
1547 }
1548 
1549 static void bcdma_put_bchan(struct udma_chan *uc)
1550 {
1551 	struct udma_dev *ud = uc->ud;
1552 
1553 	if (uc->bchan) {
1554 		dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
1555 			uc->bchan->id);
1556 		clear_bit(uc->bchan->id, ud->bchan_map);
1557 		uc->bchan = NULL;
1558 		uc->tchan = NULL;
1559 	}
1560 }
1561 
1562 static void udma_put_rchan(struct udma_chan *uc)
1563 {
1564 	struct udma_dev *ud = uc->ud;
1565 
1566 	if (uc->rchan) {
1567 		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1568 			uc->rchan->id);
1569 		clear_bit(uc->rchan->id, ud->rchan_map);
1570 		uc->rchan = NULL;
1571 	}
1572 }
1573 
1574 static void udma_put_tchan(struct udma_chan *uc)
1575 {
1576 	struct udma_dev *ud = uc->ud;
1577 
1578 	if (uc->tchan) {
1579 		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1580 			uc->tchan->id);
1581 		clear_bit(uc->tchan->id, ud->tchan_map);
1582 
1583 		if (uc->tchan->tflow_id >= 0)
1584 			clear_bit(uc->tchan->tflow_id, ud->tflow_map);
1585 
1586 		uc->tchan = NULL;
1587 	}
1588 }
1589 
1590 static void udma_put_rflow(struct udma_chan *uc)
1591 {
1592 	struct udma_dev *ud = uc->ud;
1593 
1594 	if (uc->rflow) {
1595 		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1596 			uc->rflow->id);
1597 		__udma_put_rflow(ud, uc->rflow);
1598 		uc->rflow = NULL;
1599 	}
1600 }
1601 
1602 static void bcdma_free_bchan_resources(struct udma_chan *uc)
1603 {
1604 	if (!uc->bchan)
1605 		return;
1606 
1607 	k3_ringacc_ring_free(uc->bchan->tc_ring);
1608 	k3_ringacc_ring_free(uc->bchan->t_ring);
1609 	uc->bchan->tc_ring = NULL;
1610 	uc->bchan->t_ring = NULL;
1611 	k3_configure_chan_coherency(&uc->vc.chan, 0);
1612 
1613 	bcdma_put_bchan(uc);
1614 }
1615 
1616 static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
1617 {
1618 	struct k3_ring_cfg ring_cfg;
1619 	struct udma_dev *ud = uc->ud;
1620 	int ret;
1621 
1622 	ret = bcdma_get_bchan(uc);
1623 	if (ret)
1624 		return ret;
1625 
1626 	ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
1627 					    &uc->bchan->t_ring,
1628 					    &uc->bchan->tc_ring);
1629 	if (ret) {
1630 		ret = -EBUSY;
1631 		goto err_ring;
1632 	}
1633 
1634 	memset(&ring_cfg, 0, sizeof(ring_cfg));
1635 	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1636 	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1637 	ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1638 
1639 	k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
1640 	ring_cfg.asel = ud->asel;
1641 	ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1642 
1643 	ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
1644 	if (ret)
1645 		goto err_ringcfg;
1646 
1647 	return 0;
1648 
1649 err_ringcfg:
1650 	k3_ringacc_ring_free(uc->bchan->tc_ring);
1651 	uc->bchan->tc_ring = NULL;
1652 	k3_ringacc_ring_free(uc->bchan->t_ring);
1653 	uc->bchan->t_ring = NULL;
1654 	k3_configure_chan_coherency(&uc->vc.chan, 0);
1655 err_ring:
1656 	bcdma_put_bchan(uc);
1657 
1658 	return ret;
1659 }
1660 
1661 static void udma_free_tx_resources(struct udma_chan *uc)
1662 {
1663 	if (!uc->tchan)
1664 		return;
1665 
1666 	k3_ringacc_ring_free(uc->tchan->t_ring);
1667 	k3_ringacc_ring_free(uc->tchan->tc_ring);
1668 	uc->tchan->t_ring = NULL;
1669 	uc->tchan->tc_ring = NULL;
1670 
1671 	udma_put_tchan(uc);
1672 }
1673 
1674 static int udma_alloc_tx_resources(struct udma_chan *uc)
1675 {
1676 	struct k3_ring_cfg ring_cfg;
1677 	struct udma_dev *ud = uc->ud;
1678 	struct udma_tchan *tchan;
1679 	int ring_idx, ret;
1680 
1681 	ret = udma_get_tchan(uc);
1682 	if (ret)
1683 		return ret;
1684 
1685 	tchan = uc->tchan;
1686 	if (tchan->tflow_id >= 0)
1687 		ring_idx = tchan->tflow_id;
1688 	else
1689 		ring_idx = ud->bchan_cnt + tchan->id;
1690 
1691 	ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
1692 					    &tchan->t_ring,
1693 					    &tchan->tc_ring);
1694 	if (ret) {
1695 		ret = -EBUSY;
1696 		goto err_ring;
1697 	}
1698 
1699 	memset(&ring_cfg, 0, sizeof(ring_cfg));
1700 	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1701 	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1702 	if (ud->match_data->type == DMA_TYPE_UDMA) {
1703 		ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1704 	} else {
1705 		ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1706 
1707 		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1708 		ring_cfg.asel = uc->config.asel;
1709 		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1710 	}
1711 
1712 	ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
1713 	ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);
1714 
1715 	if (ret)
1716 		goto err_ringcfg;
1717 
1718 	return 0;
1719 
1720 err_ringcfg:
1721 	k3_ringacc_ring_free(uc->tchan->tc_ring);
1722 	uc->tchan->tc_ring = NULL;
1723 	k3_ringacc_ring_free(uc->tchan->t_ring);
1724 	uc->tchan->t_ring = NULL;
1725 err_ring:
1726 	udma_put_tchan(uc);
1727 
1728 	return ret;
1729 }
1730 
1731 static void udma_free_rx_resources(struct udma_chan *uc)
1732 {
1733 	if (!uc->rchan)
1734 		return;
1735 
1736 	if (uc->rflow) {
1737 		struct udma_rflow *rflow = uc->rflow;
1738 
1739 		k3_ringacc_ring_free(rflow->fd_ring);
1740 		k3_ringacc_ring_free(rflow->r_ring);
1741 		rflow->fd_ring = NULL;
1742 		rflow->r_ring = NULL;
1743 
1744 		udma_put_rflow(uc);
1745 	}
1746 
1747 	udma_put_rchan(uc);
1748 }
1749 
1750 static int udma_alloc_rx_resources(struct udma_chan *uc)
1751 {
1752 	struct udma_dev *ud = uc->ud;
1753 	struct k3_ring_cfg ring_cfg;
1754 	struct udma_rflow *rflow;
1755 	int fd_ring_id;
1756 	int ret;
1757 
1758 	ret = udma_get_rchan(uc);
1759 	if (ret)
1760 		return ret;
1761 
1762 	/* For MEM_TO_MEM we don't need rflow or rings */
1763 	if (uc->config.dir == DMA_MEM_TO_MEM)
1764 		return 0;
1765 
1766 	if (uc->config.default_flow_id >= 0)
1767 		ret = udma_get_rflow(uc, uc->config.default_flow_id);
1768 	else
1769 		ret = udma_get_rflow(uc, uc->rchan->id);
1770 
1771 	if (ret) {
1772 		ret = -EBUSY;
1773 		goto err_rflow;
1774 	}
1775 
1776 	rflow = uc->rflow;
1777 	if (ud->tflow_cnt)
1778 		fd_ring_id = ud->tflow_cnt + rflow->id;
1779 	else
1780 		fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
1781 			     uc->rchan->id;
1782 
1783 	ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
1784 					    &rflow->fd_ring, &rflow->r_ring);
1785 	if (ret) {
1786 		ret = -EBUSY;
1787 		goto err_ring;
1788 	}
1789 
1790 	memset(&ring_cfg, 0, sizeof(ring_cfg));
1791 
1792 	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1793 	if (ud->match_data->type == DMA_TYPE_UDMA) {
1794 		if (uc->config.pkt_mode)
1795 			ring_cfg.size = SG_MAX_SEGMENTS;
1796 		else
1797 			ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1798 
1799 		ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1800 	} else {
1801 		ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1802 		ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1803 
1804 		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1805 		ring_cfg.asel = uc->config.asel;
1806 		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1807 	}
1808 
1809 	ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1810 
1811 	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1812 	ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1813 
1814 	if (ret)
1815 		goto err_ringcfg;
1816 
1817 	return 0;
1818 
1819 err_ringcfg:
1820 	k3_ringacc_ring_free(rflow->r_ring);
1821 	rflow->r_ring = NULL;
1822 	k3_ringacc_ring_free(rflow->fd_ring);
1823 	rflow->fd_ring = NULL;
1824 err_ring:
1825 	udma_put_rflow(uc);
1826 err_rflow:
1827 	udma_put_rchan(uc);
1828 
1829 	return ret;
1830 }
1831 
1832 #define TISCI_BCDMA_BCHAN_VALID_PARAMS (			\
1833 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
1834 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1835 
1836 #define TISCI_BCDMA_TCHAN_VALID_PARAMS (			\
1837 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
1838 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1839 
1840 #define TISCI_BCDMA_RCHAN_VALID_PARAMS (			\
1841 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1842 
1843 #define TISCI_UDMA_TCHAN_VALID_PARAMS (				\
1844 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
1845 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |	\
1846 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |	\
1847 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
1848 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
1849 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
1850 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
1851 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1852 
1853 #define TISCI_UDMA_RCHAN_VALID_PARAMS (				\
1854 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
1855 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
1856 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
1857 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
1858 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
1859 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
1860 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
1861 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
1862 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1863 
1864 static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1865 {
1866 	struct udma_dev *ud = uc->ud;
1867 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1868 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1869 	struct udma_tchan *tchan = uc->tchan;
1870 	struct udma_rchan *rchan = uc->rchan;
1871 	u8 burst_size = 0;
1872 	int ret;
1873 	u8 tpl;
1874 
1875 	/* Non synchronized - mem to mem type of transfer */
1876 	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1877 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1878 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1879 
1880 	if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1881 		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);
1882 
1883 		burst_size = ud->match_data->burst_size[tpl];
1884 	}
1885 
1886 	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1887 	req_tx.nav_id = tisci_rm->tisci_dev_id;
1888 	req_tx.index = tchan->id;
1889 	req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1890 	req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1891 	req_tx.txcq_qnum = tc_ring;
1892 	req_tx.tx_atype = ud->atype;
1893 	if (burst_size) {
1894 		req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1895 		req_tx.tx_burst_size = burst_size;
1896 	}
1897 
1898 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1899 	if (ret) {
1900 		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1901 		return ret;
1902 	}
1903 
1904 	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
1905 	req_rx.nav_id = tisci_rm->tisci_dev_id;
1906 	req_rx.index = rchan->id;
1907 	req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1908 	req_rx.rxcq_qnum = tc_ring;
1909 	req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1910 	req_rx.rx_atype = ud->atype;
1911 	if (burst_size) {
1912 		req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1913 		req_rx.rx_burst_size = burst_size;
1914 	}
1915 
1916 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1917 	if (ret)
1918 		dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
1919 
1920 	return ret;
1921 }
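
/*
 * Note on the fetch size above: tx_fetch_size/rx_fetch_size are programmed
 * in units of 32-bit words, hence the ">> 2" applied to the descriptor
 * header size here and to cppi5_hdesc_calc_size() in the packet mode
 * configuration functions below. For mem-to-mem both ends use the TR
 * (third-party block copy) channel type and share tchan->tc_ring as the
 * completion queue.
 */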
1922 
1923 static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
1924 {
1925 	struct udma_dev *ud = uc->ud;
1926 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1927 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1928 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1929 	struct udma_bchan *bchan = uc->bchan;
1930 	u8 burst_size = 0;
1931 	int ret;
1932 	u8 tpl;
1933 
1934 	if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1935 		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);
1936 
1937 		burst_size = ud->match_data->burst_size[tpl];
1938 	}
1939 
1940 	req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
1941 	req_tx.nav_id = tisci_rm->tisci_dev_id;
1942 	req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
1943 	req_tx.index = bchan->id;
1944 	if (burst_size) {
1945 		req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1946 		req_tx.tx_burst_size = burst_size;
1947 	}
1948 
1949 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1950 	if (ret)
1951 		dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1952 
1953 	return ret;
1954 }
1955 
1956 static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1957 {
1958 	struct udma_dev *ud = uc->ud;
1959 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1960 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1961 	struct udma_tchan *tchan = uc->tchan;
1962 	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1963 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1964 	u32 mode, fetch_size;
1965 	int ret;
1966 
1967 	if (uc->config.pkt_mode) {
1968 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1969 		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1970 						   uc->config.psd_size, 0);
1971 	} else {
1972 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1973 		fetch_size = sizeof(struct cppi5_desc_hdr_t);
1974 	}
1975 
1976 	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1977 	req_tx.nav_id = tisci_rm->tisci_dev_id;
1978 	req_tx.index = tchan->id;
1979 	req_tx.tx_chan_type = mode;
1980 	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1981 	req_tx.tx_fetch_size = fetch_size >> 2;
1982 	req_tx.txcq_qnum = tc_ring;
1983 	req_tx.tx_atype = uc->config.atype;
1984 	if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
1985 	    ud->match_data->flags & UDMA_FLAG_TDTYPE) {
1986 		/* wait for peer to complete the teardown for PDMAs */
1987 		req_tx.valid_params |=
1988 				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
1989 		req_tx.tx_tdtype = 1;
1990 	}
1991 
1992 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1993 	if (ret)
1994 		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1995 
1996 	return ret;
1997 }
1998 
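/*
 * Configure a BCDMA split tchan via TISCI. BCDMA tchans have no
 * completion queue or fetch size to program; only teardown packet
 * suppression and, when supported, the tdtype handling are set.
 */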
1999 static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
2000 {
2001 	struct udma_dev *ud = uc->ud;
2002 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2003 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2004 	struct udma_tchan *tchan = uc->tchan;
2005 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
2006 	int ret;
2007 
2008 	req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
2009 	req_tx.nav_id = tisci_rm->tisci_dev_id;
2010 	req_tx.index = tchan->id;
2011 	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
2012 	if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
2013 		/* wait for peer to complete the teardown for PDMAs */
2014 		req_tx.valid_params |=
2015 				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
2016 		req_tx.tx_tdtype = 1;
2017 	}
2018 
2019 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
2020 	if (ret)
2021 		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
2022 
2023 	return ret;
2024 }
2025 
2026 #define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
2027 
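/*
 * Configure a UDMA rchan and its default rflow via TISCI. The rflow uses
 * the same index as the rchan, returns descriptors on the receive ring
 * and takes free descriptors from the fd_ring for all four FDQ levels.
 */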
2028 static int udma_tisci_rx_channel_config(struct udma_chan *uc)
2029 {
2030 	struct udma_dev *ud = uc->ud;
2031 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2032 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2033 	struct udma_rchan *rchan = uc->rchan;
2034 	int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
2035 	int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2036 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2037 	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2038 	u32 mode, fetch_size;
2039 	int ret;
2040 
2041 	if (uc->config.pkt_mode) {
2042 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
2043 		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
2044 						   uc->config.psd_size, 0);
2045 	} else {
2046 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
2047 		fetch_size = sizeof(struct cppi5_desc_hdr_t);
2048 	}
2049 
2050 	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
2051 	req_rx.nav_id = tisci_rm->tisci_dev_id;
2052 	req_rx.index = rchan->id;
2053 	req_rx.rx_fetch_size = fetch_size >> 2;
2054 	req_rx.rxcq_qnum = rx_ring;
2055 	req_rx.rx_chan_type = mode;
2056 	req_rx.rx_atype = uc->config.atype;
2057 
2058 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2059 	if (ret) {
2060 		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2061 		return ret;
2062 	}
2063 
2064 	flow_req.valid_params =
2065 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2066 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2067 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
2068 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
2069 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
2070 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
2071 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
2072 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
2073 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
2074 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
2075 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
2076 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
2077 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
2078 
2079 	flow_req.nav_id = tisci_rm->tisci_dev_id;
2080 	flow_req.flow_index = rchan->id;
2081 
2082 	if (uc->config.needs_epib)
2083 		flow_req.rx_einfo_present = 1;
2084 	else
2085 		flow_req.rx_einfo_present = 0;
2086 	if (uc->config.psd_size)
2087 		flow_req.rx_psinfo_present = 1;
2088 	else
2089 		flow_req.rx_psinfo_present = 0;
2090 	flow_req.rx_error_handling = 1;
2091 	flow_req.rx_dest_qnum = rx_ring;
2092 	flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
2093 	flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
2094 	flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
2095 	flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
2096 	flow_req.rx_fdq0_sz0_qnum = fd_ring;
2097 	flow_req.rx_fdq1_qnum = fd_ring;
2098 	flow_req.rx_fdq2_qnum = fd_ring;
2099 	flow_req.rx_fdq3_qnum = fd_ring;
2100 
2101 	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2102 
2103 	if (ret)
2104 		dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
2105 
2106 	return 0;
2107 }
2108 
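/*
 * Configure a BCDMA split rchan via TISCI; only the channel index needs
 * to be programmed for this mode.
 */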
2109 static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
2110 {
2111 	struct udma_dev *ud = uc->ud;
2112 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2113 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2114 	struct udma_rchan *rchan = uc->rchan;
2115 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2116 	int ret;
2117 
2118 	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2119 	req_rx.nav_id = tisci_rm->tisci_dev_id;
2120 	req_rx.index = rchan->id;
2121 
2122 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2123 	if (ret)
2124 		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2125 
2126 	return ret;
2127 }
2128 
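/*
 * Configure a PKTDMA rchan and its rflow via TISCI. Unlike UDMA, the ring
 * numbers are not programmed here; only EPIB/psdata presence and error
 * handling are set on the flow.
 */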
2129 static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2130 {
2131 	struct udma_dev *ud = uc->ud;
2132 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2133 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2134 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2135 	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2136 	int ret;
2137 
2138 	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2139 	req_rx.nav_id = tisci_rm->tisci_dev_id;
2140 	req_rx.index = uc->rchan->id;
2141 
2142 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2143 	if (ret) {
2144 		dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2145 		return ret;
2146 	}
2147 
2148 	flow_req.valid_params =
2149 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2150 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2151 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2152 
2153 	flow_req.nav_id = tisci_rm->tisci_dev_id;
2154 	flow_req.flow_index = uc->rflow->id;
2155 
2156 	if (uc->config.needs_epib)
2157 		flow_req.rx_einfo_present = 1;
2158 	else
2159 		flow_req.rx_einfo_present = 0;
2160 	if (uc->config.psd_size)
2161 		flow_req.rx_psinfo_present = 1;
2162 	else
2163 		flow_req.rx_psinfo_present = 0;
2164 	flow_req.rx_error_handling = 1;
2165 
2166 	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2167 
2168 	if (ret)
2169 		dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
2170 			ret);
2171 
2172 	return ret;
2173 }
2174 
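/*
 * Allocate UDMA channel resources: rings, PSI-L pairing and interrupts.
 * MEM_TO_MEM uses a tchan/rchan pair in TR mode, the slave directions use
 * a single tchan or rchan, configured through the TISCI helpers above.
 */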
2175 static int udma_alloc_chan_resources(struct dma_chan *chan)
2176 {
2177 	struct udma_chan *uc = to_udma_chan(chan);
2178 	struct udma_dev *ud = to_udma_dev(chan->device);
2179 	const struct udma_soc_data *soc_data = ud->soc_data;
2180 	struct k3_ring *irq_ring;
2181 	u32 irq_udma_idx;
2182 	int ret;
2183 
2184 	uc->dma_dev = ud->dev;
2185 
2186 	if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
2187 		uc->use_dma_pool = true;
2188 		/* in case of MEM_TO_MEM we have maximum of two TRs */
2189 		if (uc->config.dir == DMA_MEM_TO_MEM) {
2190 			uc->config.hdesc_size = cppi5_trdesc_calc_size(
2191 					sizeof(struct cppi5_tr_type15_t), 2);
2192 			uc->config.pkt_mode = false;
2193 		}
2194 	}
2195 
2196 	if (uc->use_dma_pool) {
2197 		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2198 						 uc->config.hdesc_size,
2199 						 ud->desc_align,
2200 						 0);
2201 		if (!uc->hdesc_pool) {
2202 			dev_err(ud->ddev.dev,
2203 				"Descriptor pool allocation failed\n");
2204 			uc->use_dma_pool = false;
2205 			ret = -ENOMEM;
2206 			goto err_cleanup;
2207 		}
2208 	}
2209 
2210 	/*
2211 	 * Make sure that the completion is in a known state:
2212 	 * No teardown, the channel is idle
2213 	 */
2214 	reinit_completion(&uc->teardown_completed);
2215 	complete_all(&uc->teardown_completed);
2216 	uc->state = UDMA_CHAN_IS_IDLE;
2217 
2218 	switch (uc->config.dir) {
2219 	case DMA_MEM_TO_MEM:
2220 		/* Non synchronized - mem to mem type of transfer */
2221 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2222 			uc->id);
2223 
2224 		ret = udma_get_chan_pair(uc);
2225 		if (ret)
2226 			goto err_cleanup;
2227 
2228 		ret = udma_alloc_tx_resources(uc);
2229 		if (ret) {
2230 			udma_put_rchan(uc);
2231 			goto err_cleanup;
2232 		}
2233 
2234 		ret = udma_alloc_rx_resources(uc);
2235 		if (ret) {
2236 			udma_free_tx_resources(uc);
2237 			goto err_cleanup;
2238 		}
2239 
2240 		uc->config.src_thread = ud->psil_base + uc->tchan->id;
2241 		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2242 					K3_PSIL_DST_THREAD_ID_OFFSET;
2243 
2244 		irq_ring = uc->tchan->tc_ring;
2245 		irq_udma_idx = uc->tchan->id;
2246 
2247 		ret = udma_tisci_m2m_channel_config(uc);
2248 		break;
2249 	case DMA_MEM_TO_DEV:
2250 		/* Slave transfer synchronized - mem to dev (TX) transfer */
2251 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2252 			uc->id);
2253 
2254 		ret = udma_alloc_tx_resources(uc);
2255 		if (ret)
2256 			goto err_cleanup;
2257 
2258 		uc->config.src_thread = ud->psil_base + uc->tchan->id;
2259 		uc->config.dst_thread = uc->config.remote_thread_id;
2260 		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2261 
2262 		irq_ring = uc->tchan->tc_ring;
2263 		irq_udma_idx = uc->tchan->id;
2264 
2265 		ret = udma_tisci_tx_channel_config(uc);
2266 		break;
2267 	case DMA_DEV_TO_MEM:
2268 		/* Slave transfer synchronized - dev to mem (RX) transfer */
2269 		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2270 			uc->id);
2271 
2272 		ret = udma_alloc_rx_resources(uc);
2273 		if (ret)
2274 			goto err_cleanup;
2275 
2276 		uc->config.src_thread = uc->config.remote_thread_id;
2277 		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2278 					K3_PSIL_DST_THREAD_ID_OFFSET;
2279 
2280 		irq_ring = uc->rflow->r_ring;
2281 		irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;
2282 
2283 		ret = udma_tisci_rx_channel_config(uc);
2284 		break;
2285 	default:
2286 		/* Can not happen */
2287 		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2288 			__func__, uc->id, uc->config.dir);
2289 		ret = -EINVAL;
2290 		goto err_cleanup;
2291 
2292 	}
2293 
2294 	/* check if the channel configuration was successful */
2295 	if (ret)
2296 		goto err_res_free;
2297 
2298 	if (udma_is_chan_running(uc)) {
2299 		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2300 		udma_reset_chan(uc, false);
2301 		if (udma_is_chan_running(uc)) {
2302 			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2303 			ret = -EBUSY;
2304 			goto err_res_free;
2305 		}
2306 	}
2307 
2308 	/* PSI-L pairing */
2309 	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2310 	if (ret) {
2311 		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2312 			uc->config.src_thread, uc->config.dst_thread);
2313 		goto err_res_free;
2314 	}
2315 
2316 	uc->psil_paired = true;
2317 
2318 	uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
2319 	if (uc->irq_num_ring <= 0) {
2320 		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2321 			k3_ringacc_get_ring_id(irq_ring));
2322 		ret = -EINVAL;
2323 		goto err_psi_free;
2324 	}
2325 
2326 	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2327 			  IRQF_TRIGGER_HIGH, uc->name, uc);
2328 	if (ret) {
2329 		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2330 		goto err_irq_free;
2331 	}
2332 
2333 	/* Event from UDMA (TR events) only needed for slave TR mode channels */
2334 	if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
2335 		uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2336 		if (uc->irq_num_udma <= 0) {
2337 			dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
2338 				irq_udma_idx);
2339 			free_irq(uc->irq_num_ring, uc);
2340 			ret = -EINVAL;
2341 			goto err_irq_free;
2342 		}
2343 
2344 		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2345 				  uc->name, uc);
2346 		if (ret) {
2347 			dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
2348 				uc->id);
2349 			free_irq(uc->irq_num_ring, uc);
2350 			goto err_irq_free;
2351 		}
2352 	} else {
2353 		uc->irq_num_udma = 0;
2354 	}
2355 
2356 	udma_reset_rings(uc);
2357 
2358 	return 0;
2359 
2360 err_irq_free:
2361 	uc->irq_num_ring = 0;
2362 	uc->irq_num_udma = 0;
2363 err_psi_free:
2364 	navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2365 	uc->psil_paired = false;
2366 err_res_free:
2367 	udma_free_tx_resources(uc);
2368 	udma_free_rx_resources(uc);
2369 err_cleanup:
2370 	udma_reset_uchan(uc);
2371 
2372 	if (uc->use_dma_pool) {
2373 		dma_pool_destroy(uc->hdesc_pool);
2374 		uc->use_dma_pool = false;
2375 	}
2376 
2377 	return ret;
2378 }
2379 
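/*
 * Allocate BCDMA channel resources. Only TR mode is supported: a bchan
 * for MEM_TO_MEM, a tchan or rchan for the slave directions. PSI-L
 * pairing is only needed for slave channels, while non-triggered
 * MEM_TO_MEM uses a dma_pool for its descriptors.
 */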
2380 static int bcdma_alloc_chan_resources(struct dma_chan *chan)
2381 {
2382 	struct udma_chan *uc = to_udma_chan(chan);
2383 	struct udma_dev *ud = to_udma_dev(chan->device);
2384 	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2385 	u32 irq_udma_idx, irq_ring_idx;
2386 	int ret;
2387 
2388 	/* Only TR mode is supported */
2389 	uc->config.pkt_mode = false;
2390 
2391 	/*
2392 	 * Make sure that the completion is in a known state:
2393 	 * No teardown, the channel is idle
2394 	 */
2395 	reinit_completion(&uc->teardown_completed);
2396 	complete_all(&uc->teardown_completed);
2397 	uc->state = UDMA_CHAN_IS_IDLE;
2398 
2399 	switch (uc->config.dir) {
2400 	case DMA_MEM_TO_MEM:
2401 		/* Non synchronized - mem to mem type of transfer */
2402 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2403 			uc->id);
2404 
2405 		ret = bcdma_alloc_bchan_resources(uc);
2406 		if (ret)
2407 			return ret;
2408 
2409 		irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
2410 		irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;
2411 
2412 		ret = bcdma_tisci_m2m_channel_config(uc);
2413 		break;
2414 	case DMA_MEM_TO_DEV:
2415 		/* Slave transfer synchronized - mem to dev (TX) transfer */
2416 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2417 			uc->id);
2418 
2419 		ret = udma_alloc_tx_resources(uc);
2420 		if (ret) {
2421 			uc->config.remote_thread_id = -1;
2422 			return ret;
2423 		}
2424 
2425 		uc->config.src_thread = ud->psil_base + uc->tchan->id;
2426 		uc->config.dst_thread = uc->config.remote_thread_id;
2427 		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2428 
2429 		irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
2430 		irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;
2431 
2432 		ret = bcdma_tisci_tx_channel_config(uc);
2433 		break;
2434 	case DMA_DEV_TO_MEM:
2435 		/* Slave transfer synchronized - dev to mem (RX) transfer */
2436 		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2437 			uc->id);
2438 
2439 		ret = udma_alloc_rx_resources(uc);
2440 		if (ret) {
2441 			uc->config.remote_thread_id = -1;
2442 			return ret;
2443 		}
2444 
2445 		uc->config.src_thread = uc->config.remote_thread_id;
2446 		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2447 					K3_PSIL_DST_THREAD_ID_OFFSET;
2448 
2449 		irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
2450 		irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;
2451 
2452 		ret = bcdma_tisci_rx_channel_config(uc);
2453 		break;
2454 	default:
2455 		/* Can not happen */
2456 		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2457 			__func__, uc->id, uc->config.dir);
2458 		return -EINVAL;
2459 	}
2460 
2461 	/* check if the channel configuration was successful */
2462 	if (ret)
2463 		goto err_res_free;
2464 
2465 	if (udma_is_chan_running(uc)) {
2466 		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2467 		udma_reset_chan(uc, false);
2468 		if (udma_is_chan_running(uc)) {
2469 			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2470 			ret = -EBUSY;
2471 			goto err_res_free;
2472 		}
2473 	}
2474 
2475 	uc->dma_dev = dmaengine_get_dma_device(chan);
2476 	if (uc->config.dir == DMA_MEM_TO_MEM  && !uc->config.tr_trigger_type) {
2477 		uc->config.hdesc_size = cppi5_trdesc_calc_size(
2478 					sizeof(struct cppi5_tr_type15_t), 2);
2479 
2480 		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2481 						 uc->config.hdesc_size,
2482 						 ud->desc_align,
2483 						 0);
2484 		if (!uc->hdesc_pool) {
2485 			dev_err(ud->ddev.dev,
2486 				"Descriptor pool allocation failed\n");
2487 			uc->use_dma_pool = false;
2488 			ret = -ENOMEM;
2489 			goto err_res_free;
2490 		}
2491 
2492 		uc->use_dma_pool = true;
2493 	} else if (uc->config.dir != DMA_MEM_TO_MEM) {
2494 		/* PSI-L pairing */
2495 		ret = navss_psil_pair(ud, uc->config.src_thread,
2496 				      uc->config.dst_thread);
2497 		if (ret) {
2498 			dev_err(ud->dev,
2499 				"PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2500 				uc->config.src_thread, uc->config.dst_thread);
2501 			goto err_res_free;
2502 		}
2503 
2504 		uc->psil_paired = true;
2505 	}
2506 
2507 	uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2508 	if (uc->irq_num_ring <= 0) {
2509 		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2510 			irq_ring_idx);
2511 		ret = -EINVAL;
2512 		goto err_psi_free;
2513 	}
2514 
2515 	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2516 			  IRQF_TRIGGER_HIGH, uc->name, uc);
2517 	if (ret) {
2518 		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2519 		goto err_irq_free;
2520 	}
2521 
2522 	/* Event from BCDMA (TR events) only needed for slave channels */
2523 	if (is_slave_direction(uc->config.dir)) {
2524 		uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2525 		if (uc->irq_num_udma <= 0) {
2526 			dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
2527 				irq_udma_idx);
2528 			free_irq(uc->irq_num_ring, uc);
2529 			ret = -EINVAL;
2530 			goto err_irq_free;
2531 		}
2532 
2533 		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2534 				  uc->name, uc);
2535 		if (ret) {
2536 			dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
2537 				uc->id);
2538 			free_irq(uc->irq_num_ring, uc);
2539 			goto err_irq_free;
2540 		}
2541 	} else {
2542 		uc->irq_num_udma = 0;
2543 	}
2544 
2545 	udma_reset_rings(uc);
2546 
2547 	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2548 				  udma_check_tx_completion);
2549 	return 0;
2550 
2551 err_irq_free:
2552 	uc->irq_num_ring = 0;
2553 	uc->irq_num_udma = 0;
2554 err_psi_free:
2555 	if (uc->psil_paired)
2556 		navss_psil_unpair(ud, uc->config.src_thread,
2557 				  uc->config.dst_thread);
2558 	uc->psil_paired = false;
2559 err_res_free:
2560 	bcdma_free_bchan_resources(uc);
2561 	udma_free_tx_resources(uc);
2562 	udma_free_rx_resources(uc);
2563 
2564 	udma_reset_uchan(uc);
2565 
2566 	if (uc->use_dma_pool) {
2567 		dma_pool_destroy(uc->hdesc_pool);
2568 		uc->use_dma_pool = false;
2569 	}
2570 
2571 	return ret;
2572 }
2573 
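/*
 * Program the event router so that the selected global trigger fires the
 * bchan. Each bchan owns two global triggers; tr_trigger_type 1 or 2
 * selects which one of the pair is used.
 */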
2574 static int bcdma_router_config(struct dma_chan *chan)
2575 {
2576 	struct k3_event_route_data *router_data = chan->route_data;
2577 	struct udma_chan *uc = to_udma_chan(chan);
2578 	u32 trigger_event;
2579 
2580 	if (!uc->bchan)
2581 		return -EINVAL;
2582 
2583 	if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
2584 		return -EINVAL;
2585 
2586 	trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
2587 	trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;
2588 
2589 	return router_data->set_event(router_data->priv, trigger_event);
2590 }
2591 
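/*
 * Allocate PKTDMA channel resources. PKTDMA is packet mode only and has
 * no MEM_TO_MEM support; the ring interrupt is derived from the
 * tflow/rflow and no separate TR event interrupt is requested.
 */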
2592 static int pktdma_alloc_chan_resources(struct dma_chan *chan)
2593 {
2594 	struct udma_chan *uc = to_udma_chan(chan);
2595 	struct udma_dev *ud = to_udma_dev(chan->device);
2596 	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2597 	u32 irq_ring_idx;
2598 	int ret;
2599 
2600 	/*
2601 	 * Make sure that the completion is in a known state:
2602 	 * No teardown, the channel is idle
2603 	 */
2604 	reinit_completion(&uc->teardown_completed);
2605 	complete_all(&uc->teardown_completed);
2606 	uc->state = UDMA_CHAN_IS_IDLE;
2607 
2608 	switch (uc->config.dir) {
2609 	case DMA_MEM_TO_DEV:
2610 		/* Slave transfer synchronized - mem to dev (TX) transfer */
2611 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2612 			uc->id);
2613 
2614 		ret = udma_alloc_tx_resources(uc);
2615 		if (ret) {
2616 			uc->config.remote_thread_id = -1;
2617 			return ret;
2618 		}
2619 
2620 		uc->config.src_thread = ud->psil_base + uc->tchan->id;
2621 		uc->config.dst_thread = uc->config.remote_thread_id;
2622 		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2623 
2624 		irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;
2625 
2626 		ret = pktdma_tisci_tx_channel_config(uc);
2627 		break;
2628 	case DMA_DEV_TO_MEM:
2629 		/* Slave transfer synchronized - dev to mem (RX) transfer */
2630 		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2631 			uc->id);
2632 
2633 		ret = udma_alloc_rx_resources(uc);
2634 		if (ret) {
2635 			uc->config.remote_thread_id = -1;
2636 			return ret;
2637 		}
2638 
2639 		uc->config.src_thread = uc->config.remote_thread_id;
2640 		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2641 					K3_PSIL_DST_THREAD_ID_OFFSET;
2642 
2643 		irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;
2644 
2645 		ret = pktdma_tisci_rx_channel_config(uc);
2646 		break;
2647 	default:
2648 		/* Can not happen */
2649 		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2650 			__func__, uc->id, uc->config.dir);
2651 		return -EINVAL;
2652 	}
2653 
2654 	/* check if the channel configuration was successful */
2655 	if (ret)
2656 		goto err_res_free;
2657 
2658 	if (udma_is_chan_running(uc)) {
2659 		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2660 		udma_reset_chan(uc, false);
2661 		if (udma_is_chan_running(uc)) {
2662 			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2663 			ret = -EBUSY;
2664 			goto err_res_free;
2665 		}
2666 	}
2667 
2668 	uc->dma_dev = dmaengine_get_dma_device(chan);
2669 	uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
2670 					 uc->config.hdesc_size, ud->desc_align,
2671 					 0);
2672 	if (!uc->hdesc_pool) {
2673 		dev_err(ud->ddev.dev,
2674 			"Descriptor pool allocation failed\n");
2675 		uc->use_dma_pool = false;
2676 		ret = -ENOMEM;
2677 		goto err_res_free;
2678 	}
2679 
2680 	uc->use_dma_pool = true;
2681 
2682 	/* PSI-L pairing */
2683 	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2684 	if (ret) {
2685 		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2686 			uc->config.src_thread, uc->config.dst_thread);
2687 		goto err_res_free;
2688 	}
2689 
2690 	uc->psil_paired = true;
2691 
2692 	uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2693 	if (uc->irq_num_ring <= 0) {
2694 		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2695 			irq_ring_idx);
2696 		ret = -EINVAL;
2697 		goto err_psi_free;
2698 	}
2699 
2700 	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2701 			  IRQF_TRIGGER_HIGH, uc->name, uc);
2702 	if (ret) {
2703 		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2704 		goto err_irq_free;
2705 	}
2706 
2707 	uc->irq_num_udma = 0;
2708 
2709 	udma_reset_rings(uc);
2710 
2711 	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2712 				  udma_check_tx_completion);
2713 
2714 	if (uc->tchan)
2715 		dev_dbg(ud->dev,
2716 			"chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2717 			uc->id, uc->tchan->id, uc->tchan->tflow_id,
2718 			uc->config.remote_thread_id);
2719 	else if (uc->rchan)
2720 		dev_dbg(ud->dev,
2721 			"chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2722 			uc->id, uc->rchan->id, uc->rflow->id,
2723 			uc->config.remote_thread_id);
2724 	return 0;
2725 
2726 err_irq_free:
2727 	uc->irq_num_ring = 0;
2728 err_psi_free:
2729 	navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2730 	uc->psil_paired = false;
2731 err_res_free:
2732 	udma_free_tx_resources(uc);
2733 	udma_free_rx_resources(uc);
2734 
2735 	udma_reset_uchan(uc);
2736 
2737 	dma_pool_destroy(uc->hdesc_pool);
2738 	uc->use_dma_pool = false;
2739 
2740 	return ret;
2741 }
2742 
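/*
 * Cache the dmaengine slave configuration; it is consumed later by the
 * prep callbacks (addresses, bus widths, maxburst, port window size).
 *
 * Illustrative sketch of a typical client call (not part of this driver,
 * fifo_phys_addr is a hypothetical device FIFO address):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */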
2743 static int udma_slave_config(struct dma_chan *chan,
2744 			     struct dma_slave_config *cfg)
2745 {
2746 	struct udma_chan *uc = to_udma_chan(chan);
2747 
2748 	memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
2749 
2750 	return 0;
2751 }
2752 
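/*
 * Allocate a single TR descriptor able to hold @tr_count TRs of @tr_size
 * bytes (16/32/64/128). The memory comes from the channel's dma_pool when
 * one is in use, otherwise from a coherent allocation, and the return
 * ring is selected based on the transfer direction.
 */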
2753 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
2754 					    size_t tr_size, int tr_count,
2755 					    enum dma_transfer_direction dir)
2756 {
2757 	struct udma_hwdesc *hwdesc;
2758 	struct cppi5_desc_hdr_t *tr_desc;
2759 	struct udma_desc *d;
2760 	u32 reload_count = 0;
2761 	u32 ring_id;
2762 
2763 	switch (tr_size) {
2764 	case 16:
2765 	case 32:
2766 	case 64:
2767 	case 128:
2768 		break;
2769 	default:
2770 		dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
2771 		return NULL;
2772 	}
2773 
2774 	/* We have only one descriptor containing multiple TRs */
2775 	d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
2776 	if (!d)
2777 		return NULL;
2778 
2779 	d->sglen = tr_count;
2780 
2781 	d->hwdesc_count = 1;
2782 	hwdesc = &d->hwdesc[0];
2783 
2784 	/* Allocate memory for DMA ring descriptor */
2785 	if (uc->use_dma_pool) {
2786 		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2787 		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2788 						GFP_NOWAIT,
2789 						&hwdesc->cppi5_desc_paddr);
2790 	} else {
2791 		hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
2792 								 tr_count);
2793 		hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
2794 						uc->ud->desc_align);
2795 		hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
2796 						hwdesc->cppi5_desc_size,
2797 						&hwdesc->cppi5_desc_paddr,
2798 						GFP_NOWAIT);
2799 	}
2800 
2801 	if (!hwdesc->cppi5_desc_vaddr) {
2802 		kfree(d);
2803 		return NULL;
2804 	}
2805 
2806 	/* Start of the TR req records */
2807 	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
2808 	/* Start address of the TR response array */
2809 	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
2810 
2811 	tr_desc = hwdesc->cppi5_desc_vaddr;
2812 
2813 	if (uc->cyclic)
2814 		reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
2815 
2816 	if (dir == DMA_DEV_TO_MEM)
2817 		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2818 	else
2819 		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2820 
2821 	cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
2822 	cppi5_desc_set_pktids(tr_desc, uc->id,
2823 			      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2824 	cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
2825 
2826 	return d;
2827 }
2828 
2829 /**
2830  * udma_get_tr_counters - calculate TR counters for a given length
2831  * @len: Length of the transfer
2832  * @align_to: Preferred alignment
2833  * @tr0_cnt0: First TR icnt0
2834  * @tr0_cnt1: First TR icnt1
2835  * @tr1_cnt0: Second (if used) TR icnt0
2836  *
2837  * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
2838  * For len >= SZ_64K two TRs are used in a simple way:
2839  * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2840  * Second TR: the remaining length (tr1_cnt0)
2841  *
2842  * Returns the number of TRs the length needs (1 or 2)
2843  * -EINVAL if the length can not be supported
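 *
 * Example: len = SZ_1M with align_to = 3 gives tr0_cnt0 = 65528,
 * tr0_cnt1 = 16 and tr1_cnt0 = 128 (16 * 65528 + 128 = 1048576).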
2844  */
2845 static int udma_get_tr_counters(size_t len, unsigned long align_to,
2846 				u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
2847 {
2848 	if (len < SZ_64K) {
2849 		*tr0_cnt0 = len;
2850 		*tr0_cnt1 = 1;
2851 
2852 		return 1;
2853 	}
2854 
2855 	if (align_to > 3)
2856 		align_to = 3;
2857 
2858 realign:
2859 	*tr0_cnt0 = SZ_64K - BIT(align_to);
2860 	if (len / *tr0_cnt0 >= SZ_64K) {
2861 		if (align_to) {
2862 			align_to--;
2863 			goto realign;
2864 		}
2865 		return -EINVAL;
2866 	}
2867 
2868 	*tr0_cnt1 = len / *tr0_cnt0;
2869 	*tr1_cnt0 = len % *tr0_cnt0;
2870 
2871 	return 2;
2872 }
2873 
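/*
 * Build a TR mode descriptor for a slave SG list using type1 TRs; SG
 * entries of 64K or more are split in two TRs by udma_get_tr_counters().
 */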
2874 static struct udma_desc *
2875 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2876 		      unsigned int sglen, enum dma_transfer_direction dir,
2877 		      unsigned long tx_flags, void *context)
2878 {
2879 	struct scatterlist *sgent;
2880 	struct udma_desc *d;
2881 	struct cppi5_tr_type1_t *tr_req = NULL;
2882 	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2883 	unsigned int i;
2884 	size_t tr_size;
2885 	int num_tr = 0;
2886 	int tr_idx = 0;
2887 	u64 asel;
2888 
2889 	/* estimate the number of TRs we will need */
2890 	for_each_sg(sgl, sgent, sglen, i) {
2891 		if (sg_dma_len(sgent) < SZ_64K)
2892 			num_tr++;
2893 		else
2894 			num_tr += 2;
2895 	}
2896 
2897 	/* Now allocate and setup the descriptor. */
2898 	tr_size = sizeof(struct cppi5_tr_type1_t);
2899 	d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2900 	if (!d)
2901 		return NULL;
2902 
2903 	d->sglen = sglen;
2904 
2905 	if (uc->ud->match_data->type == DMA_TYPE_UDMA)
2906 		asel = 0;
2907 	else
2908 		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
2909 
2910 	tr_req = d->hwdesc[0].tr_req_base;
2911 	for_each_sg(sgl, sgent, sglen, i) {
2912 		dma_addr_t sg_addr = sg_dma_address(sgent);
2913 
2914 		num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2915 					      &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2916 		if (num_tr < 0) {
2917 			dev_err(uc->ud->dev, "size %u is not supported\n",
2918 				sg_dma_len(sgent));
2919 			udma_free_hwdesc(uc, d);
2920 			kfree(d);
2921 			return NULL;
2922 		}
2923 
2924 		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2925 			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2926 		cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2927 
2928 		sg_addr |= asel;
2929 		tr_req[tr_idx].addr = sg_addr;
2930 		tr_req[tr_idx].icnt0 = tr0_cnt0;
2931 		tr_req[tr_idx].icnt1 = tr0_cnt1;
2932 		tr_req[tr_idx].dim1 = tr0_cnt0;
2933 		tr_idx++;
2934 
2935 		if (num_tr == 2) {
2936 			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2937 				      false, false,
2938 				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2939 			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2940 					 CPPI5_TR_CSF_SUPR_EVT);
2941 
2942 			tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2943 			tr_req[tr_idx].icnt0 = tr1_cnt0;
2944 			tr_req[tr_idx].icnt1 = 1;
2945 			tr_req[tr_idx].dim1 = tr1_cnt0;
2946 			tr_idx++;
2947 		}
2948 
2949 		d->residue += sg_dma_len(sgent);
2950 	}
2951 
2952 	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
2953 			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
2954 
2955 	return d;
2956 }
2957 
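/*
 * Build a triggered TR mode descriptor (type15 TRs) where each trigger
 * moves one burst (trigger_size = dev_width * burst) between the device
 * address and the SG buffer; icnt2/icnt3 count the triggers per SG entry.
 */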
2958 static struct udma_desc *
2959 udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
2960 				unsigned int sglen,
2961 				enum dma_transfer_direction dir,
2962 				unsigned long tx_flags, void *context)
2963 {
2964 	struct scatterlist *sgent;
2965 	struct cppi5_tr_type15_t *tr_req = NULL;
2966 	enum dma_slave_buswidth dev_width;
2967 	u16 tr_cnt0, tr_cnt1;
2968 	dma_addr_t dev_addr;
2969 	struct udma_desc *d;
2970 	unsigned int i;
2971 	size_t tr_size, sg_len;
2972 	int num_tr = 0;
2973 	int tr_idx = 0;
2974 	u32 burst, trigger_size, port_window;
2975 	u64 asel;
2976 
2977 	if (dir == DMA_DEV_TO_MEM) {
2978 		dev_addr = uc->cfg.src_addr;
2979 		dev_width = uc->cfg.src_addr_width;
2980 		burst = uc->cfg.src_maxburst;
2981 		port_window = uc->cfg.src_port_window_size;
2982 	} else if (dir == DMA_MEM_TO_DEV) {
2983 		dev_addr = uc->cfg.dst_addr;
2984 		dev_width = uc->cfg.dst_addr_width;
2985 		burst = uc->cfg.dst_maxburst;
2986 		port_window = uc->cfg.dst_port_window_size;
2987 	} else {
2988 		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2989 		return NULL;
2990 	}
2991 
2992 	if (!burst)
2993 		burst = 1;
2994 
2995 	if (port_window) {
2996 		if (port_window != burst) {
2997 			dev_err(uc->ud->dev,
2998 				"The burst must be equal to port_window\n");
2999 			return NULL;
3000 		}
3001 
3002 		tr_cnt0 = dev_width * port_window;
3003 		tr_cnt1 = 1;
3004 	} else {
3005 		tr_cnt0 = dev_width;
3006 		tr_cnt1 = burst;
3007 	}
3008 	trigger_size = tr_cnt0 * tr_cnt1;
3009 
3010 	/* estimate the number of TRs we will need */
3011 	for_each_sg(sgl, sgent, sglen, i) {
3012 		sg_len = sg_dma_len(sgent);
3013 
3014 		if (sg_len % trigger_size) {
3015 			dev_err(uc->ud->dev,
3016 				"Not aligned SG entry (%zu for %u)\n", sg_len,
3017 				trigger_size);
3018 			return NULL;
3019 		}
3020 
3021 		if (sg_len / trigger_size < SZ_64K)
3022 			num_tr++;
3023 		else
3024 			num_tr += 2;
3025 	}
3026 
3027 	/* Now allocate and setup the descriptor. */
3028 	tr_size = sizeof(struct cppi5_tr_type15_t);
3029 	d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
3030 	if (!d)
3031 		return NULL;
3032 
3033 	d->sglen = sglen;
3034 
3035 	if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
3036 		asel = 0;
3037 	} else {
3038 		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3039 		dev_addr |= asel;
3040 	}
3041 
3042 	tr_req = d->hwdesc[0].tr_req_base;
3043 	for_each_sg(sgl, sgent, sglen, i) {
3044 		u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
3045 		dma_addr_t sg_addr = sg_dma_address(sgent);
3046 
3047 		sg_len = sg_dma_len(sgent);
3048 		num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
3049 					      &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
3050 		if (num_tr < 0) {
3051 			dev_err(uc->ud->dev, "size %zu is not supported\n",
3052 				sg_len);
3053 			udma_free_hwdesc(uc, d);
3054 			kfree(d);
3055 			return NULL;
3056 		}
3057 
3058 		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
3059 			      true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3060 		cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
3061 		cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3062 				     uc->config.tr_trigger_type,
3063 				     CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
3064 
3065 		sg_addr |= asel;
3066 		if (dir == DMA_DEV_TO_MEM) {
3067 			tr_req[tr_idx].addr = dev_addr;
3068 			tr_req[tr_idx].icnt0 = tr_cnt0;
3069 			tr_req[tr_idx].icnt1 = tr_cnt1;
3070 			tr_req[tr_idx].icnt2 = tr0_cnt2;
3071 			tr_req[tr_idx].icnt3 = tr0_cnt3;
3072 			tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3073 
3074 			tr_req[tr_idx].daddr = sg_addr;
3075 			tr_req[tr_idx].dicnt0 = tr_cnt0;
3076 			tr_req[tr_idx].dicnt1 = tr_cnt1;
3077 			tr_req[tr_idx].dicnt2 = tr0_cnt2;
3078 			tr_req[tr_idx].dicnt3 = tr0_cnt3;
3079 			tr_req[tr_idx].ddim1 = tr_cnt0;
3080 			tr_req[tr_idx].ddim2 = trigger_size;
3081 			tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
3082 		} else {
3083 			tr_req[tr_idx].addr = sg_addr;
3084 			tr_req[tr_idx].icnt0 = tr_cnt0;
3085 			tr_req[tr_idx].icnt1 = tr_cnt1;
3086 			tr_req[tr_idx].icnt2 = tr0_cnt2;
3087 			tr_req[tr_idx].icnt3 = tr0_cnt3;
3088 			tr_req[tr_idx].dim1 = tr_cnt0;
3089 			tr_req[tr_idx].dim2 = trigger_size;
3090 			tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;
3091 
3092 			tr_req[tr_idx].daddr = dev_addr;
3093 			tr_req[tr_idx].dicnt0 = tr_cnt0;
3094 			tr_req[tr_idx].dicnt1 = tr_cnt1;
3095 			tr_req[tr_idx].dicnt2 = tr0_cnt2;
3096 			tr_req[tr_idx].dicnt3 = tr0_cnt3;
3097 			tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3098 		}
3099 
3100 		tr_idx++;
3101 
3102 		if (num_tr == 2) {
3103 			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
3104 				      false, true,
3105 				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3106 			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3107 					 CPPI5_TR_CSF_SUPR_EVT);
3108 			cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3109 					     uc->config.tr_trigger_type,
3110 					     CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
3111 					     0, 0);
3112 
3113 			sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
3114 			if (dir == DMA_DEV_TO_MEM) {
3115 				tr_req[tr_idx].addr = dev_addr;
3116 				tr_req[tr_idx].icnt0 = tr_cnt0;
3117 				tr_req[tr_idx].icnt1 = tr_cnt1;
3118 				tr_req[tr_idx].icnt2 = tr1_cnt2;
3119 				tr_req[tr_idx].icnt3 = 1;
3120 				tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3121 
3122 				tr_req[tr_idx].daddr = sg_addr;
3123 				tr_req[tr_idx].dicnt0 = tr_cnt0;
3124 				tr_req[tr_idx].dicnt1 = tr_cnt1;
3125 				tr_req[tr_idx].dicnt2 = tr1_cnt2;
3126 				tr_req[tr_idx].dicnt3 = 1;
3127 				tr_req[tr_idx].ddim1 = tr_cnt0;
3128 				tr_req[tr_idx].ddim2 = trigger_size;
3129 			} else {
3130 				tr_req[tr_idx].addr = sg_addr;
3131 				tr_req[tr_idx].icnt0 = tr_cnt0;
3132 				tr_req[tr_idx].icnt1 = tr_cnt1;
3133 				tr_req[tr_idx].icnt2 = tr1_cnt2;
3134 				tr_req[tr_idx].icnt3 = 1;
3135 				tr_req[tr_idx].dim1 = tr_cnt0;
3136 				tr_req[tr_idx].dim2 = trigger_size;
3137 
3138 				tr_req[tr_idx].daddr = dev_addr;
3139 				tr_req[tr_idx].dicnt0 = tr_cnt0;
3140 				tr_req[tr_idx].dicnt1 = tr_cnt1;
3141 				tr_req[tr_idx].dicnt2 = tr1_cnt2;
3142 				tr_req[tr_idx].dicnt3 = 1;
3143 				tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3144 			}
3145 			tr_idx++;
3146 		}
3147 
3148 		d->residue += sg_len;
3149 	}
3150 
3151 	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
3152 			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3153 
3154 	return d;
3155 }
3156 
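/*
 * Set up the static TR parameters used by PDMA endpoints: the bus width
 * maps to the element size (ES), elcnt is the burst length and bstcnt
 * tells PDMA when to close the packet for non-cyclic TR mode transfers.
 */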
3157 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
3158 				   enum dma_slave_buswidth dev_width,
3159 				   u16 elcnt)
3160 {
3161 	if (uc->config.ep_type != PSIL_EP_PDMA_XY)
3162 		return 0;
3163 
3164 	/* Bus width translates to the element size (ES) */
3165 	switch (dev_width) {
3166 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
3167 		d->static_tr.elsize = 0;
3168 		break;
3169 	case DMA_SLAVE_BUSWIDTH_2_BYTES:
3170 		d->static_tr.elsize = 1;
3171 		break;
3172 	case DMA_SLAVE_BUSWIDTH_3_BYTES:
3173 		d->static_tr.elsize = 2;
3174 		break;
3175 	case DMA_SLAVE_BUSWIDTH_4_BYTES:
3176 		d->static_tr.elsize = 3;
3177 		break;
3178 	case DMA_SLAVE_BUSWIDTH_8_BYTES:
3179 		d->static_tr.elsize = 4;
3180 		break;
3181 	default: /* not reached */
3182 		return -EINVAL;
3183 	}
3184 
3185 	d->static_tr.elcnt = elcnt;
3186 
3187 	/*
3188 	 * PDMA must close the packet when the channel is in packet mode.
3189 	 * For TR mode when the channel is not cyclic we also need PDMA to close
3190 	 * the packet otherwise the transfer will stall because PDMA holds on
3191 	 * the data it has received from the peripheral.
3192 	 */
3193 	if (uc->config.pkt_mode || !uc->cyclic) {
3194 		unsigned int div = dev_width * elcnt;
3195 
3196 		if (uc->cyclic)
3197 			d->static_tr.bstcnt = d->residue / d->sglen / div;
3198 		else
3199 			d->static_tr.bstcnt = d->residue / div;
3200 
3201 		if (uc->config.dir == DMA_DEV_TO_MEM &&
3202 		    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
3203 			return -EINVAL;
3204 	} else {
3205 		d->static_tr.bstcnt = 0;
3206 	}
3207 
3208 	return 0;
3209 }
3210 
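/*
 * Build a packet mode descriptor chain: one host descriptor per SG entry,
 * linked together, with the first descriptor carrying the packet ID, flow
 * ID, return ring and total packet length (limited to 4M).
 */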
3211 static struct udma_desc *
3212 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
3213 		       unsigned int sglen, enum dma_transfer_direction dir,
3214 		       unsigned long tx_flags, void *context)
3215 {
3216 	struct scatterlist *sgent;
3217 	struct cppi5_host_desc_t *h_desc = NULL;
3218 	struct udma_desc *d;
3219 	u32 ring_id;
3220 	unsigned int i;
3221 	u64 asel;
3222 
3223 	d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
3224 	if (!d)
3225 		return NULL;
3226 
3227 	d->sglen = sglen;
3228 	d->hwdesc_count = sglen;
3229 
3230 	if (dir == DMA_DEV_TO_MEM)
3231 		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3232 	else
3233 		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3234 
3235 	if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3236 		asel = 0;
3237 	else
3238 		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3239 
3240 	for_each_sg(sgl, sgent, sglen, i) {
3241 		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3242 		dma_addr_t sg_addr = sg_dma_address(sgent);
3243 		struct cppi5_host_desc_t *desc;
3244 		size_t sg_len = sg_dma_len(sgent);
3245 
3246 		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3247 						GFP_NOWAIT,
3248 						&hwdesc->cppi5_desc_paddr);
3249 		if (!hwdesc->cppi5_desc_vaddr) {
3250 			dev_err(uc->ud->dev,
3251 				"descriptor%d allocation failed\n", i);
3252 
3253 			udma_free_hwdesc(uc, d);
3254 			kfree(d);
3255 			return NULL;
3256 		}
3257 
3258 		d->residue += sg_len;
3259 		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3260 		desc = hwdesc->cppi5_desc_vaddr;
3261 
3262 		if (i == 0) {
3263 			cppi5_hdesc_init(desc, 0, 0);
3264 			/* Flow and Packet ID */
3265 			cppi5_desc_set_pktids(&desc->hdr, uc->id,
3266 					      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3267 			cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
3268 		} else {
3269 			cppi5_hdesc_reset_hbdesc(desc);
3270 			cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
3271 		}
3272 
3273 		/* attach the sg buffer to the descriptor */
3274 		sg_addr |= asel;
3275 		cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
3276 
3277 		/* Attach link as host buffer descriptor */
3278 		if (h_desc)
3279 			cppi5_hdesc_link_hbdesc(h_desc,
3280 						hwdesc->cppi5_desc_paddr | asel);
3281 
3282 		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
3283 		    dir == DMA_MEM_TO_DEV)
3284 			h_desc = desc;
3285 	}
3286 
3287 	if (d->residue >= SZ_4M) {
3288 		dev_err(uc->ud->dev,
3289 			"%s: Transfer size %u is over the supported 4M range\n",
3290 			__func__, d->residue);
3291 		udma_free_hwdesc(uc, d);
3292 		kfree(d);
3293 		return NULL;
3294 	}
3295 
3296 	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3297 	cppi5_hdesc_set_pktlen(h_desc, d->residue);
3298 
3299 	return d;
3300 }
3301 
3302 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
3303 				void *data, size_t len)
3304 {
3305 	struct udma_desc *d = to_udma_desc(desc);
3306 	struct udma_chan *uc = to_udma_chan(desc->chan);
3307 	struct cppi5_host_desc_t *h_desc;
3308 	u32 psd_size = len;
3309 	u32 flags = 0;
3310 
3311 	if (!uc->config.pkt_mode || !uc->config.metadata_size)
3312 		return -ENOTSUPP;
3313 
3314 	if (!data || len > uc->config.metadata_size)
3315 		return -EINVAL;
3316 
3317 	if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3318 		return -EINVAL;
3319 
3320 	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3321 	if (d->dir == DMA_MEM_TO_DEV)
3322 		memcpy(h_desc->epib, data, len);
3323 
3324 	if (uc->config.needs_epib)
3325 		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3326 
3327 	d->metadata = data;
3328 	d->metadata_size = len;
3329 	if (uc->config.needs_epib)
3330 		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3331 
3332 	cppi5_hdesc_update_flags(h_desc, flags);
3333 	cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3334 
3335 	return 0;
3336 }
3337 
3338 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
3339 				   size_t *payload_len, size_t *max_len)
3340 {
3341 	struct udma_desc *d = to_udma_desc(desc);
3342 	struct udma_chan *uc = to_udma_chan(desc->chan);
3343 	struct cppi5_host_desc_t *h_desc;
3344 
3345 	if (!uc->config.pkt_mode || !uc->config.metadata_size)
3346 		return ERR_PTR(-ENOTSUPP);
3347 
3348 	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3349 
3350 	*max_len = uc->config.metadata_size;
3351 
3352 	*payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
3353 		       CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
3354 	*payload_len += cppi5_hdesc_get_psdata_size(h_desc);
3355 
3356 	return h_desc->epib;
3357 }
3358 
3359 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
3360 				 size_t payload_len)
3361 {
3362 	struct udma_desc *d = to_udma_desc(desc);
3363 	struct udma_chan *uc = to_udma_chan(desc->chan);
3364 	struct cppi5_host_desc_t *h_desc;
3365 	u32 psd_size = payload_len;
3366 	u32 flags = 0;
3367 
3368 	if (!uc->config.pkt_mode || !uc->config.metadata_size)
3369 		return -ENOTSUPP;
3370 
3371 	if (payload_len > uc->config.metadata_size)
3372 		return -EINVAL;
3373 
3374 	if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3375 		return -EINVAL;
3376 
3377 	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3378 
3379 	if (uc->config.needs_epib) {
3380 		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3381 		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3382 	}
3383 
3384 	cppi5_hdesc_update_flags(h_desc, flags);
3385 	cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3386 
3387 	return 0;
3388 }
3389 
3390 static struct dma_descriptor_metadata_ops metadata_ops = {
3391 	.attach = udma_attach_metadata,
3392 	.get_ptr = udma_get_metadata_ptr,
3393 	.set_len = udma_set_metadata_len,
3394 };
3395 
3396 static struct dma_async_tx_descriptor *
3397 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
3398 		   unsigned int sglen, enum dma_transfer_direction dir,
3399 		   unsigned long tx_flags, void *context)
3400 {
3401 	struct udma_chan *uc = to_udma_chan(chan);
3402 	enum dma_slave_buswidth dev_width;
3403 	struct udma_desc *d;
3404 	u32 burst;
3405 
3406 	if (dir != uc->config.dir &&
3407 	    (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
3408 		dev_err(chan->device->dev,
3409 			"%s: chan%d is for %s, not supporting %s\n",
3410 			__func__, uc->id,
3411 			dmaengine_get_direction_text(uc->config.dir),
3412 			dmaengine_get_direction_text(dir));
3413 		return NULL;
3414 	}
3415 
3416 	if (dir == DMA_DEV_TO_MEM) {
3417 		dev_width = uc->cfg.src_addr_width;
3418 		burst = uc->cfg.src_maxburst;
3419 	} else if (dir == DMA_MEM_TO_DEV) {
3420 		dev_width = uc->cfg.dst_addr_width;
3421 		burst = uc->cfg.dst_maxburst;
3422 	} else {
3423 		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
3424 		return NULL;
3425 	}
3426 
3427 	if (!burst)
3428 		burst = 1;
3429 
3430 	uc->config.tx_flags = tx_flags;
3431 
3432 	if (uc->config.pkt_mode)
3433 		d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
3434 					   context);
3435 	else if (is_slave_direction(uc->config.dir))
3436 		d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
3437 					  context);
3438 	else
3439 		d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
3440 						    tx_flags, context);
3441 
3442 	if (!d)
3443 		return NULL;
3444 
3445 	d->dir = dir;
3446 	d->desc_idx = 0;
3447 	d->tr_idx = 0;
3448 
3449 	/* static TR for remote PDMA */
3450 	if (udma_configure_statictr(uc, d, dev_width, burst)) {
3451 		dev_err(uc->ud->dev,
3452 			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3453 			__func__, d->static_tr.bstcnt);
3454 
3455 		udma_free_hwdesc(uc, d);
3456 		kfree(d);
3457 		return NULL;
3458 	}
3459 
3460 	if (uc->config.metadata_size)
3461 		d->vd.tx.metadata_ops = &metadata_ops;
3462 
3463 	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3464 }
3465 
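/*
 * Cyclic TR mode: one (or two, for periods of 64K or more) type1 TR per
 * period, with events suppressed unless DMA_PREP_INTERRUPT is requested
 * and the descriptor reloaded indefinitely by the hardware.
 */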
3466 static struct udma_desc *
3467 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
3468 			size_t buf_len, size_t period_len,
3469 			enum dma_transfer_direction dir, unsigned long flags)
3470 {
3471 	struct udma_desc *d;
3472 	size_t tr_size, period_addr;
3473 	struct cppi5_tr_type1_t *tr_req;
3474 	unsigned int periods = buf_len / period_len;
3475 	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3476 	unsigned int i;
3477 	int num_tr;
3478 
3479 	num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
3480 				      &tr0_cnt1, &tr1_cnt0);
3481 	if (num_tr < 0) {
3482 		dev_err(uc->ud->dev, "size %zu is not supported\n",
3483 			period_len);
3484 		return NULL;
3485 	}
3486 
3487 	/* Now allocate and setup the descriptor. */
3488 	tr_size = sizeof(struct cppi5_tr_type1_t);
3489 	d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
3490 	if (!d)
3491 		return NULL;
3492 
3493 	tr_req = d->hwdesc[0].tr_req_base;
3494 	if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3495 		period_addr = buf_addr;
3496 	else
3497 		period_addr = buf_addr |
3498 			((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
3499 
3500 	for (i = 0; i < periods; i++) {
3501 		int tr_idx = i * num_tr;
3502 
3503 		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
3504 			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3505 
3506 		tr_req[tr_idx].addr = period_addr;
3507 		tr_req[tr_idx].icnt0 = tr0_cnt0;
3508 		tr_req[tr_idx].icnt1 = tr0_cnt1;
3509 		tr_req[tr_idx].dim1 = tr0_cnt0;
3510 
3511 		if (num_tr == 2) {
3512 			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3513 					 CPPI5_TR_CSF_SUPR_EVT);
3514 			tr_idx++;
3515 
3516 			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
3517 				      false, false,
3518 				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3519 
3520 			tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
3521 			tr_req[tr_idx].icnt0 = tr1_cnt0;
3522 			tr_req[tr_idx].icnt1 = 1;
3523 			tr_req[tr_idx].dim1 = tr1_cnt0;
3524 		}
3525 
3526 		if (!(flags & DMA_PREP_INTERRUPT))
3527 			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3528 					 CPPI5_TR_CSF_SUPR_EVT);
3529 
3530 		period_addr += period_len;
3531 	}
3532 
3533 	return d;
3534 }
3535 
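/*
 * Cyclic packet mode: one host descriptor per period (at most ring size
 * minus one periods, each below 4M), all pointing into the same buffer.
 */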
3536 static struct udma_desc *
3537 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
3538 			 size_t buf_len, size_t period_len,
3539 			 enum dma_transfer_direction dir, unsigned long flags)
3540 {
3541 	struct udma_desc *d;
3542 	u32 ring_id;
3543 	int i;
3544 	int periods = buf_len / period_len;
3545 
3546 	if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
3547 		return NULL;
3548 
3549 	if (period_len >= SZ_4M)
3550 		return NULL;
3551 
3552 	d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT);
3553 	if (!d)
3554 		return NULL;
3555 
3556 	d->hwdesc_count = periods;
3557 
3558 	/* TODO: re-check this... */
3559 	if (dir == DMA_DEV_TO_MEM)
3560 		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3561 	else
3562 		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3563 
3564 	if (uc->ud->match_data->type != DMA_TYPE_UDMA)
3565 		buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3566 
3567 	for (i = 0; i < periods; i++) {
3568 		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3569 		dma_addr_t period_addr = buf_addr + (period_len * i);
3570 		struct cppi5_host_desc_t *h_desc;
3571 
3572 		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3573 						GFP_NOWAIT,
3574 						&hwdesc->cppi5_desc_paddr);
3575 		if (!hwdesc->cppi5_desc_vaddr) {
3576 			dev_err(uc->ud->dev,
3577 				"descriptor%d allocation failed\n", i);
3578 
3579 			udma_free_hwdesc(uc, d);
3580 			kfree(d);
3581 			return NULL;
3582 		}
3583 
3584 		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3585 		h_desc = hwdesc->cppi5_desc_vaddr;
3586 
3587 		cppi5_hdesc_init(h_desc, 0, 0);
3588 		cppi5_hdesc_set_pktlen(h_desc, period_len);
3589 
3590 		/* Flow and Packet ID */
3591 		cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
3592 				      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3593 		cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
3594 
3595 		/* attach each period to a new descriptor */
3596 		cppi5_hdesc_attach_buf(h_desc,
3597 				       period_addr, period_len,
3598 				       period_addr, period_len);
3599 	}
3600 
3601 	return d;
3602 }
3603 
3604 static struct dma_async_tx_descriptor *
3605 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
3606 		     size_t period_len, enum dma_transfer_direction dir,
3607 		     unsigned long flags)
3608 {
3609 	struct udma_chan *uc = to_udma_chan(chan);
3610 	enum dma_slave_buswidth dev_width;
3611 	struct udma_desc *d;
3612 	u32 burst;
3613 
3614 	if (dir != uc->config.dir) {
3615 		dev_err(chan->device->dev,
3616 			"%s: chan%d is for %s, not supporting %s\n",
3617 			__func__, uc->id,
3618 			dmaengine_get_direction_text(uc->config.dir),
3619 			dmaengine_get_direction_text(dir));
3620 		return NULL;
3621 	}
3622 
3623 	uc->cyclic = true;
3624 
3625 	if (dir == DMA_DEV_TO_MEM) {
3626 		dev_width = uc->cfg.src_addr_width;
3627 		burst = uc->cfg.src_maxburst;
3628 	} else if (dir == DMA_MEM_TO_DEV) {
3629 		dev_width = uc->cfg.dst_addr_width;
3630 		burst = uc->cfg.dst_maxburst;
3631 	} else {
3632 		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
3633 		return NULL;
3634 	}
3635 
3636 	if (!burst)
3637 		burst = 1;
3638 
3639 	if (uc->config.pkt_mode)
3640 		d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
3641 					     dir, flags);
3642 	else
3643 		d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
3644 					    dir, flags);
3645 
3646 	if (!d)
3647 		return NULL;
3648 
3649 	d->sglen = buf_len / period_len;
3650 
3651 	d->dir = dir;
3652 	d->residue = buf_len;
3653 
3654 	/* static TR for remote PDMA */
3655 	if (udma_configure_statictr(uc, d, dev_width, burst)) {
3656 		dev_err(uc->ud->dev,
3657 			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3658 			__func__, d->static_tr.bstcnt);
3659 
3660 		udma_free_hwdesc(uc, d);
3661 		kfree(d);
3662 		return NULL;
3663 	}
3664 
3665 	if (uc->config.metadata_size)
3666 		d->vd.tx.metadata_ops = &metadata_ops;
3667 
3668 	return vchan_tx_prep(&uc->vc, &d->vd, flags);
3669 }
3670 
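/*
 * Prepare a MEM_TO_MEM copy using one or two type15 TRs, split with
 * udma_get_tr_counters() when the length does not fit a single TR.
 * Completion (EOP) is signalled only on the last TR.
 */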
3671 static struct dma_async_tx_descriptor *
3672 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
3673 		     size_t len, unsigned long tx_flags)
3674 {
3675 	struct udma_chan *uc = to_udma_chan(chan);
3676 	struct udma_desc *d;
3677 	struct cppi5_tr_type15_t *tr_req;
3678 	int num_tr;
3679 	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
3680 	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3681 
3682 	if (uc->config.dir != DMA_MEM_TO_MEM) {
3683 		dev_err(chan->device->dev,
3684 			"%s: chan%d is for %s, not supporting %s\n",
3685 			__func__, uc->id,
3686 			dmaengine_get_direction_text(uc->config.dir),
3687 			dmaengine_get_direction_text(DMA_MEM_TO_MEM));
3688 		return NULL;
3689 	}
3690 
3691 	num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
3692 				      &tr0_cnt1, &tr1_cnt0);
3693 	if (num_tr < 0) {
3694 		dev_err(uc->ud->dev, "size %zu is not supported\n",
3695 			len);
3696 		return NULL;
3697 	}
3698 
3699 	d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
3700 	if (!d)
3701 		return NULL;
3702 
3703 	d->dir = DMA_MEM_TO_MEM;
3704 	d->desc_idx = 0;
3705 	d->tr_idx = 0;
3706 	d->residue = len;
3707 
3708 	if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
3709 		src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3710 		dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3711 	}
3712 
3713 	tr_req = d->hwdesc[0].tr_req_base;
3714 
3715 	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
3716 		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3717 	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
3718 
3719 	tr_req[0].addr = src;
3720 	tr_req[0].icnt0 = tr0_cnt0;
3721 	tr_req[0].icnt1 = tr0_cnt1;
3722 	tr_req[0].icnt2 = 1;
3723 	tr_req[0].icnt3 = 1;
3724 	tr_req[0].dim1 = tr0_cnt0;
3725 
3726 	tr_req[0].daddr = dest;
3727 	tr_req[0].dicnt0 = tr0_cnt0;
3728 	tr_req[0].dicnt1 = tr0_cnt1;
3729 	tr_req[0].dicnt2 = 1;
3730 	tr_req[0].dicnt3 = 1;
3731 	tr_req[0].ddim1 = tr0_cnt0;
3732 
3733 	if (num_tr == 2) {
3734 		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
3735 			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3736 		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
3737 
3738 		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
3739 		tr_req[1].icnt0 = tr1_cnt0;
3740 		tr_req[1].icnt1 = 1;
3741 		tr_req[1].icnt2 = 1;
3742 		tr_req[1].icnt3 = 1;
3743 
3744 		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
3745 		tr_req[1].dicnt0 = tr1_cnt0;
3746 		tr_req[1].dicnt1 = 1;
3747 		tr_req[1].dicnt2 = 1;
3748 		tr_req[1].dicnt3 = 1;
3749 	}
3750 
3751 	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
3752 			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3753 
3754 	if (uc->config.metadata_size)
3755 		d->vd.tx.metadata_ops = &metadata_ops;
3756 
3757 	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3758 }
3759 
3760 static void udma_issue_pending(struct dma_chan *chan)
3761 {
3762 	struct udma_chan *uc = to_udma_chan(chan);
3763 	unsigned long flags;
3764 
3765 	spin_lock_irqsave(&uc->vc.lock, flags);
3766 
3767 	/* If we have something pending and no active descriptor, then */
3768 	if (vchan_issue_pending(&uc->vc) && !uc->desc) {
3769 		/*
3770 		 * start a descriptor if the channel is NOT [marked as
3771 		 * terminating _and_ it is still running (teardown has not
3772 		 * completed yet)].
3773 		 */
3774 		if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
3775 		      udma_is_chan_running(uc)))
3776 			udma_start(uc);
3777 	}
3778 
3779 	spin_unlock_irqrestore(&uc->vc.lock, flags);
3780 }
3781 
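/*
 * Report transfer status: the residue is derived from the channel's
 * realtime byte counters, and for non-native endpoints the difference
 * between the local and peer byte counts is reported as in-flight bytes.
 */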
3782 static enum dma_status udma_tx_status(struct dma_chan *chan,
3783 				      dma_cookie_t cookie,
3784 				      struct dma_tx_state *txstate)
3785 {
3786 	struct udma_chan *uc = to_udma_chan(chan);
3787 	enum dma_status ret;
3788 	unsigned long flags;
3789 
3790 	spin_lock_irqsave(&uc->vc.lock, flags);
3791 
3792 	ret = dma_cookie_status(chan, cookie, txstate);
3793 
3794 	if (!udma_is_chan_running(uc))
3795 		ret = DMA_COMPLETE;
3796 
3797 	if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
3798 		ret = DMA_PAUSED;
3799 
3800 	if (ret == DMA_COMPLETE || !txstate)
3801 		goto out;
3802 
3803 	if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
3804 		u32 peer_bcnt = 0;
3805 		u32 bcnt = 0;
3806 		u32 residue = uc->desc->residue;
3807 		u32 delay = 0;
3808 
3809 		if (uc->desc->dir == DMA_MEM_TO_DEV) {
3810 			bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
3811 
3812 			if (uc->config.ep_type != PSIL_EP_NATIVE) {
3813 				peer_bcnt = udma_tchanrt_read(uc,
3814 						UDMA_CHAN_RT_PEER_BCNT_REG);
3815 
3816 				if (bcnt > peer_bcnt)
3817 					delay = bcnt - peer_bcnt;
3818 			}
3819 		} else if (uc->desc->dir == DMA_DEV_TO_MEM) {
3820 			bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3821 
3822 			if (uc->config.ep_type != PSIL_EP_NATIVE) {
3823 				peer_bcnt = udma_rchanrt_read(uc,
3824 						UDMA_CHAN_RT_PEER_BCNT_REG);
3825 
3826 				if (peer_bcnt > bcnt)
3827 					delay = peer_bcnt - bcnt;
3828 			}
3829 		} else {
3830 			bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3831 		}
3832 
3833 		if (bcnt && !(bcnt % uc->desc->residue))
3834 			residue = 0;
3835 		else
3836 			residue -= bcnt % uc->desc->residue;
3837 
3838 		if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
3839 			ret = DMA_COMPLETE;
3840 			delay = 0;
3841 		}
3842 
3843 		dma_set_residue(txstate, residue);
3844 		dma_set_in_flight_bytes(txstate, delay);
3845 
3846 	} else {
3847 		ret = DMA_COMPLETE;
3848 	}
3849 
3850 out:
3851 	spin_unlock_irqrestore(&uc->vc.lock, flags);
3852 	return ret;
3853 }
3854 
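/*
 * For device transfers the pause request goes to the PSI-L peer via the
 * PEER_RT_EN register; MEM_TO_MEM channels are paused through the channel
 * RT control register. udma_resume() clears the same bits.
 */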
3855 static int udma_pause(struct dma_chan *chan)
3856 {
3857 	struct udma_chan *uc = to_udma_chan(chan);
3858 
3859 	/* pause the channel */
3860 	switch (uc->config.dir) {
3861 	case DMA_DEV_TO_MEM:
3862 		udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3863 					 UDMA_PEER_RT_EN_PAUSE,
3864 					 UDMA_PEER_RT_EN_PAUSE);
3865 		break;
3866 	case DMA_MEM_TO_DEV:
3867 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3868 					 UDMA_PEER_RT_EN_PAUSE,
3869 					 UDMA_PEER_RT_EN_PAUSE);
3870 		break;
3871 	case DMA_MEM_TO_MEM:
3872 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3873 					 UDMA_CHAN_RT_CTL_PAUSE,
3874 					 UDMA_CHAN_RT_CTL_PAUSE);
3875 		break;
3876 	default:
3877 		return -EINVAL;
3878 	}
3879 
3880 	return 0;
3881 }
3882 
3883 static int udma_resume(struct dma_chan *chan)
3884 {
3885 	struct udma_chan *uc = to_udma_chan(chan);
3886 
3887 	/* resume the channel */
3888 	switch (uc->config.dir) {
3889 	case DMA_DEV_TO_MEM:
3890 		udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3891 					 UDMA_PEER_RT_EN_PAUSE, 0);
3892 
3893 		break;
3894 	case DMA_MEM_TO_DEV:
3895 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3896 					 UDMA_PEER_RT_EN_PAUSE, 0);
3897 		break;
3898 	case DMA_MEM_TO_MEM:
3899 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3900 					 UDMA_CHAN_RT_CTL_PAUSE, 0);
3901 		break;
3902 	default:
3903 		return -EINVAL;
3904 	}
3905 
3906 	return 0;
3907 }
3908 
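/*
 * Stop the channel and free all queued descriptors; the currently active
 * descriptor is marked as terminated and released once teardown completes.
 */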
3909 static int udma_terminate_all(struct dma_chan *chan)
3910 {
3911 	struct udma_chan *uc = to_udma_chan(chan);
3912 	unsigned long flags;
3913 	LIST_HEAD(head);
3914 
3915 	spin_lock_irqsave(&uc->vc.lock, flags);
3916 
3917 	if (udma_is_chan_running(uc))
3918 		udma_stop(uc);
3919 
3920 	if (uc->desc) {
3921 		uc->terminated_desc = uc->desc;
3922 		uc->desc = NULL;
3923 		uc->terminated_desc->terminated = true;
3924 		cancel_delayed_work(&uc->tx_drain.work);
3925 	}
3926 
3927 	uc->paused = false;
3928 
3929 	vchan_get_all_descriptors(&uc->vc, &head);
3930 	spin_unlock_irqrestore(&uc->vc.lock, flags);
3931 	vchan_dma_desc_free_list(&uc->vc, &head);
3932 
3933 	return 0;
3934 }
3935 
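/*
 * Wait (up to 1s) for a pending teardown to complete, then make sure the
 * channel is stopped and its rings are reset.
 */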
3936 static void udma_synchronize(struct dma_chan *chan)
3937 {
3938 	struct udma_chan *uc = to_udma_chan(chan);
3939 	unsigned long timeout = msecs_to_jiffies(1000);
3940 
3941 	vchan_synchronize(&uc->vc);
3942 
3943 	if (uc->state == UDMA_CHAN_IS_TERMINATING) {
3944 		timeout = wait_for_completion_timeout(&uc->teardown_completed,
3945 						      timeout);
3946 		if (!timeout) {
3947 			dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
3948 				 uc->id);
3949 			udma_dump_chan_stdata(uc);
3950 			udma_reset_chan(uc, true);
3951 		}
3952 	}
3953 
3954 	udma_reset_chan(uc, false);
3955 	if (udma_is_chan_running(uc))
3956 		dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
3957 
3958 	cancel_delayed_work_sync(&uc->tx_drain.work);
3959 	udma_reset_rings(uc);
3960 }
3961 
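/*
 * Runs before the descriptor callback: fetch the EPIB metadata (if used)
 * and derive the dmaengine result/residue from the completing CPPI5
 * descriptor.
 */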
3962 static void udma_desc_pre_callback(struct virt_dma_chan *vc,
3963 				   struct virt_dma_desc *vd,
3964 				   struct dmaengine_result *result)
3965 {
3966 	struct udma_chan *uc = to_udma_chan(&vc->chan);
3967 	struct udma_desc *d;
3968 
3969 	if (!vd)
3970 		return;
3971 
3972 	d = to_udma_desc(&vd->tx);
3973 
3974 	if (d->metadata_size)
3975 		udma_fetch_epib(uc, d);
3976 
3977 	/* Provide residue information for the client */
3978 	if (result) {
3979 		void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
3980 
3981 		if (cppi5_desc_get_type(desc_vaddr) ==
3982 		    CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
3983 			result->residue = d->residue -
3984 					  cppi5_hdesc_get_pktlen(desc_vaddr);
3985 			if (result->residue)
3986 				result->result = DMA_TRANS_ABORTED;
3987 			else
3988 				result->result = DMA_TRANS_NOERROR;
3989 		} else {
3990 			result->residue = 0;
3991 			result->result = DMA_TRANS_NOERROR;
3992 		}
3993 	}
3994 }
3995 
3996 /*
3997  * This tasklet handles the completion of a DMA descriptor by
3998  * calling its callback and freeing it.
3999  */
4000 static void udma_vchan_complete(struct tasklet_struct *t)
4001 {
4002 	struct virt_dma_chan *vc = from_tasklet(vc, t, task);
4003 	struct virt_dma_desc *vd, *_vd;
4004 	struct dmaengine_desc_callback cb;
4005 	LIST_HEAD(head);
4006 
4007 	spin_lock_irq(&vc->lock);
4008 	list_splice_tail_init(&vc->desc_completed, &head);
4009 	vd = vc->cyclic;
4010 	if (vd) {
4011 		vc->cyclic = NULL;
4012 		dmaengine_desc_get_callback(&vd->tx, &cb);
4013 	} else {
4014 		memset(&cb, 0, sizeof(cb));
4015 	}
4016 	spin_unlock_irq(&vc->lock);
4017 
4018 	udma_desc_pre_callback(vc, vd, NULL);
4019 	dmaengine_desc_callback_invoke(&cb, NULL);
4020 
4021 	list_for_each_entry_safe(vd, _vd, &head, node) {
4022 		struct dmaengine_result result;
4023 
4024 		dmaengine_desc_get_callback(&vd->tx, &cb);
4025 
4026 		list_del(&vd->node);
4027 
4028 		udma_desc_pre_callback(vc, vd, &result);
4029 		dmaengine_desc_callback_invoke(&cb, &result);
4030 
4031 		vchan_vdesc_fini(vd);
4032 	}
4033 }
4034 
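/*
 * Release everything a channel holds: terminate outstanding transfers,
 * free the ring/event interrupts, undo the PSI-L pairing and give back the
 * bchan/tchan/rchan and descriptor pool resources.
 */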
4035 static void udma_free_chan_resources(struct dma_chan *chan)
4036 {
4037 	struct udma_chan *uc = to_udma_chan(chan);
4038 	struct udma_dev *ud = to_udma_dev(chan->device);
4039 
4040 	udma_terminate_all(chan);
4041 	if (uc->terminated_desc) {
4042 		udma_reset_chan(uc, false);
4043 		udma_reset_rings(uc);
4044 	}
4045 
4046 	cancel_delayed_work_sync(&uc->tx_drain.work);
4047 
4048 	if (uc->irq_num_ring > 0) {
4049 		free_irq(uc->irq_num_ring, uc);
4050 
4051 		uc->irq_num_ring = 0;
4052 	}
4053 	if (uc->irq_num_udma > 0) {
4054 		free_irq(uc->irq_num_udma, uc);
4055 
4056 		uc->irq_num_udma = 0;
4057 	}
4058 
4059 	/* Release PSI-L pairing */
4060 	if (uc->psil_paired) {
4061 		navss_psil_unpair(ud, uc->config.src_thread,
4062 				  uc->config.dst_thread);
4063 		uc->psil_paired = false;
4064 	}
4065 
4066 	vchan_free_chan_resources(&uc->vc);
4067 	tasklet_kill(&uc->vc.task);
4068 
4069 	bcdma_free_bchan_resources(uc);
4070 	udma_free_tx_resources(uc);
4071 	udma_free_rx_resources(uc);
4072 	udma_reset_uchan(uc);
4073 
4074 	if (uc->use_dma_pool) {
4075 		dma_pool_destroy(uc->hdesc_pool);
4076 		uc->use_dma_pool = false;
4077 	}
4078 }
4079 
4080 static struct platform_driver udma_driver;
4081 static struct platform_driver bcdma_driver;
4082 static struct platform_driver pktdma_driver;
4083 
4084 struct udma_filter_param {
4085 	int remote_thread_id;
4086 	u32 atype;
4087 	u32 asel;
4088 	u32 tr_trigger_type;
4089 };
4090 
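/*
 * dmaengine filter callback: validate the parameters from the dma-spec,
 * work out the transfer direction and cache the PSI-L endpoint
 * configuration in the channel before it is handed to the client.
 */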
4091 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
4092 {
4093 	struct udma_chan_config *ucc;
4094 	struct psil_endpoint_config *ep_config;
4095 	struct udma_filter_param *filter_param;
4096 	struct udma_chan *uc;
4097 	struct udma_dev *ud;
4098 
4099 	if (chan->device->dev->driver != &udma_driver.driver &&
4100 	    chan->device->dev->driver != &bcdma_driver.driver &&
4101 	    chan->device->dev->driver != &pktdma_driver.driver)
4102 		return false;
4103 
4104 	uc = to_udma_chan(chan);
4105 	ucc = &uc->config;
4106 	ud = uc->ud;
4107 	filter_param = param;
4108 
4109 	if (filter_param->atype > 2) {
4110 		dev_err(ud->dev, "Invalid channel atype: %u\n",
4111 			filter_param->atype);
4112 		return false;
4113 	}
4114 
4115 	if (filter_param->asel > 15) {
4116 		dev_err(ud->dev, "Invalid channel asel: %u\n",
4117 			filter_param->asel);
4118 		return false;
4119 	}
4120 
4121 	ucc->remote_thread_id = filter_param->remote_thread_id;
4122 	ucc->atype = filter_param->atype;
4123 	ucc->asel = filter_param->asel;
4124 	ucc->tr_trigger_type = filter_param->tr_trigger_type;
4125 
4126 	if (ucc->tr_trigger_type) {
4127 		ucc->dir = DMA_MEM_TO_MEM;
4128 		goto triggered_bchan;
4129 	} else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
4130 		ucc->dir = DMA_MEM_TO_DEV;
4131 	} else {
4132 		ucc->dir = DMA_DEV_TO_MEM;
4133 	}
4134 
4135 	ep_config = psil_get_ep_config(ucc->remote_thread_id);
4136 	if (IS_ERR(ep_config)) {
4137 		dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
4138 			ucc->remote_thread_id);
4139 		ucc->dir = DMA_MEM_TO_MEM;
4140 		ucc->remote_thread_id = -1;
4141 		ucc->atype = 0;
4142 		ucc->asel = 0;
4143 		return false;
4144 	}
4145 
4146 	if (ud->match_data->type == DMA_TYPE_BCDMA &&
4147 	    ep_config->pkt_mode) {
4148 		dev_err(ud->dev,
4149 			"Only TR mode is supported (psi-l thread 0x%04x)\n",
4150 			ucc->remote_thread_id);
4151 		ucc->dir = DMA_MEM_TO_MEM;
4152 		ucc->remote_thread_id = -1;
4153 		ucc->atype = 0;
4154 		ucc->asel = 0;
4155 		return false;
4156 	}
4157 
4158 	ucc->pkt_mode = ep_config->pkt_mode;
4159 	ucc->channel_tpl = ep_config->channel_tpl;
4160 	ucc->notdpkt = ep_config->notdpkt;
4161 	ucc->ep_type = ep_config->ep_type;
4162 
4163 	if (ud->match_data->type == DMA_TYPE_PKTDMA &&
4164 	    ep_config->mapped_channel_id >= 0) {
4165 		ucc->mapped_channel_id = ep_config->mapped_channel_id;
4166 		ucc->default_flow_id = ep_config->default_flow_id;
4167 	} else {
4168 		ucc->mapped_channel_id = -1;
4169 		ucc->default_flow_id = -1;
4170 	}
4171 
4172 	if (ucc->ep_type != PSIL_EP_NATIVE) {
4173 		const struct udma_match_data *match_data = ud->match_data;
4174 
4175 		if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
4176 			ucc->enable_acc32 = ep_config->pdma_acc32;
4177 		if (match_data->flags & UDMA_FLAG_PDMA_BURST)
4178 			ucc->enable_burst = ep_config->pdma_burst;
4179 	}
4180 
4181 	ucc->needs_epib = ep_config->needs_epib;
4182 	ucc->psd_size = ep_config->psd_size;
4183 	ucc->metadata_size =
4184 			(ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
4185 			ucc->psd_size;
4186 
4187 	if (ucc->pkt_mode)
4188 		ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
4189 				 ucc->metadata_size, ud->desc_align);
4190 
4191 	dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
4192 		ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
4193 
4194 	return true;
4195 
4196 triggered_bchan:
4197 	dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
4198 		ucc->tr_trigger_type);
4199 
4200 	return true;
4202 }
4203 
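/*
 * Translate a DT dma-spec to a channel: BCDMA uses
 * <trigger_type remote_thread_id asel>, while UDMA/PKTDMA use
 * <remote_thread_id> with an optional atype (UDMA) or asel (PKTDMA) cell.
 */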
4204 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
4205 				      struct of_dma *ofdma)
4206 {
4207 	struct udma_dev *ud = ofdma->of_dma_data;
4208 	dma_cap_mask_t mask = ud->ddev.cap_mask;
4209 	struct udma_filter_param filter_param;
4210 	struct dma_chan *chan;
4211 
4212 	if (ud->match_data->type == DMA_TYPE_BCDMA) {
4213 		if (dma_spec->args_count != 3)
4214 			return NULL;
4215 
4216 		filter_param.tr_trigger_type = dma_spec->args[0];
4217 		filter_param.remote_thread_id = dma_spec->args[1];
4218 		filter_param.asel = dma_spec->args[2];
4219 		filter_param.atype = 0;
4220 	} else {
4221 		if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
4222 			return NULL;
4223 
4224 		filter_param.remote_thread_id = dma_spec->args[0];
4225 		filter_param.tr_trigger_type = 0;
4226 		if (dma_spec->args_count == 2) {
4227 			if (ud->match_data->type == DMA_TYPE_UDMA) {
4228 				filter_param.atype = dma_spec->args[1];
4229 				filter_param.asel = 0;
4230 			} else {
4231 				filter_param.atype = 0;
4232 				filter_param.asel = dma_spec->args[1];
4233 			}
4234 		} else {
4235 			filter_param.atype = 0;
4236 			filter_param.asel = 0;
4237 		}
4238 	}
4239 
4240 	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
4241 				     ofdma->of_node);
4242 	if (!chan) {
4243 		dev_err(ud->dev, "failed to get a channel in %s\n", __func__);
4244 		return ERR_PTR(-EINVAL);
4245 	}
4246 
4247 	return chan;
4248 }
4249 
4250 static struct udma_match_data am654_main_data = {
4251 	.type = DMA_TYPE_UDMA,
4252 	.psil_base = 0x1000,
4253 	.enable_memcpy_support = true,
4254 	.statictr_z_mask = GENMASK(11, 0),
4255 	.burst_size = {
4256 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4257 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4258 		0, /* No UH Channels */
4259 	},
4260 };
4261 
4262 static struct udma_match_data am654_mcu_data = {
4263 	.type = DMA_TYPE_UDMA,
4264 	.psil_base = 0x6000,
4265 	.enable_memcpy_support = false,
4266 	.statictr_z_mask = GENMASK(11, 0),
4267 	.burst_size = {
4268 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4269 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4270 		0, /* No UH Channels */
4271 	},
4272 };
4273 
4274 static struct udma_match_data j721e_main_data = {
4275 	.type = DMA_TYPE_UDMA,
4276 	.psil_base = 0x1000,
4277 	.enable_memcpy_support = true,
4278 	.flags = UDMA_FLAGS_J7_CLASS,
4279 	.statictr_z_mask = GENMASK(23, 0),
4280 	.burst_size = {
4281 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4282 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */
4283 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */
4284 	},
4285 };
4286 
4287 static struct udma_match_data j721e_mcu_data = {
4288 	.type = DMA_TYPE_UDMA,
4289 	.psil_base = 0x6000,
4290 	.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
4291 	.flags = UDMA_FLAGS_J7_CLASS,
4292 	.statictr_z_mask = GENMASK(23, 0),
4293 	.burst_size = {
4294 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4295 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */
4296 		0, /* No UH Channels */
4297 	},
4298 };
4299 
4300 static struct udma_soc_data am62a_dmss_csi_soc_data = {
4301 	.oes = {
4302 		.bcdma_rchan_data = 0xe00,
4303 		.bcdma_rchan_ring = 0x1000,
4304 	},
4305 };
4306 
4307 static struct udma_match_data am62a_bcdma_csirx_data = {
4308 	.type = DMA_TYPE_BCDMA,
4309 	.psil_base = 0x3100,
4310 	.enable_memcpy_support = false,
4311 	.burst_size = {
4312 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4313 		0, /* No H Channels */
4314 		0, /* No UH Channels */
4315 	},
4316 	.soc_data = &am62a_dmss_csi_soc_data,
4317 };
4318 
4319 static struct udma_match_data am64_bcdma_data = {
4320 	.type = DMA_TYPE_BCDMA,
4321 	.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
4322 	.enable_memcpy_support = true, /* Supported via bchan */
4323 	.flags = UDMA_FLAGS_J7_CLASS,
4324 	.statictr_z_mask = GENMASK(23, 0),
4325 	.burst_size = {
4326 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4327 		0, /* No H Channels */
4328 		0, /* No UH Channels */
4329 	},
4330 };
4331 
4332 static struct udma_match_data am64_pktdma_data = {
4333 	.type = DMA_TYPE_PKTDMA,
4334 	.psil_base = 0x1000,
4335 	.enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
4336 	.flags = UDMA_FLAGS_J7_CLASS,
4337 	.statictr_z_mask = GENMASK(23, 0),
4338 	.burst_size = {
4339 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4340 		0, /* No H Channels */
4341 		0, /* No UH Channels */
4342 	},
4343 };
4344 
4345 static const struct of_device_id udma_of_match[] = {
4346 	{
4347 		.compatible = "ti,am654-navss-main-udmap",
4348 		.data = &am654_main_data,
4349 	},
4350 	{
4351 		.compatible = "ti,am654-navss-mcu-udmap",
4352 		.data = &am654_mcu_data,
4353 	}, {
4354 		.compatible = "ti,j721e-navss-main-udmap",
4355 		.data = &j721e_main_data,
4356 	}, {
4357 		.compatible = "ti,j721e-navss-mcu-udmap",
4358 		.data = &j721e_mcu_data,
4359 	},
4360 	{
4361 		.compatible = "ti,am64-dmss-bcdma",
4362 		.data = &am64_bcdma_data,
4363 	},
4364 	{
4365 		.compatible = "ti,am64-dmss-pktdma",
4366 		.data = &am64_pktdma_data,
4367 	},
4368 	{
4369 		.compatible = "ti,am62a-dmss-bcdma-csirx",
4370 		.data = &am62a_bcdma_csirx_data,
4371 	},
4372 	{ /* Sentinel */ },
4373 };
4374 
4375 static struct udma_soc_data am654_soc_data = {
4376 	.oes = {
4377 		.udma_rchan = 0x200,
4378 	},
4379 };
4380 
4381 static struct udma_soc_data j721e_soc_data = {
4382 	.oes = {
4383 		.udma_rchan = 0x400,
4384 	},
4385 };
4386 
4387 static struct udma_soc_data j7200_soc_data = {
4388 	.oes = {
4389 		.udma_rchan = 0x80,
4390 	},
4391 };
4392 
4393 static struct udma_soc_data am64_soc_data = {
4394 	.oes = {
4395 		.bcdma_bchan_data = 0x2200,
4396 		.bcdma_bchan_ring = 0x2400,
4397 		.bcdma_tchan_data = 0x2800,
4398 		.bcdma_tchan_ring = 0x2a00,
4399 		.bcdma_rchan_data = 0x2e00,
4400 		.bcdma_rchan_ring = 0x3000,
4401 		.pktdma_tchan_flow = 0x1200,
4402 		.pktdma_rchan_flow = 0x1600,
4403 	},
4404 	.bcdma_trigger_event_offset = 0xc400,
4405 };
4406 
4407 static const struct soc_device_attribute k3_soc_devices[] = {
4408 	{ .family = "AM65X", .data = &am654_soc_data },
4409 	{ .family = "J721E", .data = &j721e_soc_data },
4410 	{ .family = "J7200", .data = &j7200_soc_data },
4411 	{ .family = "AM64X", .data = &am64_soc_data },
4412 	{ .family = "J721S2", .data = &j721e_soc_data},
4413 	{ .family = "AM62X", .data = &am64_soc_data },
4414 	{ .family = "AM62AX", .data = &am64_soc_data },
4415 	{ /* sentinel */ }
4416 };
4417 
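/*
 * Map the "gcfg" region, read the channel/flow counts from its capability
 * registers and map the per channel-type realtime (RT) register regions
 * that are actually present.
 */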
4418 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
4419 {
4420 	u32 cap2, cap3, cap4;
4421 	int i;
4422 
4423 	ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
4424 	if (IS_ERR(ud->mmrs[MMR_GCFG]))
4425 		return PTR_ERR(ud->mmrs[MMR_GCFG]);
4426 
4427 	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
4428 	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4429 
4430 	switch (ud->match_data->type) {
4431 	case DMA_TYPE_UDMA:
4432 		ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4433 		ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4434 		ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
4435 		ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4436 		break;
4437 	case DMA_TYPE_BCDMA:
4438 		ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
4439 		ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
4440 		ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
4441 		ud->rflow_cnt = ud->rchan_cnt;
4442 		break;
4443 	case DMA_TYPE_PKTDMA:
4444 		cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4445 		ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4446 		ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4447 		ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4448 		ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
4449 		break;
4450 	default:
4451 		return -EINVAL;
4452 	}
4453 
4454 	for (i = 1; i < MMR_LAST; i++) {
4455 		if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
4456 			continue;
4457 		if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
4458 			continue;
4459 		if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
4460 			continue;
4461 
4462 		ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
4463 		if (IS_ERR(ud->mmrs[i]))
4464 			return PTR_ERR(ud->mmrs[i]);
4465 	}
4466 
4467 	return 0;
4468 }
4469 
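/*
 * Mark a TISCI resource range (both primary and secondary region) as
 * usable by clearing the corresponding bits in the allocation bitmap.
 */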
4470 static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
4471 				      struct ti_sci_resource_desc *rm_desc,
4472 				      char *name)
4473 {
4474 	bitmap_clear(map, rm_desc->start, rm_desc->num);
4475 	bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
4476 	dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
4477 		rm_desc->start, rm_desc->num, rm_desc->start_sec,
4478 		rm_desc->num_sec);
4479 }
4480 
4481 static const char * const range_names[] = {
4482 	[RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
4483 	[RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
4484 	[RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
4485 	[RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
4486 	[RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
4487 };
4488 
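/*
 * UDMA (UDMAP) resource setup: derive the throughput level boundaries,
 * allocate the channel/flow bitmaps, apply the TISCI resource ranges and
 * reserve the MSI interrupt ranges for the tchan/rchan completions.
 */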
4489 static int udma_setup_resources(struct udma_dev *ud)
4490 {
4491 	int ret, i, j;
4492 	struct device *dev = ud->dev;
4493 	struct ti_sci_resource *rm_res, irq_res;
4494 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4495 	u32 cap3;
4496 
4497 	/* Set up the throughput level start indexes */
4498 	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4499 	if (of_device_is_compatible(dev->of_node,
4500 				    "ti,am654-navss-main-udmap")) {
4501 		ud->tchan_tpl.levels = 2;
4502 		ud->tchan_tpl.start_idx[0] = 8;
4503 	} else if (of_device_is_compatible(dev->of_node,
4504 					   "ti,am654-navss-mcu-udmap")) {
4505 		ud->tchan_tpl.levels = 2;
4506 		ud->tchan_tpl.start_idx[0] = 2;
4507 	} else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4508 		ud->tchan_tpl.levels = 3;
4509 		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4510 		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4511 	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4512 		ud->tchan_tpl.levels = 2;
4513 		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4514 	} else {
4515 		ud->tchan_tpl.levels = 1;
4516 	}
4517 
4518 	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4519 	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4520 	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4521 
4522 	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4523 					   sizeof(unsigned long), GFP_KERNEL);
4524 	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4525 				  GFP_KERNEL);
4526 	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4527 					   sizeof(unsigned long), GFP_KERNEL);
4528 	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4529 				  GFP_KERNEL);
4530 	ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
4531 					      sizeof(unsigned long),
4532 					      GFP_KERNEL);
4533 	ud->rflow_gp_map_allocated = devm_kcalloc(dev,
4534 						  BITS_TO_LONGS(ud->rflow_cnt),
4535 						  sizeof(unsigned long),
4536 						  GFP_KERNEL);
4537 	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4538 					sizeof(unsigned long),
4539 					GFP_KERNEL);
4540 	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4541 				  GFP_KERNEL);
4542 
4543 	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
4544 	    !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
4545 	    !ud->rflows || !ud->rflow_in_use)
4546 		return -ENOMEM;
4547 
4548 	/*
4549 	 * RX flows with the same IDs as RX channels are reserved to be used
4550 	 * as default flows if the remote HW can't generate flow IDs. Those
4551 	 * RX flows can only be requested explicitly, by ID.
4552 	 */
4553 	bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
4554 
4555 	/* by default no GP rflows are assigned to Linux */
4556 	bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
4557 
4558 	/* Get resource ranges from tisci */
4559 	for (i = 0; i < RM_RANGE_LAST; i++) {
4560 		if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
4561 			continue;
4562 
4563 		tisci_rm->rm_ranges[i] =
4564 			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4565 						    tisci_rm->tisci_dev_id,
4566 						    (char *)range_names[i]);
4567 	}
4568 
4569 	/* tchan ranges */
4570 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4571 	if (IS_ERR(rm_res)) {
4572 		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4573 		irq_res.sets = 1;
4574 	} else {
4575 		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4576 		for (i = 0; i < rm_res->sets; i++)
4577 			udma_mark_resource_ranges(ud, ud->tchan_map,
4578 						  &rm_res->desc[i], "tchan");
4579 		irq_res.sets = rm_res->sets;
4580 	}
4581 
4582 	/* rchan and matching default flow ranges */
4583 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4584 	if (IS_ERR(rm_res)) {
4585 		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4586 		irq_res.sets++;
4587 	} else {
4588 		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4589 		for (i = 0; i < rm_res->sets; i++)
4590 			udma_mark_resource_ranges(ud, ud->rchan_map,
4591 						  &rm_res->desc[i], "rchan");
4592 		irq_res.sets += rm_res->sets;
4593 	}
4594 
4595 	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4596 	if (!irq_res.desc)
4597 		return -ENOMEM;
4598 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4599 	if (IS_ERR(rm_res)) {
4600 		irq_res.desc[0].start = 0;
4601 		irq_res.desc[0].num = ud->tchan_cnt;
4602 		i = 1;
4603 	} else {
4604 		for (i = 0; i < rm_res->sets; i++) {
4605 			irq_res.desc[i].start = rm_res->desc[i].start;
4606 			irq_res.desc[i].num = rm_res->desc[i].num;
4607 			irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
4608 			irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
4609 		}
4610 	}
4611 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4612 	if (IS_ERR(rm_res)) {
4613 		irq_res.desc[i].start = 0;
4614 		irq_res.desc[i].num = ud->rchan_cnt;
4615 	} else {
4616 		for (j = 0; j < rm_res->sets; j++, i++) {
4617 			if (rm_res->desc[j].num) {
4618 				irq_res.desc[i].start = rm_res->desc[j].start +
4619 						ud->soc_data->oes.udma_rchan;
4620 				irq_res.desc[i].num = rm_res->desc[j].num;
4621 			}
4622 			if (rm_res->desc[j].num_sec) {
4623 				irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
4624 						ud->soc_data->oes.udma_rchan;
4625 				irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
4626 			}
4627 		}
4628 	}
4629 	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4630 	kfree(irq_res.desc);
4631 	if (ret) {
4632 		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4633 		return ret;
4634 	}
4635 
4636 	/* GP rflow ranges */
4637 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4638 	if (IS_ERR(rm_res)) {
4639 		/* all gp flows are assigned exclusively to Linux */
4640 		bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
4641 			     ud->rflow_cnt - ud->rchan_cnt);
4642 	} else {
4643 		for (i = 0; i < rm_res->sets; i++)
4644 			udma_mark_resource_ranges(ud, ud->rflow_gp_map,
4645 						  &rm_res->desc[i], "gp-rflow");
4646 	}
4647 
4648 	return 0;
4649 }
4650 
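/*
 * BCDMA resource setup: bchan/tchan/rchan throughput levels, channel
 * bitmaps, TISCI ranges and the data/ring completion interrupt ranges.
 */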
4651 static int bcdma_setup_resources(struct udma_dev *ud)
4652 {
4653 	int ret, i, j;
4654 	struct device *dev = ud->dev;
4655 	struct ti_sci_resource *rm_res, irq_res;
4656 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4657 	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4658 	u32 cap;
4659 
4660 	/* Set up the throughput level start indexes */
4661 	cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4662 	if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
4663 		ud->bchan_tpl.levels = 3;
4664 		ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
4665 		ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4666 	} else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
4667 		ud->bchan_tpl.levels = 2;
4668 		ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4669 	} else {
4670 		ud->bchan_tpl.levels = 1;
4671 	}
4672 
4673 	cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4674 	if (BCDMA_CAP4_URCHAN_CNT(cap)) {
4675 		ud->rchan_tpl.levels = 3;
4676 		ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
4677 		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4678 	} else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
4679 		ud->rchan_tpl.levels = 2;
4680 		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4681 	} else {
4682 		ud->rchan_tpl.levels = 1;
4683 	}
4684 
4685 	if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
4686 		ud->tchan_tpl.levels = 3;
4687 		ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
4688 		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4689 	} else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
4690 		ud->tchan_tpl.levels = 2;
4691 		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4692 	} else {
4693 		ud->tchan_tpl.levels = 1;
4694 	}
4695 
4696 	ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
4697 					   sizeof(unsigned long), GFP_KERNEL);
4698 	ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
4699 				  GFP_KERNEL);
4700 	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4701 					   sizeof(unsigned long), GFP_KERNEL);
4702 	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4703 				  GFP_KERNEL);
4704 	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4705 					   sizeof(unsigned long), GFP_KERNEL);
4706 	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4707 				  GFP_KERNEL);
4708 	/* BCDMA does not really have flows, but the driver expects them */
4709 	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
4710 					sizeof(unsigned long),
4711 					GFP_KERNEL);
4712 	ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
4713 				  GFP_KERNEL);
4714 
4715 	if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
4716 	    !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
4717 	    !ud->rflows)
4718 		return -ENOMEM;
4719 
4720 	/* Get resource ranges from tisci */
4721 	for (i = 0; i < RM_RANGE_LAST; i++) {
4722 		if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
4723 			continue;
4724 		if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
4725 			continue;
4726 		if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
4727 			continue;
4728 		if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
4729 			continue;
4730 
4731 		tisci_rm->rm_ranges[i] =
4732 			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4733 						    tisci_rm->tisci_dev_id,
4734 						    (char *)range_names[i]);
4735 	}
4736 
4737 	irq_res.sets = 0;
4738 
4739 	/* bchan ranges */
4740 	if (ud->bchan_cnt) {
4741 		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4742 		if (IS_ERR(rm_res)) {
4743 			bitmap_zero(ud->bchan_map, ud->bchan_cnt);
4744 			irq_res.sets++;
4745 		} else {
4746 			bitmap_fill(ud->bchan_map, ud->bchan_cnt);
4747 			for (i = 0; i < rm_res->sets; i++)
4748 				udma_mark_resource_ranges(ud, ud->bchan_map,
4749 							  &rm_res->desc[i],
4750 							  "bchan");
4751 			irq_res.sets += rm_res->sets;
4752 		}
4753 	}
4754 
4755 	/* tchan ranges */
4756 	if (ud->tchan_cnt) {
4757 		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4758 		if (IS_ERR(rm_res)) {
4759 			bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4760 			irq_res.sets += 2;
4761 		} else {
4762 			bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4763 			for (i = 0; i < rm_res->sets; i++)
4764 				udma_mark_resource_ranges(ud, ud->tchan_map,
4765 							  &rm_res->desc[i],
4766 							  "tchan");
4767 			irq_res.sets += rm_res->sets * 2;
4768 		}
4769 	}
4770 
4771 	/* rchan ranges */
4772 	if (ud->rchan_cnt) {
4773 		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4774 		if (IS_ERR(rm_res)) {
4775 			bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4776 			irq_res.sets += 2;
4777 		} else {
4778 			bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4779 			for (i = 0; i < rm_res->sets; i++)
4780 				udma_mark_resource_ranges(ud, ud->rchan_map,
4781 							  &rm_res->desc[i],
4782 							  "rchan");
4783 			irq_res.sets += rm_res->sets * 2;
4784 		}
4785 	}
4786 
4787 	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4788 	if (!irq_res.desc)
4789 		return -ENOMEM;
4790 	if (ud->bchan_cnt) {
4791 		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4792 		if (IS_ERR(rm_res)) {
4793 			irq_res.desc[0].start = oes->bcdma_bchan_ring;
4794 			irq_res.desc[0].num = ud->bchan_cnt;
4795 			i = 1;
4796 		} else {
4797 			for (i = 0; i < rm_res->sets; i++) {
4798 				irq_res.desc[i].start = rm_res->desc[i].start +
4799 							oes->bcdma_bchan_ring;
4800 				irq_res.desc[i].num = rm_res->desc[i].num;
4801 			}
4802 		}
4803 	} else {
4804 		i = 0;
4805 	}
4806 
4807 	if (ud->tchan_cnt) {
4808 		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4809 		if (IS_ERR(rm_res)) {
4810 			irq_res.desc[i].start = oes->bcdma_tchan_data;
4811 			irq_res.desc[i].num = ud->tchan_cnt;
4812 			irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
4813 			irq_res.desc[i + 1].num = ud->tchan_cnt;
4814 			i += 2;
4815 		} else {
4816 			for (j = 0; j < rm_res->sets; j++, i += 2) {
4817 				irq_res.desc[i].start = rm_res->desc[j].start +
4818 							oes->bcdma_tchan_data;
4819 				irq_res.desc[i].num = rm_res->desc[j].num;
4820 
4821 				irq_res.desc[i + 1].start = rm_res->desc[j].start +
4822 							oes->bcdma_tchan_ring;
4823 				irq_res.desc[i + 1].num = rm_res->desc[j].num;
4824 			}
4825 		}
4826 	}
4827 	if (ud->rchan_cnt) {
4828 		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4829 		if (IS_ERR(rm_res)) {
4830 			irq_res.desc[i].start = oes->bcdma_rchan_data;
4831 			irq_res.desc[i].num = ud->rchan_cnt;
4832 			irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
4833 			irq_res.desc[i + 1].num = ud->rchan_cnt;
4834 			i += 2;
4835 		} else {
4836 			for (j = 0; j < rm_res->sets; j++, i += 2) {
4837 				irq_res.desc[i].start = rm_res->desc[j].start +
4838 							oes->bcdma_rchan_data;
4839 				irq_res.desc[i].num = rm_res->desc[j].num;
4840 
4841 				irq_res.desc[i + 1].start = rm_res->desc[j].start +
4842 							oes->bcdma_rchan_ring;
4843 				irq_res.desc[i + 1].num = rm_res->desc[j].num;
4844 			}
4845 		}
4846 	}
4847 
4848 	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4849 	kfree(irq_res.desc);
4850 	if (ret) {
4851 		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4852 		return ret;
4853 	}
4854 
4855 	return 0;
4856 }
4857 
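/*
 * PKTDMA resource setup: tchan/rchan bitmaps plus the tflow/rflow maps,
 * TISCI ranges and the flow based completion interrupt ranges.
 */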
4858 static int pktdma_setup_resources(struct udma_dev *ud)
4859 {
4860 	int ret, i, j;
4861 	struct device *dev = ud->dev;
4862 	struct ti_sci_resource *rm_res, irq_res;
4863 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4864 	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4865 	u32 cap3;
4866 
4867 	/* Set up the throughput level start indexes */
4868 	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4869 	if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4870 		ud->tchan_tpl.levels = 3;
4871 		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4872 		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4873 	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4874 		ud->tchan_tpl.levels = 2;
4875 		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4876 	} else {
4877 		ud->tchan_tpl.levels = 1;
4878 	}
4879 
4880 	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4881 	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4882 	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4883 
4884 	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4885 					   sizeof(unsigned long), GFP_KERNEL);
4886 	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4887 				  GFP_KERNEL);
4888 	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4889 					   sizeof(unsigned long), GFP_KERNEL);
4890 	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4891 				  GFP_KERNEL);
4892 	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4893 					sizeof(unsigned long),
4894 					GFP_KERNEL);
4895 	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4896 				  GFP_KERNEL);
4897 	ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
4898 					   sizeof(unsigned long), GFP_KERNEL);
4899 
4900 	if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
4901 	    !ud->rchans || !ud->rflows || !ud->rflow_in_use)
4902 		return -ENOMEM;
4903 
4904 	/* Get resource ranges from tisci */
4905 	for (i = 0; i < RM_RANGE_LAST; i++) {
4906 		if (i == RM_RANGE_BCHAN)
4907 			continue;
4908 
4909 		tisci_rm->rm_ranges[i] =
4910 			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4911 						    tisci_rm->tisci_dev_id,
4912 						    (char *)range_names[i]);
4913 	}
4914 
4915 	/* tchan ranges */
4916 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4917 	if (IS_ERR(rm_res)) {
4918 		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4919 	} else {
4920 		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4921 		for (i = 0; i < rm_res->sets; i++)
4922 			udma_mark_resource_ranges(ud, ud->tchan_map,
4923 						  &rm_res->desc[i], "tchan");
4924 	}
4925 
4926 	/* rchan ranges */
4927 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4928 	if (IS_ERR(rm_res)) {
4929 		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4930 	} else {
4931 		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4932 		for (i = 0; i < rm_res->sets; i++)
4933 			udma_mark_resource_ranges(ud, ud->rchan_map,
4934 						  &rm_res->desc[i], "rchan");
4935 	}
4936 
4937 	/* rflow ranges */
4938 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4939 	if (IS_ERR(rm_res)) {
4940 		/* all rflows are assigned exclusively to Linux */
4941 		bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
4942 		irq_res.sets = 1;
4943 	} else {
4944 		bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
4945 		for (i = 0; i < rm_res->sets; i++)
4946 			udma_mark_resource_ranges(ud, ud->rflow_in_use,
4947 						  &rm_res->desc[i], "rflow");
4948 		irq_res.sets = rm_res->sets;
4949 	}
4950 
4951 	/* tflow ranges */
4952 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
4953 	if (IS_ERR(rm_res)) {
4954 		/* all tflows are assigned exclusively to Linux */
4955 		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
4956 		irq_res.sets++;
4957 	} else {
4958 		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
4959 		for (i = 0; i < rm_res->sets; i++)
4960 			udma_mark_resource_ranges(ud, ud->tflow_map,
4961 						  &rm_res->desc[i], "tflow");
4962 		irq_res.sets += rm_res->sets;
4963 	}
4964 
4965 	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4966 	if (!irq_res.desc)
4967 		return -ENOMEM;
4968 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
4969 	if (IS_ERR(rm_res)) {
4970 		irq_res.desc[0].start = oes->pktdma_tchan_flow;
4971 		irq_res.desc[0].num = ud->tflow_cnt;
4972 		i = 1;
4973 	} else {
4974 		for (i = 0; i < rm_res->sets; i++) {
4975 			irq_res.desc[i].start = rm_res->desc[i].start +
4976 						oes->pktdma_tchan_flow;
4977 			irq_res.desc[i].num = rm_res->desc[i].num;
4978 		}
4979 	}
4980 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4981 	if (IS_ERR(rm_res)) {
4982 		irq_res.desc[i].start = oes->pktdma_rchan_flow;
4983 		irq_res.desc[i].num = ud->rflow_cnt;
4984 	} else {
4985 		for (j = 0; j < rm_res->sets; j++, i++) {
4986 			irq_res.desc[i].start = rm_res->desc[j].start +
4987 						oes->pktdma_rchan_flow;
4988 			irq_res.desc[i].num = rm_res->desc[j].num;
4989 		}
4990 	}
4991 	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4992 	kfree(irq_res.desc);
4993 	if (ret) {
4994 		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4995 		return ret;
4996 	}
4997 
4998 	return 0;
4999 }
5000 
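/* Run the per DMA-type resource setup and count the channels usable by Linux. */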
5001 static int setup_resources(struct udma_dev *ud)
5002 {
5003 	struct device *dev = ud->dev;
5004 	int ch_count, ret;
5005 
5006 	switch (ud->match_data->type) {
5007 	case DMA_TYPE_UDMA:
5008 		ret = udma_setup_resources(ud);
5009 		break;
5010 	case DMA_TYPE_BCDMA:
5011 		ret = bcdma_setup_resources(ud);
5012 		break;
5013 	case DMA_TYPE_PKTDMA:
5014 		ret = pktdma_setup_resources(ud);
5015 		break;
5016 	default:
5017 		return -EINVAL;
5018 	}
5019 
5020 	if (ret)
5021 		return ret;
5022 
5023 	ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
5024 	if (ud->bchan_cnt)
5025 		ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
5026 	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
5027 	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
5028 	if (!ch_count)
5029 		return -ENODEV;
5030 
5031 	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
5032 				    GFP_KERNEL);
5033 	if (!ud->channels)
5034 		return -ENOMEM;
5035 
5036 	switch (ud->match_data->type) {
5037 	case DMA_TYPE_UDMA:
5038 		dev_info(dev,
5039 			 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
5040 			 ch_count,
5041 			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5042 						       ud->tchan_cnt),
5043 			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5044 						       ud->rchan_cnt),
5045 			 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
5046 						       ud->rflow_cnt));
5047 		break;
5048 	case DMA_TYPE_BCDMA:
5049 		dev_info(dev,
5050 			 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
5051 			 ch_count,
5052 			 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
5053 						       ud->bchan_cnt),
5054 			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5055 						       ud->tchan_cnt),
5056 			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5057 						       ud->rchan_cnt));
5058 		break;
5059 	case DMA_TYPE_PKTDMA:
5060 		dev_info(dev,
5061 			 "Channels: %d (tchan: %u, rchan: %u)\n",
5062 			 ch_count,
5063 			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5064 						       ud->tchan_cnt),
5065 			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5066 						       ud->rchan_cnt));
5067 		break;
5068 	default:
5069 		break;
5070 	}
5071 
5072 	return ch_count;
5073 }
5074 
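/*
 * Prepare the two reusable descriptors (TR mode and packet mode) that are
 * queued to drain and discard stale data when an RX channel is torn down.
 */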
5075 static int udma_setup_rx_flush(struct udma_dev *ud)
5076 {
5077 	struct udma_rx_flush *rx_flush = &ud->rx_flush;
5078 	struct cppi5_desc_hdr_t *tr_desc;
5079 	struct cppi5_tr_type1_t *tr_req;
5080 	struct cppi5_host_desc_t *desc;
5081 	struct device *dev = ud->dev;
5082 	struct udma_hwdesc *hwdesc;
5083 	size_t tr_size;
5084 
5085 	/* Allocate 1K buffer for discarded data on RX channel teardown */
5086 	rx_flush->buffer_size = SZ_1K;
5087 	rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
5088 					      GFP_KERNEL);
5089 	if (!rx_flush->buffer_vaddr)
5090 		return -ENOMEM;
5091 
5092 	rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
5093 						rx_flush->buffer_size,
5094 						DMA_TO_DEVICE);
5095 	if (dma_mapping_error(dev, rx_flush->buffer_paddr))
5096 		return -ENOMEM;
5097 
5098 	/* Set up descriptor to be used for TR mode */
5099 	hwdesc = &rx_flush->hwdescs[0];
5100 	tr_size = sizeof(struct cppi5_tr_type1_t);
5101 	hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
5102 	hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
5103 					ud->desc_align);
5104 
5105 	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
5106 						GFP_KERNEL);
5107 	if (!hwdesc->cppi5_desc_vaddr)
5108 		return -ENOMEM;
5109 
5110 	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
5111 						  hwdesc->cppi5_desc_size,
5112 						  DMA_TO_DEVICE);
5113 	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
5114 		return -ENOMEM;
5115 
5116 	/* Start of the TR req records */
5117 	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
5118 	/* Start address of the TR response array */
5119 	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
5120 
5121 	tr_desc = hwdesc->cppi5_desc_vaddr;
5122 	cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
5123 	cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
5124 	cppi5_desc_set_retpolicy(tr_desc, 0, 0);
5125 
5126 	tr_req = hwdesc->tr_req_base;
5127 	cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
5128 		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
5129 	cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
5130 
5131 	tr_req->addr = rx_flush->buffer_paddr;
5132 	tr_req->icnt0 = rx_flush->buffer_size;
5133 	tr_req->icnt1 = 1;
5134 
5135 	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
5136 				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
5137 
5138 	/* Set up descriptor to be used for packet mode */
5139 	hwdesc = &rx_flush->hwdescs[1];
5140 	hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
5141 					CPPI5_INFO0_HDESC_EPIB_SIZE +
5142 					CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
5143 					ud->desc_align);
5144 
5145 	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
5146 						GFP_KERNEL);
5147 	if (!hwdesc->cppi5_desc_vaddr)
5148 		return -ENOMEM;
5149 
5150 	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
5151 						  hwdesc->cppi5_desc_size,
5152 						  DMA_TO_DEVICE);
5153 	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
5154 		return -ENOMEM;
5155 
5156 	desc = hwdesc->cppi5_desc_vaddr;
5157 	cppi5_hdesc_init(desc, 0, 0);
5158 	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
5159 	cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
5160 
5161 	cppi5_hdesc_attach_buf(desc,
5162 			       rx_flush->buffer_paddr, rx_flush->buffer_size,
5163 			       rx_flush->buffer_paddr, rx_flush->buffer_size);
5164 
5165 	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
5166 				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
5167 	return 0;
5168 }
5169 
5170 #ifdef CONFIG_DEBUG_FS
5171 static void udma_dbg_summary_show_chan(struct seq_file *s,
5172 				       struct dma_chan *chan)
5173 {
5174 	struct udma_chan *uc = to_udma_chan(chan);
5175 	struct udma_chan_config *ucc = &uc->config;
5176 
5177 	seq_printf(s, " %-13s| %s", dma_chan_name(chan),
5178 		   chan->dbg_client_name ?: "in-use");
5179 	if (ucc->tr_trigger_type)
5180 		seq_puts(s, " (triggered, ");
5181 	else
5182 		seq_printf(s, " (%s, ",
5183 			   dmaengine_get_direction_text(uc->config.dir));
5184 
5185 	switch (uc->config.dir) {
5186 	case DMA_MEM_TO_MEM:
5187 		if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
5188 			seq_printf(s, "bchan%d)\n", uc->bchan->id);
5189 			return;
5190 		}
5191 
5192 		seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
5193 			   ucc->src_thread, ucc->dst_thread);
5194 		break;
5195 	case DMA_DEV_TO_MEM:
5196 		seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
5197 			   ucc->src_thread, ucc->dst_thread);
5198 		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5199 			seq_printf(s, "rflow%d, ", uc->rflow->id);
5200 		break;
5201 	case DMA_MEM_TO_DEV:
5202 		seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
5203 			   ucc->src_thread, ucc->dst_thread);
5204 		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5205 			seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
5206 		break;
5207 	default:
5208 		seq_puts(s, ")\n");
5209 		return;
5210 	}
5211 
5212 	if (ucc->ep_type == PSIL_EP_NATIVE) {
5213 		seq_puts(s, "PSI-L Native");
5214 		if (ucc->metadata_size) {
5215 			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
5216 			if (ucc->psd_size)
5217 				seq_printf(s, " PSDsize:%u", ucc->psd_size);
5218 			seq_puts(s, " ]");
5219 		}
5220 	} else {
5221 		seq_puts(s, "PDMA");
5222 		if (ucc->enable_acc32 || ucc->enable_burst)
5223 			seq_printf(s, "[%s%s ]",
5224 				   ucc->enable_acc32 ? " ACC32" : "",
5225 				   ucc->enable_burst ? " BURST" : "");
5226 	}
5227 
5228 	seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
5229 }
5230 
5231 static void udma_dbg_summary_show(struct seq_file *s,
5232 				  struct dma_device *dma_dev)
5233 {
5234 	struct dma_chan *chan;
5235 
5236 	list_for_each_entry(chan, &dma_dev->channels, device_node) {
5237 		if (chan->client_count)
5238 			udma_dbg_summary_show_chan(s, chan);
5239 	}
5240 }
5241 #endif /* CONFIG_DEBUG_FS */
5242 
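/*
 * The copy_align advertised for memcpy follows the burst size of the
 * highest throughput channel tier that can be used for MEM_TO_MEM.
 */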
5243 static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
5244 {
5245 	const struct udma_match_data *match_data = ud->match_data;
5246 	u8 tpl;
5247 
5248 	if (!match_data->enable_memcpy_support)
5249 		return DMAENGINE_ALIGN_8_BYTES;
5250 
5251 	/* Get the highest TPL level the device supports for memcpy */
5252 	if (ud->bchan_cnt)
5253 		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
5254 	else if (ud->tchan_cnt)
5255 		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
5256 	else
5257 		return DMAENGINE_ALIGN_8_BYTES;
5258 
5259 	switch (match_data->burst_size[tpl]) {
5260 	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
5261 		return DMAENGINE_ALIGN_256_BYTES;
5262 	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
5263 		return DMAENGINE_ALIGN_128_BYTES;
5264 	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
5265 		fallthrough;
5266 	default:
5267 		return DMAENGINE_ALIGN_64_BYTES;
5268 	}
5269 }
5270 
5271 #define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
5272 				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
5273 				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
5274 				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
5275 				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
5276 
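/*
 * Probe: map the MMRs, query TISCI for the resource ranges, initialize the
 * channels and rings, then register the dmaengine device and the OF DMA
 * controller.
 */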
5277 static int udma_probe(struct platform_device *pdev)
5278 {
5279 	struct device_node *navss_node = pdev->dev.parent->of_node;
5280 	const struct soc_device_attribute *soc;
5281 	struct device *dev = &pdev->dev;
5282 	struct udma_dev *ud;
5283 	const struct of_device_id *match;
5284 	int i, ret;
5285 	int ch_count;
5286 
5287 	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
5288 	if (ret)
5289 		dev_err(dev, "failed to set DMA mask\n");
5290 
5291 	ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
5292 	if (!ud)
5293 		return -ENOMEM;
5294 
5295 	match = of_match_node(udma_of_match, dev->of_node);
5296 	if (!match) {
5297 		dev_err(dev, "No compatible match found\n");
5298 		return -ENODEV;
5299 	}
5300 	ud->match_data = match->data;
5301 
5302 	ud->soc_data = ud->match_data->soc_data;
5303 	if (!ud->soc_data) {
5304 		soc = soc_device_match(k3_soc_devices);
5305 		if (!soc) {
5306 			dev_err(dev, "No compatible SoC found\n");
5307 			return -ENODEV;
5308 		}
5309 		ud->soc_data = soc->data;
5310 	}
5311 
5312 	ret = udma_get_mmrs(pdev, ud);
5313 	if (ret)
5314 		return ret;
5315 
5316 	ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
5317 	if (IS_ERR(ud->tisci_rm.tisci))
5318 		return PTR_ERR(ud->tisci_rm.tisci);
5319 
5320 	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
5321 				   &ud->tisci_rm.tisci_dev_id);
5322 	if (ret) {
5323 		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
5324 		return ret;
5325 	}
5326 	pdev->id = ud->tisci_rm.tisci_dev_id;
5327 
5328 	ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
5329 				   &ud->tisci_rm.tisci_navss_dev_id);
5330 	if (ret) {
5331 		dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
5332 		return ret;
5333 	}
5334 
5335 	if (ud->match_data->type == DMA_TYPE_UDMA) {
5336 		ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
5337 					   &ud->atype);
5338 		if (!ret && ud->atype > 2) {
5339 			dev_err(dev, "Invalid atype: %u\n", ud->atype);
5340 			return -EINVAL;
5341 		}
5342 	} else {
5343 		ret = of_property_read_u32(dev->of_node, "ti,asel",
5344 					   &ud->asel);
5345 		if (!ret && ud->asel > 15) {
5346 			dev_err(dev, "Invalid asel: %u\n", ud->asel);
5347 			return -EINVAL;
5348 		}
5349 	}
5350 
5351 	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
5352 	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
5353 
5354 	if (ud->match_data->type == DMA_TYPE_UDMA) {
5355 		ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
5356 	} else {
5357 		struct k3_ringacc_init_data ring_init_data;
5358 
5359 		ring_init_data.tisci = ud->tisci_rm.tisci;
5360 		ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
5361 		if (ud->match_data->type == DMA_TYPE_BCDMA) {
5362 			ring_init_data.num_rings = ud->bchan_cnt +
5363 						   ud->tchan_cnt +
5364 						   ud->rchan_cnt;
5365 		} else {
5366 			ring_init_data.num_rings = ud->rflow_cnt +
5367 						   ud->tflow_cnt;
5368 		}
5369 
5370 		ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
5371 	}
5372 
5373 	if (IS_ERR(ud->ringacc))
5374 		return PTR_ERR(ud->ringacc);
5375 
5376 	dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
5377 					    DOMAIN_BUS_TI_SCI_INTA_MSI);
5378 	if (!dev->msi.domain)
5379 		return -EPROBE_DEFER;
5381 
5382 	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
5383 	/* cyclic operation is not supported via PKTDMA */
5384 	if (ud->match_data->type != DMA_TYPE_PKTDMA) {
5385 		dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
5386 		ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
5387 	}
5388 
5389 	ud->ddev.device_config = udma_slave_config;
5390 	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
5391 	ud->ddev.device_issue_pending = udma_issue_pending;
5392 	ud->ddev.device_tx_status = udma_tx_status;
5393 	ud->ddev.device_pause = udma_pause;
5394 	ud->ddev.device_resume = udma_resume;
5395 	ud->ddev.device_terminate_all = udma_terminate_all;
5396 	ud->ddev.device_synchronize = udma_synchronize;
5397 #ifdef CONFIG_DEBUG_FS
5398 	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
5399 #endif
5400 
5401 	switch (ud->match_data->type) {
5402 	case DMA_TYPE_UDMA:
5403 		ud->ddev.device_alloc_chan_resources =
5404 					udma_alloc_chan_resources;
5405 		break;
5406 	case DMA_TYPE_BCDMA:
5407 		ud->ddev.device_alloc_chan_resources =
5408 					bcdma_alloc_chan_resources;
5409 		ud->ddev.device_router_config = bcdma_router_config;
5410 		break;
5411 	case DMA_TYPE_PKTDMA:
5412 		ud->ddev.device_alloc_chan_resources =
5413 					pktdma_alloc_chan_resources;
5414 		break;
5415 	default:
5416 		return -EINVAL;
5417 	}
5418 	ud->ddev.device_free_chan_resources = udma_free_chan_resources;
5419 
5420 	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
5421 	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
5422 	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
5423 	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
5424 	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
5425 				       DESC_METADATA_ENGINE;
5426 	if (ud->match_data->enable_memcpy_support &&
5427 	    !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
5428 		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
5429 		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
5430 		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
5431 	}
5432 
5433 	ud->ddev.dev = dev;
5434 	ud->dev = dev;
5435 	ud->psil_base = ud->match_data->psil_base;
5436 
5437 	INIT_LIST_HEAD(&ud->ddev.channels);
5438 	INIT_LIST_HEAD(&ud->desc_to_purge);
5439 
5440 	ch_count = setup_resources(ud);
5441 	if (ch_count <= 0)
5442 		return ch_count;
5443 
5444 	spin_lock_init(&ud->lock);
5445 	INIT_WORK(&ud->purge_work, udma_purge_desc_work);
5446 
5447 	ud->desc_align = 64;
5448 	if (ud->desc_align < dma_get_cache_alignment())
5449 		ud->desc_align = dma_get_cache_alignment();
5450 
5451 	ret = udma_setup_rx_flush(ud);
5452 	if (ret)
5453 		return ret;
5454 
5455 	for (i = 0; i < ud->bchan_cnt; i++) {
5456 		struct udma_bchan *bchan = &ud->bchans[i];
5457 
5458 		bchan->id = i;
5459 		bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
5460 	}
5461 
5462 	for (i = 0; i < ud->tchan_cnt; i++) {
5463 		struct udma_tchan *tchan = &ud->tchans[i];
5464 
5465 		tchan->id = i;
5466 		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
5467 	}
5468 
5469 	for (i = 0; i < ud->rchan_cnt; i++) {
5470 		struct udma_rchan *rchan = &ud->rchans[i];
5471 
5472 		rchan->id = i;
5473 		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
5474 	}
5475 
5476 	for (i = 0; i < ud->rflow_cnt; i++) {
5477 		struct udma_rflow *rflow = &ud->rflows[i];
5478 
5479 		rflow->id = i;
5480 	}
5481 
5482 	for (i = 0; i < ch_count; i++) {
5483 		struct udma_chan *uc = &ud->channels[i];
5484 
5485 		uc->ud = ud;
5486 		uc->vc.desc_free = udma_desc_free;
5487 		uc->id = i;
5488 		uc->bchan = NULL;
5489 		uc->tchan = NULL;
5490 		uc->rchan = NULL;
5491 		uc->config.remote_thread_id = -1;
5492 		uc->config.mapped_channel_id = -1;
5493 		uc->config.default_flow_id = -1;
5494 		uc->config.dir = DMA_MEM_TO_MEM;
5495 		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
5496 					  dev_name(dev), i);
5497 
5498 		vchan_init(&uc->vc, &ud->ddev);
5499 		/* Use custom vchan completion handling */
5500 		tasklet_setup(&uc->vc.task, udma_vchan_complete);
5501 		init_completion(&uc->teardown_completed);
5502 		INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
5503 	}
5504 
5505 	/* Configure the copy_align to the maximum burst size the device supports */
5506 	ud->ddev.copy_align = udma_get_copy_align(ud);
5507 
5508 	ret = dma_async_device_register(&ud->ddev);
5509 	if (ret) {
5510 		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
5511 		return ret;
5512 	}
5513 
5514 	platform_set_drvdata(pdev, ud);
5515 
5516 	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
5517 	if (ret) {
5518 		dev_err(dev, "failed to register of_dma controller\n");
5519 		dma_async_device_unregister(&ud->ddev);
5520 	}
5521 
5522 	return ret;
5523 }
5524 
5525 static struct platform_driver udma_driver = {
5526 	.driver = {
5527 		.name	= "ti-udma",
5528 		.of_match_table = udma_of_match,
5529 		.suppress_bind_attrs = true,
5530 	},
5531 	.probe		= udma_probe,
5532 };
5533 
5534 module_platform_driver(udma_driver);
5535 MODULE_LICENSE("GPL v2");
5536 
5537 /* Private interfaces to UDMA */
5538 #include "k3-udma-private.c"
5539