1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * TI K3 AM65x NAVSS Ring accelerator Manager (RA) subsystem driver
4  *
5  * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
6  */
7 
8 #include <common.h>
9 #include <cpu_func.h>
10 #include <log.h>
11 #include <asm/cache.h>
12 #include <asm/io.h>
13 #include <malloc.h>
14 #include <asm/bitops.h>
15 #include <dm.h>
16 #include <dm/device_compat.h>
17 #include <dm/devres.h>
18 #include <dm/read.h>
19 #include <dm/uclass.h>
20 #include <linux/bitops.h>
21 #include <linux/compat.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/err.h>
24 #include <linux/soc/ti/k3-navss-ringacc.h>
25 #include <linux/soc/ti/ti_sci_protocol.h>
26 #include <linux/soc/ti/cppi5.h>
27 
/*
 * Linux-compat shims: this driver was ported from Linux, whose bitops and
 * DMA helpers take more arguments than the U-Boot equivalents.
 */
#define set_bit(bit, bitmap)	__set_bit(bit, bitmap)
#define clear_bit(bit, bitmap)	__clear_bit(bit, bitmap)
/*
 * U-Boot's dma_free_coherent() takes only the virtual address; drop the
 * Linux-style arguments. Function-like macros are not re-expanded
 * recursively, so the expansion calls the real U-Boot function.
 */
#define dma_free_coherent(dev, size, cpu_addr, dma_handle) \
	dma_free_coherent(cpu_addr)
/* Emulate Linux dma_zalloc_coherent(): allocate then zero-fill. */
#define dma_zalloc_coherent(dev, size, dma_handle, flag) \
({ \
	void	*ring_mem_virt; \
	ring_mem_virt = dma_alloc_coherent((size), \
					   (unsigned long *)(dma_handle)); \
	if (ring_mem_virt) \
		memset(ring_mem_virt, 0, (size)); \
	ring_mem_virt; \
})

/* All ring accelerator instances probed in the system */
static LIST_HEAD(k3_nav_ringacc_list);
43 
/* Traced 32-bit register write helper for ringacc registers. */
static void ringacc_writel(u32 v, void __iomem *reg)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", v, reg);
	writel(v, reg);
}
49 
ringacc_readl(void __iomem * reg)50 static	u32 ringacc_readl(void __iomem *reg)
51 {
52 	u32 v;
53 
54 	v = readl(reg);
55 	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, reg);
56 	return v;
57 }
58 
/* Element-count field masks within the TISCI ring-size configuration word */
#define KNAV_RINGACC_CFG_RING_SIZE_ELCNT_MASK		GENMASK(19, 0)
#define K3_DMARING_RING_CFG_RING_SIZE_ELCNT_MASK		GENMASK(15, 0)

/**
 * struct k3_nav_ring_rt_regs -  The RA Control/Status Registers region
 */
struct k3_nav_ring_rt_regs {
	u32	resv_16[4];
	u32	db;		/* RT Ring N Doorbell Register */
	u32	resv_4[1];
	u32	occ;		/* RT Ring N Occupancy Register */
	u32	indx;		/* RT Ring N Current Index Register */
	u32	hwocc;		/* RT Ring N Hardware Occupancy Register */
	u32	hwindx;		/* RT Ring N Hardware Current Index Register */
};

/* Per-ring stride of the RT register windows */
#define KNAV_RINGACC_RT_REGS_STEP	0x1000
#define K3_DMARING_RING_RT_REGS_STEP			0x2000
#define K3_DMARING_RING_RT_REGS_REVERSE_OFS		0x1000
#define KNAV_RINGACC_RT_OCC_MASK		GENMASK(20, 0)
#define K3_DMARING_RING_RT_OCC_TDOWN_COMPLETE		BIT(31)
#define K3_DMARING_RING_RT_DB_ENTRY_MASK		GENMASK(7, 0)
#define K3_DMARING_RING_RT_DB_TDOWN_ACK		BIT(31)


/**
 * struct k3_nav_ring_fifo_regs -  The Ring Accelerator Queues Registers region
 */
struct k3_nav_ring_fifo_regs {
	u32	head_data[128];		/* Ring Head Entry Data Registers */
	u32	tail_data[128];		/* Ring Tail Entry Data Registers */
	u32	peek_head_data[128];	/* Ring Peek Head Entry Data Regs */
	u32	peek_tail_data[128];	/* Ring Peek Tail Entry Data Regs */
};

#define KNAV_RINGACC_FIFO_WINDOW_SIZE_BYTES  (512U)
#define KNAV_RINGACC_FIFO_REGS_STEP	0x1000
#define KNAV_RINGACC_MAX_DB_RING_CNT    (127U)

/**
 * struct k3_nav_ring_ops -  Ring operations
 *
 * @push_tail: push one element to the ring tail
 * @push_head: push one element to the ring head
 * @pop_tail: pop one element from the ring tail
 * @pop_head: pop one element from the ring head
 */
struct k3_nav_ring_ops {
	int (*push_tail)(struct k3_nav_ring *ring, void *elm);
	int (*push_head)(struct k3_nav_ring *ring, void *elm);
	int (*pop_tail)(struct k3_nav_ring *ring, void *elm);
	int (*pop_head)(struct k3_nav_ring *ring, void *elm);
};
107 
/**
 * struct k3_nav_ring_state - Internal state tracking structure
 *
 * @free: Number of free entries
 * @occ: Occupancy
 * @windex: Write index
 * @rindex: Read index
 * @tdown_complete: Tear down complete state
 */
struct k3_nav_ring_state {
	u32 free;
	u32 occ;
	u32 windex;
	u32 rindex;
	u32 tdown_complete:1;
};

/**
 * struct k3_nav_ring - RA Ring descriptor
 *
 * @rt - Ring control/status registers
 * @fifos - Ring queues registers
 * @ring_mem_dma - Ring buffer dma address
 * @ring_mem_virt - Ring buffer virt address
 * @ops - Ring operations
 * @size - Ring size in elements
 * @elm_size - Size of the ring element
 * @mode - Ring mode
 * @flags - flags
 * @state - Internal ring state (cached occupancy, read/write indexes)
 * @ring_id - Ring Id
 * @parent - Pointer on struct @k3_nav_ringacc
 * @use_count - Use count for shared rings
 */
struct k3_nav_ring {
	struct k3_nav_ring_rt_regs __iomem *rt;
	struct k3_nav_ring_fifo_regs __iomem *fifos;
	dma_addr_t	ring_mem_dma;
	void		*ring_mem_virt;
	struct k3_nav_ring_ops *ops;
	u32		size;
	enum k3_nav_ring_size elm_size;
	enum k3_nav_ring_mode mode;
	u32		flags;
#define KNAV_RING_FLAG_BUSY	BIT(1)
#define K3_NAV_RING_FLAG_SHARED	BIT(2)
#define K3_NAV_RING_FLAG_REVERSE BIT(3)
	struct k3_nav_ring_state state;
	u32		ring_id;
	struct k3_nav_ringacc	*parent;
	u32		use_count;
};

/* SoC-specific ring accelerator initialization hook */
struct k3_nav_ringacc_ops {
	int (*init)(struct udevice *dev, struct k3_nav_ringacc *ringacc);
};
162 
/**
 * struct k3_nav_ringacc - Rings accelerator descriptor
 *
 * @dev - pointer on RA device
 * @num_rings - number of ring in RA
 * @rings_inuse - bitmap tracking which rings have been requested
 * @rm_gp_range - general purpose rings range from tisci
 * @dma_ring_reset_quirk - DMA reset w/a enable
 * @num_proxies - number of RA proxies
 * @rings - array of rings descriptors (struct @k3_nav_ring)
 * @list - list of RAs in the system
 * @tisci - pointer ti-sci handle
 * @tisci_ring_ops - ti-sci rings ops
 * @tisci_dev_id - ti-sci device id
 * @ops: SoC specific ringacc operation
 * @dual_ring: indicate k3_dmaring dual ring support
 */
struct k3_nav_ringacc {
	struct udevice *dev;
	u32 num_rings; /* number of rings in Ringacc module */
	unsigned long *rings_inuse;
	struct ti_sci_resource *rm_gp_range;
	bool dma_ring_reset_quirk;
	u32 num_proxies;

	struct k3_nav_ring *rings;
	struct list_head list;

	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
	u32  tisci_dev_id;

	const struct k3_nav_ringacc_ops *ops;
	bool dual_ring;
};
197 
k3_nav_ringacc_ring_read_occ(struct k3_nav_ring * ring)198 static int k3_nav_ringacc_ring_read_occ(struct k3_nav_ring *ring)
199 {
200 	return readl(&ring->rt->occ) & KNAV_RINGACC_RT_OCC_MASK;
201 }
202 
k3_nav_ringacc_ring_update_occ(struct k3_nav_ring * ring)203 static void k3_nav_ringacc_ring_update_occ(struct k3_nav_ring *ring)
204 {
205 	u32 val;
206 
207 	val = readl(&ring->rt->occ);
208 
209 	ring->state.occ = val & KNAV_RINGACC_RT_OCC_MASK;
210 	ring->state.tdown_complete = !!(val & K3_DMARING_RING_RT_OCC_TDOWN_COMPLETE);
211 }
212 
/* Compute the virtual address of ring element @idx in the ring memory. */
static void *k3_nav_ringacc_get_elm_addr(struct k3_nav_ring *ring, u32 idx)
{
	u32 elm_bytes = 4 << ring->elm_size;

	return ring->ring_mem_virt + idx * elm_bytes;
}
217 
/* Memory-mode ring element accessors, defined further below */
static int k3_nav_ringacc_ring_push_mem(struct k3_nav_ring *ring, void *elem);
static int k3_nav_ringacc_ring_pop_mem(struct k3_nav_ring *ring, void *elem);
static int k3_dmaring_ring_fwd_pop_mem(struct k3_nav_ring *ring, void *elem);
static int k3_dmaring_ring_reverse_pop_mem(struct k3_nav_ring *ring, void *elem);

/* Ops for a regular (non-dual) ring in RING mode */
static struct k3_nav_ring_ops k3_nav_mode_ring_ops = {
		.push_tail = k3_nav_ringacc_ring_push_mem,
		.pop_head = k3_nav_ringacc_ring_pop_mem,
};

/* Ops for the forward half of a k3_dmaring dual ring */
static struct k3_nav_ring_ops k3_dmaring_fwd_ring_ops = {
		.push_tail = k3_nav_ringacc_ring_push_mem,
		.pop_head = k3_dmaring_ring_fwd_pop_mem,
};

/* Ops for the reverse (completion) half of a k3_dmaring dual ring */
static struct k3_nav_ring_ops k3_dmaring_reverse_ring_ops = {
		.pop_head = k3_dmaring_ring_reverse_pop_mem,
};
236 
/* Return the udevice backing this ring accelerator instance. */
struct udevice *k3_nav_ringacc_get_dev(struct k3_nav_ringacc *ringacc)
{
	return ringacc->dev;
}
241 
/**
 * k3_nav_ringacc_request_ring - request a ring from the accelerator
 * @ringacc: ring accelerator instance
 * @id: ring id to request, or K3_NAV_RINGACC_RING_ID_ANY to allocate any
 *	free general purpose ring from the TISCI GP range
 *
 * Return: the ring descriptor on success, NULL on failure.
 * NOTE(review): a non-negative @id is not bounds-checked against
 * num_rings here - callers are trusted to pass a valid id.
 */
struct k3_nav_ring *k3_nav_ringacc_request_ring(struct k3_nav_ringacc *ringacc,
						int id)
{
	if (id == K3_NAV_RINGACC_RING_ID_ANY) {
		/* Request for any general purpose ring */
		struct ti_sci_resource_desc *gp_rings =
					&ringacc->rm_gp_range->desc[0];
		unsigned long size;

		size = gp_rings->start + gp_rings->num;
		id = find_next_zero_bit(ringacc->rings_inuse,
					size, gp_rings->start);
		if (id == size)
			goto error;	/* no free GP ring left */
	} else if (id < 0) {
		goto error;
	}

	/* A busy ring may be re-requested only when it was marked shared */
	if (test_bit(id, ringacc->rings_inuse) &&
	    !(ringacc->rings[id].flags & K3_NAV_RING_FLAG_SHARED))
		goto error;
	else if (ringacc->rings[id].flags & K3_NAV_RING_FLAG_SHARED)
		goto out;	/* shared ring: bump use count only */

	if (!try_module_get(ringacc->dev->driver->owner))
		goto error;

	pr_debug("Giving ring#%d\n", id);

	set_bit(id, ringacc->rings_inuse);
out:
	ringacc->rings[id].use_count++;
	return &ringacc->rings[id];

error:
	return NULL;
}
279 
k3_dmaring_ring_request_rings_pair(struct k3_nav_ringacc * ringacc,int fwd_id,int compl_id,struct k3_nav_ring ** fwd_ring,struct k3_nav_ring ** compl_ring)280 static int k3_dmaring_ring_request_rings_pair(struct k3_nav_ringacc *ringacc,
281 					      int fwd_id, int compl_id,
282 					      struct k3_nav_ring **fwd_ring,
283 					      struct k3_nav_ring **compl_ring)
284 {
285 	/* k3_dmaring: fwd_id == compl_id, so we ignore compl_id */
286 	if (fwd_id < 0)
287 		return -EINVAL;
288 
289 	if (test_bit(fwd_id, ringacc->rings_inuse))
290 		return -EBUSY;
291 
292 	*fwd_ring = &ringacc->rings[fwd_id];
293 	*compl_ring = &ringacc->rings[fwd_id + ringacc->num_rings];
294 	set_bit(fwd_id, ringacc->rings_inuse);
295 	ringacc->rings[fwd_id].use_count++;
296 	dev_dbg(ringacc->dev, "Giving ring#%d\n", fwd_id);
297 
298 	return 0;
299 }
300 
k3_nav_ringacc_request_rings_pair(struct k3_nav_ringacc * ringacc,int fwd_id,int compl_id,struct k3_nav_ring ** fwd_ring,struct k3_nav_ring ** compl_ring)301 int k3_nav_ringacc_request_rings_pair(struct k3_nav_ringacc *ringacc,
302 				      int fwd_id, int compl_id,
303 				      struct k3_nav_ring **fwd_ring,
304 				      struct k3_nav_ring **compl_ring)
305 {
306 	int ret = 0;
307 
308 	if (!fwd_ring || !compl_ring)
309 		return -EINVAL;
310 
311 	if (ringacc->dual_ring)
312 		return k3_dmaring_ring_request_rings_pair(ringacc, fwd_id, compl_id,
313 						    fwd_ring, compl_ring);
314 
315 	*fwd_ring = k3_nav_ringacc_request_ring(ringacc, fwd_id);
316 	if (!(*fwd_ring))
317 		return -ENODEV;
318 
319 	*compl_ring = k3_nav_ringacc_request_ring(ringacc, compl_id);
320 	if (!(*compl_ring)) {
321 		k3_nav_ringacc_ring_free(*fwd_ring);
322 		ret = -ENODEV;
323 	}
324 
325 	return ret;
326 }
327 
k3_ringacc_ring_reset_sci(struct k3_nav_ring * ring)328 static void k3_ringacc_ring_reset_sci(struct k3_nav_ring *ring)
329 {
330 	struct k3_nav_ringacc *ringacc = ring->parent;
331 	int ret;
332 
333 	ret = ringacc->tisci_ring_ops->config(
334 			ringacc->tisci,
335 			TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
336 			ringacc->tisci_dev_id,
337 			ring->ring_id,
338 			0,
339 			0,
340 			ring->size,
341 			0,
342 			0,
343 			0);
344 	if (ret)
345 		dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
346 			ret, ring->ring_id);
347 }
348 
k3_nav_ringacc_ring_reset(struct k3_nav_ring * ring)349 void k3_nav_ringacc_ring_reset(struct k3_nav_ring *ring)
350 {
351 	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
352 		return;
353 
354 	memset(&ring->state, 0, sizeof(ring->state));
355 
356 	k3_ringacc_ring_reset_sci(ring);
357 }
358 
k3_ringacc_ring_reconfig_qmode_sci(struct k3_nav_ring * ring,enum k3_nav_ring_mode mode)359 static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_nav_ring *ring,
360 					       enum k3_nav_ring_mode mode)
361 {
362 	struct k3_nav_ringacc *ringacc = ring->parent;
363 	int ret;
364 
365 	ret = ringacc->tisci_ring_ops->config(
366 			ringacc->tisci,
367 			TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
368 			ringacc->tisci_dev_id,
369 			ring->ring_id,
370 			0,
371 			0,
372 			0,
373 			mode,
374 			0,
375 			0);
376 	if (ret)
377 		dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
378 			ret, ring->ring_id);
379 }
380 
/**
 * k3_nav_ringacc_ring_reset_dma - reset a ring that is tied to a DMA channel
 * @ring: ring to reset
 * @occ: known ring occupancy, or 0 to read it from HW
 *
 * When dma_ring_reset_quirk is set, applies the UDMAP occupancy-counter
 * wrap workaround (numbered steps below); otherwise a plain reset.
 */
void k3_nav_ringacc_ring_reset_dma(struct k3_nav_ring *ring, u32 occ)
{
	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return;

	if (!ring->parent->dma_ring_reset_quirk) {
		k3_nav_ringacc_ring_reset(ring);
		return;
	}

	if (!occ)
		occ = ringacc_readl(&ring->rt->occ);

	if (occ) {
		u32 db_ring_cnt, db_ring_cnt_cur;

		pr_debug("%s %u occ: %u\n", __func__,
			 ring->ring_id, occ);
		/* 2. Reset the ring */
		k3_ringacc_ring_reset_sci(ring);

		/*
		 * 3. Setup the ring in ring/doorbell mode
		 * (if not already in this mode)
		 */
		if (ring->mode != K3_NAV_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(
					ring, K3_NAV_RINGACC_RING_MODE_RING);
		/*
		 * 4. Ring the doorbell 2**22 - ringOcc times.
		 * This will wrap the internal UDMAP ring state occupancy
		 * counter (which is 21-bits wide) to 0.
		 */
		db_ring_cnt = (1U << 22) - occ;

		while (db_ring_cnt != 0) {
			/*
			 * Ring the doorbell with the maximum count each
			 * iteration if possible to minimize the total
			 * of writes
			 */
			if (db_ring_cnt > KNAV_RINGACC_MAX_DB_RING_CNT)
				db_ring_cnt_cur = KNAV_RINGACC_MAX_DB_RING_CNT;
			else
				db_ring_cnt_cur = db_ring_cnt;

			writel(db_ring_cnt_cur, &ring->rt->db);
			db_ring_cnt -= db_ring_cnt_cur;
		}

		/* 5. Restore the original ring mode (if not ring mode) */
		if (ring->mode != K3_NAV_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
	}

	/* Final plain reset clears the SW state and the element count */
	k3_nav_ringacc_ring_reset(ring);
}
439 
k3_ringacc_ring_free_sci(struct k3_nav_ring * ring)440 static void k3_ringacc_ring_free_sci(struct k3_nav_ring *ring)
441 {
442 	struct k3_nav_ringacc *ringacc = ring->parent;
443 	int ret;
444 
445 	ret = ringacc->tisci_ring_ops->config(
446 			ringacc->tisci,
447 			TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
448 			ringacc->tisci_dev_id,
449 			ring->ring_id,
450 			0,
451 			0,
452 			0,
453 			0,
454 			0,
455 			0);
456 	if (ret)
457 		dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
458 			ret, ring->ring_id);
459 }
460 
/**
 * k3_nav_ringacc_ring_free - release a previously requested ring
 * @ring: ring to release
 *
 * Drops one use count; when it reaches zero the TISCI configuration is
 * released, the ring memory is freed and the ring is marked unused.
 *
 * Return: 0 on success, -EINVAL for a NULL or unowned ring.
 */
int k3_nav_ringacc_ring_free(struct k3_nav_ring *ring)
{
	struct k3_nav_ringacc *ringacc;

	if (!ring)
		return -EINVAL;

	ringacc = ring->parent;

	/*
	 * k3_dmaring: rings shared memory and configuration, only forward ring is
	 * configured and reverse ring considered as slave.
	 */
	if (ringacc->dual_ring && (ring->flags & K3_NAV_RING_FLAG_REVERSE))
		return 0;

	pr_debug("%s flags: 0x%08x\n", __func__, ring->flags);

	if (!test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	/* Shared rings: only the last user actually tears the ring down */
	if (--ring->use_count)
		goto out;

	/* Requested but never configured: nothing to undo in HW */
	if (!(ring->flags & KNAV_RING_FLAG_BUSY))
		goto no_init;

	k3_ringacc_ring_free_sci(ring);

	dma_free_coherent(ringacc->dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt, ring->ring_mem_dma);
	ring->flags &= ~KNAV_RING_FLAG_BUSY;
	ring->ops = NULL;

no_init:
	clear_bit(ring->ring_id, ringacc->rings_inuse);

	module_put(ringacc->dev->driver->owner);

out:
	return 0;
}
504 
/*
 * Return the HW id of a ring.
 *
 * NOTE(review): the return type is u32, so the -EINVAL returned for a NULL
 * ring reaches callers as a large unsigned value - confirm callers treat
 * it accordingly before relying on the error path.
 */
u32 k3_nav_ringacc_get_ring_id(struct k3_nav_ring *ring)
{
	if (!ring)
		return -EINVAL;

	return ring->ring_id;
}
512 
k3_nav_ringacc_ring_cfg_sci(struct k3_nav_ring * ring)513 static int k3_nav_ringacc_ring_cfg_sci(struct k3_nav_ring *ring)
514 {
515 	struct k3_nav_ringacc *ringacc = ring->parent;
516 	u32 ring_idx;
517 	int ret;
518 
519 	if (!ringacc->tisci)
520 		return -EINVAL;
521 
522 	ring_idx = ring->ring_id;
523 	ret = ringacc->tisci_ring_ops->config(
524 			ringacc->tisci,
525 			TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
526 			ringacc->tisci_dev_id,
527 			ring_idx,
528 			lower_32_bits(ring->ring_mem_dma),
529 			upper_32_bits(ring->ring_mem_dma),
530 			ring->size,
531 			ring->mode,
532 			ring->elm_size,
533 			0);
534 	if (ret)
535 		dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
536 			ret, ring_idx);
537 
538 	return ret;
539 }
540 
/**
 * k3_dmaring_ring_cfg - configure a k3_dmaring forward/reverse ring pair
 * @ring: forward ring to configure (reverse half is slaved to it)
 * @cfg: requested configuration
 *
 * Only 8-byte elements in RING mode are accepted. The reverse ring shares
 * the forward ring's memory and TISCI configuration.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int k3_dmaring_ring_cfg(struct k3_nav_ring *ring, struct k3_nav_ring_cfg *cfg)
{
	struct k3_nav_ringacc *ringacc;
	struct k3_nav_ring *reverse_ring;
	int ret = 0;

	if (cfg->elm_size != K3_NAV_RINGACC_RING_ELSIZE_8 ||
	    cfg->mode != K3_NAV_RINGACC_RING_MODE_RING ||
	    cfg->size & ~K3_DMARING_RING_CFG_RING_SIZE_ELCNT_MASK)
		return -EINVAL;

	ringacc = ring->parent;

	/*
	 * k3_dmaring: rings shared memory and configuration, only forward ring is
	 * configured and reverse ring considered as slave.
	 */
	if (ringacc->dual_ring && (ring->flags & K3_NAV_RING_FLAG_REVERSE))
		return 0;

	if (!test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	ring->size = cfg->size;
	ring->elm_size = cfg->elm_size;
	ring->mode = cfg->mode;
	memset(&ring->state, 0, sizeof(ring->state));

	ring->ops = &k3_dmaring_fwd_ring_ops;

	/* Single shared buffer for both halves of the dual ring */
	ring->ring_mem_virt =
		dma_alloc_coherent(ring->size * (4 << ring->elm_size),
				   (unsigned long *)&ring->ring_mem_dma);
	if (!ring->ring_mem_virt) {
		dev_err(ringacc->dev, "Failed to alloc ring mem\n");
		ret = -ENOMEM;
		goto err_free_ops;
	}

	ret = k3_nav_ringacc_ring_cfg_sci(ring);
	if (ret)
		goto err_free_mem;

	ring->flags |= KNAV_RING_FLAG_BUSY;

	/* k3_dmaring: configure reverse ring */
	reverse_ring = &ringacc->rings[ring->ring_id + ringacc->num_rings];
	reverse_ring->size = cfg->size;
	reverse_ring->elm_size = cfg->elm_size;
	reverse_ring->mode = cfg->mode;
	memset(&reverse_ring->state, 0, sizeof(reverse_ring->state));
	reverse_ring->ops = &k3_dmaring_reverse_ring_ops;

	reverse_ring->ring_mem_virt = ring->ring_mem_virt;
	reverse_ring->ring_mem_dma = ring->ring_mem_dma;
	reverse_ring->flags |= KNAV_RING_FLAG_BUSY;

	return 0;

err_free_mem:
	dma_free_coherent(ringacc->dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt,
			  ring->ring_mem_dma);
err_free_ops:
	ring->ops = NULL;
	return ret;
}
609 
k3_nav_ringacc_ring_cfg(struct k3_nav_ring * ring,struct k3_nav_ring_cfg * cfg)610 int k3_nav_ringacc_ring_cfg(struct k3_nav_ring *ring,
611 			    struct k3_nav_ring_cfg *cfg)
612 {
613 	struct k3_nav_ringacc *ringacc = ring->parent;
614 	int ret = 0;
615 
616 	if (!ring || !cfg)
617 		return -EINVAL;
618 
619 	if (ringacc->dual_ring)
620 		return k3_dmaring_ring_cfg(ring, cfg);
621 
622 	if (cfg->elm_size > K3_NAV_RINGACC_RING_ELSIZE_256 ||
623 	    cfg->mode > K3_NAV_RINGACC_RING_MODE_QM ||
624 	    cfg->size & ~KNAV_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
625 	    !test_bit(ring->ring_id, ringacc->rings_inuse))
626 		return -EINVAL;
627 
628 	if (ring->use_count != 1)
629 		return 0;
630 
631 	ring->size = cfg->size;
632 	ring->elm_size = cfg->elm_size;
633 	ring->mode = cfg->mode;
634 	memset(&ring->state, 0, sizeof(ring->state));
635 
636 	switch (ring->mode) {
637 	case K3_NAV_RINGACC_RING_MODE_RING:
638 		ring->ops = &k3_nav_mode_ring_ops;
639 		break;
640 	default:
641 		ring->ops = NULL;
642 		ret = -EINVAL;
643 		goto err_free_ops;
644 	};
645 
646 	ring->ring_mem_virt =
647 			dma_zalloc_coherent(ringacc->dev,
648 					    ring->size * (4 << ring->elm_size),
649 					    &ring->ring_mem_dma, GFP_KERNEL);
650 	if (!ring->ring_mem_virt) {
651 		dev_err(ringacc->dev, "Failed to alloc ring mem\n");
652 		ret = -ENOMEM;
653 		goto err_free_ops;
654 	}
655 
656 	ret = k3_nav_ringacc_ring_cfg_sci(ring);
657 
658 	if (ret)
659 		goto err_free_mem;
660 
661 	ring->flags |= KNAV_RING_FLAG_BUSY;
662 	ring->flags |= (cfg->flags & K3_NAV_RINGACC_RING_SHARED) ?
663 			K3_NAV_RING_FLAG_SHARED : 0;
664 
665 	return 0;
666 
667 err_free_mem:
668 	dma_free_coherent(ringacc->dev,
669 			  ring->size * (4 << ring->elm_size),
670 			  ring->ring_mem_virt,
671 			  ring->ring_mem_dma);
672 err_free_ops:
673 	ring->ops = NULL;
674 	return ret;
675 }
676 
k3_nav_ringacc_ring_get_size(struct k3_nav_ring * ring)677 u32 k3_nav_ringacc_ring_get_size(struct k3_nav_ring *ring)
678 {
679 	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
680 		return -EINVAL;
681 
682 	return ring->size;
683 }
684 
k3_nav_ringacc_ring_get_free(struct k3_nav_ring * ring)685 u32 k3_nav_ringacc_ring_get_free(struct k3_nav_ring *ring)
686 {
687 	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
688 		return -EINVAL;
689 
690 	if (!ring->state.free)
691 		ring->state.free = ring->size - ringacc_readl(&ring->rt->occ);
692 
693 	return ring->state.free;
694 }
695 
k3_nav_ringacc_ring_get_occ(struct k3_nav_ring * ring)696 u32 k3_nav_ringacc_ring_get_occ(struct k3_nav_ring *ring)
697 {
698 	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
699 		return -EINVAL;
700 
701 	return ringacc_readl(&ring->rt->occ);
702 }
703 
k3_nav_ringacc_ring_is_full(struct k3_nav_ring * ring)704 u32 k3_nav_ringacc_ring_is_full(struct k3_nav_ring *ring)
705 {
706 	return !k3_nav_ringacc_ring_get_free(ring);
707 }
708 
/* Ring element access modes: push/pop/peek at either end of the ring */
enum k3_ringacc_access_mode {
	K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
	K3_RINGACC_ACCESS_MODE_POP_HEAD,
	K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
	K3_RINGACC_ACCESS_MODE_POP_TAIL,
	K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
	K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
};
717 
/* Pop one element from the forward half of a k3_dmaring. */
static int k3_dmaring_ring_fwd_pop_mem(struct k3_nav_ring *ring, void *elem)
{
	void *elem_ptr;
	u32 elem_idx;

	/*
	 * k3_dmaring: forward ring is always tied DMA channel and HW does not
	 * maintain any state data required for POP operation and its unknown
	 * how much elements were consumed by HW. So, to actually
	 * do POP, the read pointer has to be recalculated every time.
	 */
	ring->state.occ = k3_nav_ringacc_ring_read_occ(ring);
	/* Read index = write index minus occupancy, modulo ring size */
	if (ring->state.windex >= ring->state.occ)
		elem_idx = ring->state.windex - ring->state.occ;
	else
		elem_idx = ring->size - (ring->state.occ - ring->state.windex);

	elem_ptr = k3_nav_ringacc_get_elm_addr(ring, elem_idx);
	/* Discard stale cache lines so the CPU sees what the HW wrote */
	invalidate_dcache_range((unsigned long)ring->ring_mem_virt,
				ALIGN((unsigned long)ring->ring_mem_virt +
				      ring->size * (4 << ring->elm_size),
				      ARCH_DMA_MINALIGN));

	memcpy(elem, elem_ptr, (4 << ring->elm_size));

	ring->state.occ--;
	/* Acknowledge one consumed element to the HW via the doorbell */
	writel(-1, &ring->rt->db);

	dev_dbg(ring->parent->dev, "%s: occ%d Windex%d Rindex%d pos_ptr%px\n",
		__func__, ring->state.occ, ring->state.windex, elem_idx,
		elem_ptr);
	return 0;
}
751 
k3_dmaring_ring_reverse_pop_mem(struct k3_nav_ring * ring,void * elem)752 static int k3_dmaring_ring_reverse_pop_mem(struct k3_nav_ring *ring, void *elem)
753 {
754 	void *elem_ptr;
755 
756 	elem_ptr = k3_nav_ringacc_get_elm_addr(ring, ring->state.rindex);
757 
758 	if (ring->state.occ) {
759 		invalidate_dcache_range((unsigned long)ring->ring_mem_virt,
760 					ALIGN((unsigned long)ring->ring_mem_virt +
761 					ring->size * (4 << ring->elm_size),
762 					ARCH_DMA_MINALIGN));
763 
764 		memcpy(elem, elem_ptr, (4 << ring->elm_size));
765 		ring->state.rindex = (ring->state.rindex + 1) % ring->size;
766 		ring->state.occ--;
767 		writel(-1 & K3_DMARING_RING_RT_DB_ENTRY_MASK, &ring->rt->db);
768 	}
769 
770 	dev_dbg(ring->parent->dev, "%s: occ%d index%d pos_ptr%px\n",
771 		__func__, ring->state.occ, ring->state.rindex, elem_ptr);
772 	return 0;
773 }
774 
k3_nav_ringacc_ring_push_mem(struct k3_nav_ring * ring,void * elem)775 static int k3_nav_ringacc_ring_push_mem(struct k3_nav_ring *ring, void *elem)
776 {
777 	void *elem_ptr;
778 
779 	elem_ptr = k3_nav_ringacc_get_elm_addr(ring, ring->state.windex);
780 
781 	memcpy(elem_ptr, elem, (4 << ring->elm_size));
782 
783 	flush_dcache_range((unsigned long)ring->ring_mem_virt,
784 			   ALIGN((unsigned long)ring->ring_mem_virt +
785 				 ring->size * (4 << ring->elm_size),
786 				 ARCH_DMA_MINALIGN));
787 
788 	ring->state.windex = (ring->state.windex + 1) % ring->size;
789 	ring->state.free--;
790 	ringacc_writel(1, &ring->rt->db);
791 
792 	pr_debug("ring_push_mem: free%d index%d\n",
793 		 ring->state.free, ring->state.windex);
794 
795 	return 0;
796 }
797 
k3_nav_ringacc_ring_pop_mem(struct k3_nav_ring * ring,void * elem)798 static int k3_nav_ringacc_ring_pop_mem(struct k3_nav_ring *ring, void *elem)
799 {
800 	void *elem_ptr;
801 
802 	elem_ptr = k3_nav_ringacc_get_elm_addr(ring, ring->state.rindex);
803 
804 	invalidate_dcache_range((unsigned long)ring->ring_mem_virt,
805 				ALIGN((unsigned long)ring->ring_mem_virt +
806 				      ring->size * (4 << ring->elm_size),
807 				      ARCH_DMA_MINALIGN));
808 
809 	memcpy(elem, elem_ptr, (4 << ring->elm_size));
810 
811 	ring->state.rindex = (ring->state.rindex + 1) % ring->size;
812 	ring->state.occ--;
813 	ringacc_writel(-1, &ring->rt->db);
814 
815 	pr_debug("ring_pop_mem: occ%d index%d pos_ptr%p\n",
816 		 ring->state.occ, ring->state.rindex, elem_ptr);
817 	return 0;
818 }
819 
k3_nav_ringacc_ring_push(struct k3_nav_ring * ring,void * elem)820 int k3_nav_ringacc_ring_push(struct k3_nav_ring *ring, void *elem)
821 {
822 	int ret = -EOPNOTSUPP;
823 
824 	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
825 		return -EINVAL;
826 
827 	pr_debug("ring_push%d: free%d index%d\n",
828 		 ring->ring_id, ring->state.free, ring->state.windex);
829 
830 	if (k3_nav_ringacc_ring_is_full(ring))
831 		return -ENOMEM;
832 
833 	if (ring->ops && ring->ops->push_tail)
834 		ret = ring->ops->push_tail(ring, elem);
835 
836 	return ret;
837 }
838 
k3_nav_ringacc_ring_push_head(struct k3_nav_ring * ring,void * elem)839 int k3_nav_ringacc_ring_push_head(struct k3_nav_ring *ring, void *elem)
840 {
841 	int ret = -EOPNOTSUPP;
842 
843 	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
844 		return -EINVAL;
845 
846 	pr_debug("ring_push_head: free%d index%d\n",
847 		 ring->state.free, ring->state.windex);
848 
849 	if (k3_nav_ringacc_ring_is_full(ring))
850 		return -ENOMEM;
851 
852 	if (ring->ops && ring->ops->push_head)
853 		ret = ring->ops->push_head(ring, elem);
854 
855 	return ret;
856 }
857 
k3_nav_ringacc_ring_pop(struct k3_nav_ring * ring,void * elem)858 int k3_nav_ringacc_ring_pop(struct k3_nav_ring *ring, void *elem)
859 {
860 	int ret = -EOPNOTSUPP;
861 
862 	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
863 		return -EINVAL;
864 
865 	if (!ring->state.occ)
866 		k3_nav_ringacc_ring_update_occ(ring);
867 
868 	pr_debug("ring_pop%d: occ%d index%d\n",
869 		 ring->ring_id, ring->state.occ, ring->state.rindex);
870 
871 	if (!ring->state.occ && !ring->state.tdown_complete)
872 		return -ENODATA;
873 
874 	if (ring->ops && ring->ops->pop_head)
875 		ret = ring->ops->pop_head(ring, elem);
876 
877 	return ret;
878 }
879 
k3_nav_ringacc_ring_pop_tail(struct k3_nav_ring * ring,void * elem)880 int k3_nav_ringacc_ring_pop_tail(struct k3_nav_ring *ring, void *elem)
881 {
882 	int ret = -EOPNOTSUPP;
883 
884 	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
885 		return -EINVAL;
886 
887 	if (!ring->state.occ)
888 		k3_nav_ringacc_ring_update_occ(ring);
889 
890 	pr_debug("ring_pop_tail: occ%d index%d\n",
891 		 ring->state.occ, ring->state.rindex);
892 
893 	if (!ring->state.occ)
894 		return -ENODATA;
895 
896 	if (ring->ops && ring->ops->pop_tail)
897 		ret = ring->ops->pop_tail(ring, elem);
898 
899 	return ret;
900 }
901 
k3_nav_ringacc_probe_dt(struct k3_nav_ringacc * ringacc)902 static int k3_nav_ringacc_probe_dt(struct k3_nav_ringacc *ringacc)
903 {
904 	struct udevice *dev = ringacc->dev;
905 	struct udevice *devp = dev;
906 	struct udevice *tisci_dev = NULL;
907 	int ret;
908 
909 	ringacc->num_rings = dev_read_u32_default(dev, "ti,num-rings", 0);
910 	if (!ringacc->num_rings) {
911 		dev_err(dev, "ti,num-rings read failure %d\n", ret);
912 		return -EINVAL;
913 	}
914 
915 	ringacc->dma_ring_reset_quirk =
916 			dev_read_bool(dev, "ti,dma-ring-reset-quirk");
917 
918 	ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, devp,
919 					   "ti,sci", &tisci_dev);
920 	if (ret) {
921 		pr_debug("TISCI RA RM get failed (%d)\n", ret);
922 		ringacc->tisci = NULL;
923 		return -ENODEV;
924 	}
925 	ringacc->tisci = (struct ti_sci_handle *)
926 			 (ti_sci_get_handle_from_sysfw(tisci_dev));
927 
928 	ret = dev_read_u32_default(devp, "ti,sci", 0);
929 	if (!ret) {
930 		dev_err(dev, "TISCI RA RM disabled\n");
931 		ringacc->tisci = NULL;
932 		return ret;
933 	}
934 
935 	ret = dev_read_u32(devp, "ti,sci-dev-id", &ringacc->tisci_dev_id);
936 	if (ret) {
937 		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
938 		ringacc->tisci = NULL;
939 		return ret;
940 	}
941 
942 	ringacc->rm_gp_range = devm_ti_sci_get_of_resource(
943 					ringacc->tisci, dev,
944 					ringacc->tisci_dev_id,
945 					"ti,sci-rm-range-gp-rings");
946 	if (IS_ERR(ringacc->rm_gp_range))
947 		ret = PTR_ERR(ringacc->rm_gp_range);
948 
949 	return 0;
950 }
951 
k3_nav_ringacc_init(struct udevice * dev,struct k3_nav_ringacc * ringacc)952 static int k3_nav_ringacc_init(struct udevice *dev, struct k3_nav_ringacc *ringacc)
953 {
954 	void __iomem *base_rt;
955 	int ret, i;
956 
957 	ret = k3_nav_ringacc_probe_dt(ringacc);
958 	if (ret)
959 		return ret;
960 
961 	base_rt = (uint32_t *)devfdt_get_addr_name(dev, "rt");
962 	pr_debug("rt %p\n", base_rt);
963 	if (IS_ERR(base_rt))
964 		return PTR_ERR(base_rt);
965 
966 	ringacc->rings = devm_kzalloc(dev,
967 				      sizeof(*ringacc->rings) *
968 				      ringacc->num_rings,
969 				      GFP_KERNEL);
970 	ringacc->rings_inuse = devm_kcalloc(dev,
971 					    BITS_TO_LONGS(ringacc->num_rings),
972 					    sizeof(unsigned long), GFP_KERNEL);
973 
974 	if (!ringacc->rings || !ringacc->rings_inuse)
975 		return -ENOMEM;
976 
977 	for (i = 0; i < ringacc->num_rings; i++) {
978 		ringacc->rings[i].rt = base_rt +
979 				       KNAV_RINGACC_RT_REGS_STEP * i;
980 		ringacc->rings[i].parent = ringacc;
981 		ringacc->rings[i].ring_id = i;
982 	}
983 	dev_set_drvdata(dev, ringacc);
984 
985 	ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
986 
987 	list_add_tail(&ringacc->list, &k3_nav_ringacc_list);
988 
989 	dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
990 		 ringacc->num_rings,
991 		 ringacc->rm_gp_range->desc[0].start,
992 		 ringacc->rm_gp_range->desc[0].num,
993 		 ringacc->tisci_dev_id);
994 	dev_info(dev, "dma-ring-reset-quirk: %s\n",
995 		 ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
996 	return 0;
997 }
998 
/**
 * k3_ringacc_dmarings_init - initialize a BCDMA/PKTDMA dual-ring instance
 * @dev: DMA udevice owning the rings
 * @data: TISCI handle/device id and ring count
 *
 * Allocates 2 * num_rings descriptors: entries [0, num_rings) are the
 * forward rings, entries [num_rings, 2*num_rings) their reverse halves at
 * the reverse RT register offset.
 *
 * Return: the new instance or an ERR_PTR on failure.
 */
struct k3_nav_ringacc *k3_ringacc_dmarings_init(struct udevice *dev,
						struct k3_ringacc_init_data *data)
{
	struct k3_nav_ringacc *ringacc;
	void __iomem *base_rt;
	int i;

	ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
	if (!ringacc)
		return ERR_PTR(-ENOMEM);

	ringacc->dual_ring = true;

	ringacc->dev = dev;
	ringacc->num_rings = data->num_rings;
	ringacc->tisci = data->tisci;
	ringacc->tisci_dev_id = data->tisci_dev_id;

	base_rt = (uint32_t *)devfdt_get_addr_name(dev, "ringrt");
	if (IS_ERR(base_rt))
		return base_rt;

	/* Twice num_rings: forward rings first, then their reverse halves */
	ringacc->rings = devm_kzalloc(dev,
				      sizeof(*ringacc->rings) *
				      ringacc->num_rings * 2,
				      GFP_KERNEL);
	/* Only forward ring ids are tracked in the in-use bitmap */
	ringacc->rings_inuse = devm_kcalloc(dev,
					    BITS_TO_LONGS(ringacc->num_rings),
					    sizeof(unsigned long), GFP_KERNEL);

	if (!ringacc->rings || !ringacc->rings_inuse)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ringacc->num_rings; i++) {
		struct k3_nav_ring *ring = &ringacc->rings[i];

		/* Forward half at the base of the per-ring RT window */
		ring->rt = base_rt + K3_DMARING_RING_RT_REGS_STEP * i;
		ring->parent = ringacc;
		ring->ring_id = i;

		/* Reverse half at the reverse offset of the same window */
		ring = &ringacc->rings[ringacc->num_rings + i];
		ring->rt = base_rt + K3_DMARING_RING_RT_REGS_STEP * i +
			   K3_DMARING_RING_RT_REGS_REVERSE_OFS;
		ring->parent = ringacc;
		ring->ring_id = i;
		ring->flags = K3_NAV_RING_FLAG_REVERSE;
	}

	ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;

	dev_info(dev, "k3_dmaring Ring probed rings:%u, sci-dev-id:%u\n",
		 ringacc->num_rings,
		 ringacc->tisci_dev_id);
	dev_info(dev, "dma-ring-reset-quirk: %s\n",
		 ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");

	return ringacc;
}
1057 
/* Per-compatible match data carrying the SoC-specific init hook */
struct ringacc_match_data {
	struct k3_nav_ringacc_ops ops;
};

static struct ringacc_match_data k3_nav_ringacc_data = {
	.ops = {
		.init = k3_nav_ringacc_init,
	},
};

/* Device-tree compatibles handled by this driver */
static const struct udevice_id knav_ringacc_ids[] = {
	{ .compatible = "ti,am654-navss-ringacc", .data = (ulong)&k3_nav_ringacc_data, },
	{},
};
1072 
k3_nav_ringacc_probe(struct udevice * dev)1073 static int k3_nav_ringacc_probe(struct udevice *dev)
1074 {
1075 	struct k3_nav_ringacc *ringacc;
1076 	int ret;
1077 	const struct ringacc_match_data *match_data;
1078 
1079 	match_data = (struct ringacc_match_data *)dev_get_driver_data(dev);
1080 
1081 	ringacc = dev_get_priv(dev);
1082 	if (!ringacc)
1083 		return -ENOMEM;
1084 
1085 	ringacc->dev = dev;
1086 	ringacc->ops = &match_data->ops;
1087 	ret = ringacc->ops->init(dev, ringacc);
1088 	if (ret)
1089 		return ret;
1090 
1091 	return 0;
1092 }
1093 
/* U-Boot driver binding for the AM65x NAVSS ring accelerator */
U_BOOT_DRIVER(k3_navss_ringacc) = {
	.name	= "k3-navss-ringacc",
	.id	= UCLASS_MISC,
	.of_match = knav_ringacc_ids,
	.probe = k3_nav_ringacc_probe,
	.priv_auto	= sizeof(struct k3_nav_ringacc),
};
1101