// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI K3 DSP Remote Processor(s) driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 *	Suman Anna <s-anna@ti.com>
 */

#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"

#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK	(SZ_16M - 1)

/**
 * struct k3_dsp_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address of the memory region from DSP view
 * @size: Size of the memory region
 */
struct k3_dsp_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};

/**
 * struct k3_dsp_mem_data - memory definitions for a DSP
 * @name: name for this memory entry
 * @dev_addr: device address for the memory entry
 */
struct k3_dsp_mem_data {
	const char *name;
	const u32 dev_addr;
};

/**
 * struct k3_dsp_dev_data - device data structure for a DSP
 * @mems: pointer to memory definitions for a DSP
 * @num_mems: number of memory regions in @mems
 * @boot_align_addr: boot vector address alignment granularity
 * @uses_lreset: flag to denote the need for local reset management
 */
struct k3_dsp_dev_data {
	const struct k3_dsp_mem_data *mems;
	u32 num_mems;
	u32 boot_align_addr;
	bool uses_lreset;
};

/**
 * struct k3_dsp_rproc - k3 DSP remote processor driver structure
 * @dev: cached device pointer
 * @rproc: remoteproc device handle
 * @mem: internal memory regions data
 * @num_mems: number of internal memory regions
 * @rmem: reserved memory regions data
 * @num_rmems: number of reserved memory regions
 * @reset: reset control handle
 * @data: pointer to DSP-specific device data
 * @tsp: TI-SCI processor control handle
 * @ti_sci: TI-SCI handle
 * @ti_sci_id: TI-SCI device identifier
 * @mbox: mailbox channel handle
 * @client: mailbox client to request the mailbox channel
 */
struct k3_dsp_rproc {
	struct device *dev;
	struct rproc *rproc;
	struct k3_dsp_mem *mem;
	int num_mems;
	struct k3_dsp_mem *rmem;
	int num_rmems;
	struct reset_control *reset;
	const struct k3_dsp_dev_data *data;
	struct ti_sci_proc *tsp;
	const struct ti_sci_handle *ti_sci;
	u32 ti_sci_id;
	struct mbox_chan *mbox;
	struct mbox_client client;
};

/**
 * k3_dsp_rproc_mbox_callback() - inbound mailbox message handler
 * @client: mailbox client pointer used for requesting the mailbox channel
 * @data: mailbox payload
 *
 * This handler is invoked by the OMAP mailbox driver whenever a mailbox
 * message is received. Usually, the mailbox payload simply contains
 * the index of the virtqueue that is kicked by the remote processor,
 * and we let the remoteproc core handle it.
 *
 * In addition to virtqueue indices, we also have some out-of-band values
 * that indicate different events. Those values are deliberately very
 * large so they don't coincide with virtqueue indices.
 */
static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
{
	struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc,
						  client);
	struct device *dev = kproc->rproc->dev.parent;
	const char *name = kproc->rproc->name;
	u32 msg = omap_mbox_message(data);

	/* Do not forward messages from a detached core */
	if (kproc->rproc->state == RPROC_DETACHED)
		return;

	dev_dbg(dev, "mbox msg: 0x%x\n", msg);

	switch (msg) {
	case RP_MBOX_CRASH:
		/*
		 * remoteproc detected an exception, but error recovery is not
		 * supported. So, just log this for now
		 */
		dev_err(dev, "K3 DSP rproc %s crashed\n", name);
		break;
	case RP_MBOX_ECHO_REPLY:
		dev_info(dev, "received echo reply from %s\n", name);
		break;
	default:
		/* silently handle all other valid messages */
		if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
			return;
		if (msg > kproc->rproc->max_notifyid) {
			dev_dbg(dev, "dropping unknown message 0x%x", msg);
			return;
		}
		/* msg contains the index of the triggered vring */
		if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
			dev_dbg(dev, "no message was found in vqid %d\n", msg);
	}
}

/*
 * Kick the remote processor to notify about pending unprocessed messages.
 * The vqid argument is unused and inconsequential, as the kick is performed
 * through a simulated GPIO (a bit in an IPC interrupt-triggering register);
 * the remote processor is expected to process both its Tx and Rx virtqueues.
 */
static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	struct device *dev = rproc->dev.parent;
	mbox_msg_t msg = (mbox_msg_t)vqid;
	int ret;

	/* Do not forward messages to a detached core */
	if (kproc->rproc->state == RPROC_DETACHED)
		return;

	/* send the index of the triggered virtqueue in the mailbox payload */
	ret = mbox_send_message(kproc->mbox, (void *)msg);
	if (ret < 0)
		dev_err(dev, "failed to send mailbox message (%pe)\n",
			ERR_PTR(ret));
}

/* Put the DSP processor into reset */
static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
{
	struct device *dev = kproc->dev;
	int ret;

	ret = reset_control_assert(kproc->reset);
	if (ret) {
		dev_err(dev, "local-reset assert failed (%pe)\n", ERR_PTR(ret));
		return ret;
	}

	if (kproc->data->uses_lreset)
		return ret;

	ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret) {
		dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
		if (reset_control_deassert(kproc->reset))
			dev_warn(dev, "local-reset deassert back failed\n");
	}

	return ret;
}

/* Release the DSP processor from reset */
static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
{
	struct device *dev = kproc->dev;
	int ret;

	if (kproc->data->uses_lreset)
		goto lreset;

	ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret) {
		dev_err(dev, "module-reset deassert failed (%pe)\n", ERR_PTR(ret));
		return ret;
	}

lreset:
	ret = reset_control_deassert(kproc->reset);
	if (ret) {
		dev_err(dev, "local-reset deassert failed (%pe)\n", ERR_PTR(ret));
		if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
							  kproc->ti_sci_id))
			dev_warn(dev, "module-reset assert back failed\n");
	}

	return ret;
}

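/*
 * Request the mailbox channel used for IPC with the remote processor, and
 * send an initial (non-blocking) echo request on it as a basic sanity check.
 */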
static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	struct mbox_client *client = &kproc->client;
	struct device *dev = kproc->dev;
	int ret;

	client->dev = dev;
	client->tx_done = NULL;
	client->rx_callback = k3_dsp_rproc_mbox_callback;
	client->tx_block = false;
	client->knows_txdone = false;

	kproc->mbox = mbox_request_channel(client, 0);
	if (IS_ERR(kproc->mbox))
		return dev_err_probe(dev, PTR_ERR(kproc->mbox),
				     "mbox_request_channel failed\n");

	/*
	 * Ping the remote processor; this is only for sanity's sake for now,
	 * there is no functional effect whatsoever.
	 *
	 * Note that the reply will _not_ arrive immediately: this message
	 * will wait in the mailbox fifo until the remote processor is booted.
	 */
	ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
	if (ret < 0) {
		dev_err(dev, "mbox_send_message failed (%pe)\n", ERR_PTR(ret));
		mbox_free_channel(kproc->mbox);
		return ret;
	}

	return 0;
}

/*
 * The C66x DSP cores have a local reset that affects only the CPU, and a
 * generic module reset that powers on the device and allows the DSP internal
 * memories to be accessed while the local reset is asserted. This function is
 * used to release the global reset on C66x DSPs to allow loading into the DSP
 * internal RAMs. The .prepare() ops is invoked by the remoteproc core before
 * any firmware loading, and is followed by the .start() ops after loading to
 * actually let the C66x DSP cores run. This callback is invoked only in
 * remoteproc mode.
 */
static int k3_dsp_rproc_prepare(struct rproc *rproc)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret)
		dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading (%pe)\n",
			ERR_PTR(ret));

	return ret;
}

/*
 * This function implements the .unprepare() ops and performs the complementary
 * operations to that of the .prepare() ops. The function is used to assert the
 * global reset on applicable C66x cores. This completes the second portion of
 * powering down the C66x DSP cores. The cores themselves are only halted in the
 * .stop() callback through the local reset, and the .unprepare() ops is invoked
 * by the remoteproc core after the remoteproc is stopped to balance the global
 * reset. This callback is invoked only in remoteproc mode.
 */
static int k3_dsp_rproc_unprepare(struct rproc *rproc)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret)
		dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));

	return ret;
}

/*
 * Power up the DSP remote processor.
 *
 * This function will be invoked only after the firmware for this rproc
 * was loaded, parsed successfully, and all of its resource requirements
 * were met. This callback is invoked only in remoteproc mode.
 */
static int k3_dsp_rproc_start(struct rproc *rproc)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	u32 boot_addr;
	int ret;

	boot_addr = rproc->bootaddr;
	if (boot_addr & (kproc->data->boot_align_addr - 1)) {
		dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n",
			boot_addr, kproc->data->boot_align_addr);
		return -EINVAL;
	}

	dev_dbg(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
	ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
	if (ret)
		return ret;

	ret = k3_dsp_rproc_release(kproc);
	if (ret)
		return ret;

	return 0;
}

/*
 * Stop the DSP remote processor.
 *
 * This function puts the DSP processor into reset, and finishes processing
 * of any pending messages. This callback is invoked only in remoteproc mode.
 */
static int k3_dsp_rproc_stop(struct rproc *rproc)
{
	struct k3_dsp_rproc *kproc = rproc->priv;

	k3_dsp_rproc_reset(kproc);

	return 0;
}

/*
 * Attach to a running DSP remote processor (IPC-only mode)
 *
 * This rproc attach callback is a NOP. The remote processor is already booted,
 * and all required resources have been acquired during the probe routine, so
 * there is no need to issue any TI-SCI commands to boot the DSP core. This
 * callback is invoked only in IPC-only mode and exists because rproc_validate()
 * checks for its existence.
 */
static int k3_dsp_rproc_attach(struct rproc *rproc) { return 0; }

/*
 * Detach from a running DSP remote processor (IPC-only mode)
 *
 * This rproc detach callback is a NOP. The DSP core is not stopped and will be
 * left to continue to run its booted firmware. This callback is invoked only in
 * IPC-only mode and exists for sanity's sake.
 */
static int k3_dsp_rproc_detach(struct rproc *rproc) { return 0; }

/*
 * This function implements the .get_loaded_rsc_table() callback and is used
 * to provide the resource table for a booted DSP in IPC-only mode. The K3 DSP
 * firmwares follow a design-by-contract approach and are expected to have the
 * resource table at the base of the DDR region reserved for firmware usage.
 * This provides flexibility for the remote processor to be booted by different
 * bootloaders that may or may not have the ability to publish the resource table
 * address and size through a DT property. This callback is invoked only in
 * IPC-only mode.
 */
static struct resource_table *k3_dsp_get_loaded_rsc_table(struct rproc *rproc,
							  size_t *rsc_table_sz)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;

	if (!kproc->rmem[0].cpu_addr) {
		dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * NOTE: The resource table size is currently hard-coded to a maximum
	 * of 256 bytes. The most common resource table usage for K3 firmwares
	 * is to only have the vdev resource entry and an optional trace entry.
	 * The exact size could be computed based on resource table address, but
	 * the hard-coded value suffices to support the IPC-only mode.
	 */
	*rsc_table_sz = 256;
	return (struct resource_table *)kproc->rmem[0].cpu_addr;
}

/*
 * Custom function to translate a DSP device address (internal RAMs only) to a
 * kernel virtual address. The DSPs can access their RAMs at either an internal
 * address visible only from a DSP, or at the SoC-level bus address. Both these
 * addresses need to be looked through for translation. The translated addresses
 * can be used either by the remoteproc core for loading (when using kernel
 * remoteproc loader), or by any rpmsg bus drivers.
 */
static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	void __iomem *va = NULL;
	phys_addr_t bus_addr;
	u32 dev_addr, offset;
	size_t size;
	int i;

	if (len == 0)
		return NULL;

	for (i = 0; i < kproc->num_mems; i++) {
		bus_addr = kproc->mem[i].bus_addr;
		dev_addr = kproc->mem[i].dev_addr;
		size = kproc->mem[i].size;

		if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
			/* handle DSP-view addresses */
			if (da >= dev_addr &&
			    ((da + len) <= (dev_addr + size))) {
				offset = da - dev_addr;
				va = kproc->mem[i].cpu_addr + offset;
				return (__force void *)va;
			}
		} else {
			/* handle SoC-view addresses */
			if (da >= bus_addr &&
			    (da + len) <= (bus_addr + size)) {
				offset = da - bus_addr;
				va = kproc->mem[i].cpu_addr + offset;
				return (__force void *)va;
			}
		}
	}

	/* handle static DDR reserved memory regions */
	for (i = 0; i < kproc->num_rmems; i++) {
		dev_addr = kproc->rmem[i].dev_addr;
		size = kproc->rmem[i].size;

		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = kproc->rmem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	return NULL;
}

static const struct rproc_ops k3_dsp_rproc_ops = {
	.start		= k3_dsp_rproc_start,
	.stop		= k3_dsp_rproc_stop,
	.kick		= k3_dsp_rproc_kick,
	.da_to_va	= k3_dsp_rproc_da_to_va,
};

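/*
 * Acquire and ioremap the DSP-internal memory regions (e.g. L1/L2 RAMs)
 * described in the SoC-specific device data, and record their bus and device
 * addresses for use by the address translation in k3_dsp_rproc_da_to_va().
 */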
static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
					struct k3_dsp_rproc *kproc)
{
	const struct k3_dsp_dev_data *data = kproc->data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int num_mems = 0;
	int i;

	num_mems = kproc->data->num_mems;
	kproc->mem = devm_kcalloc(kproc->dev, num_mems,
				  sizeof(*kproc->mem), GFP_KERNEL);
	if (!kproc->mem)
		return -ENOMEM;

	for (i = 0; i < num_mems; i++) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   data->mems[i].name);
		if (!res) {
			dev_err(dev, "found no memory resource for %s\n",
				data->mems[i].name);
			return -EINVAL;
		}
		if (!devm_request_mem_region(dev, res->start,
					     resource_size(res),
					     dev_name(dev))) {
			dev_err(dev, "could not request %s region for resource\n",
				data->mems[i].name);
			return -EBUSY;
		}

		kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
							 resource_size(res));
		if (!kproc->mem[i].cpu_addr) {
			dev_err(dev, "failed to map %s memory\n",
				data->mems[i].name);
			return -ENOMEM;
		}
		kproc->mem[i].bus_addr = res->start;
		kproc->mem[i].dev_addr = data->mems[i].dev_addr;
		kproc->mem[i].size = resource_size(res);

		dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			data->mems[i].name, &kproc->mem[i].bus_addr,
			kproc->mem[i].size, kproc->mem[i].cpu_addr,
			kproc->mem[i].dev_addr);
	}
	kproc->num_mems = num_mems;

	return 0;
}

static void k3_dsp_mem_release(void *data)
{
	struct device *dev = data;

	of_reserved_mem_device_release(dev);
}

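/*
 * Parse the "memory-region" phandles: the first reserved memory region is
 * used as the DMA pool for vring allocations, and all remaining regions are
 * mapped as static carveouts for use by the firmware.
 */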
static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
{
	struct device *dev = kproc->dev;
	struct device_node *np = dev->of_node;
	struct device_node *rmem_np;
	struct reserved_mem *rmem;
	int num_rmems;
	int ret, i;

	num_rmems = of_property_count_elems_of_size(np, "memory-region",
						    sizeof(phandle));
	if (num_rmems < 0) {
		dev_err(dev, "device does not have reserved memory regions (%pe)\n",
			ERR_PTR(num_rmems));
		return -EINVAL;
	}
	if (num_rmems < 2) {
		dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
			num_rmems);
		return -EINVAL;
	}

	/* use reserved memory region 0 for vring DMA allocations */
	ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
	if (ret) {
		dev_err(dev, "device cannot initialize DMA pool (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}
	ret = devm_add_action_or_reset(dev, k3_dsp_mem_release, dev);
	if (ret)
		return ret;

	num_rmems--;
	kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
	if (!kproc->rmem)
		return -ENOMEM;

	/* use remaining reserved memory regions for static carveouts */
	for (i = 0; i < num_rmems; i++) {
		rmem_np = of_parse_phandle(np, "memory-region", i + 1);
		if (!rmem_np)
			return -EINVAL;

		rmem = of_reserved_mem_lookup(rmem_np);
		if (!rmem) {
			of_node_put(rmem_np);
			return -EINVAL;
		}
		of_node_put(rmem_np);

		kproc->rmem[i].bus_addr = rmem->base;
		/* 64-bit address regions currently not supported */
		kproc->rmem[i].dev_addr = (u32)rmem->base;
		kproc->rmem[i].size = rmem->size;
		kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
		if (!kproc->rmem[i].cpu_addr) {
			dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
				i + 1, &rmem->base, &rmem->size);
			return -ENOMEM;
		}

		dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			i + 1, &kproc->rmem[i].bus_addr,
			kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
			kproc->rmem[i].dev_addr);
	}
	kproc->num_rmems = num_rmems;

	return 0;
}

static void k3_dsp_release_tsp(void *data)
{
	struct ti_sci_proc *tsp = data;

	ti_sci_proc_release(tsp);
}

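/*
 * Probe: parse the DT properties, request the mailbox, TI-SCI, reset and
 * memory resources, and register the remote processor. The DSP is configured
 * for IPC-only mode if it is found to be already powered up (e.g. booted by
 * a bootloader), otherwise for remoteproc mode.
 */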
static int k3_dsp_rproc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct k3_dsp_dev_data *data;
	struct k3_dsp_rproc *kproc;
	struct rproc *rproc;
	const char *fw_name;
	bool p_state = false;
	int ret = 0;

	data = of_device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	ret = rproc_of_parse_firmware(dev, 0, &fw_name);
	if (ret)
		return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");

	rproc = devm_rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops,
				 fw_name, sizeof(*kproc));
	if (!rproc)
		return -ENOMEM;

	rproc->has_iommu = false;
	rproc->recovery_disabled = true;
	if (data->uses_lreset) {
		rproc->ops->prepare = k3_dsp_rproc_prepare;
		rproc->ops->unprepare = k3_dsp_rproc_unprepare;
	}
	kproc = rproc->priv;
	kproc->rproc = rproc;
	kproc->dev = dev;
	kproc->data = data;

	ret = k3_dsp_rproc_request_mbox(rproc);
	if (ret)
		return ret;

	kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(kproc->ti_sci))
		return dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
				     "failed to get ti-sci handle\n");

	ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
	if (ret)
		return dev_err_probe(dev, ret, "missing 'ti,sci-dev-id' property\n");

	kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(kproc->reset))
		return dev_err_probe(dev, PTR_ERR(kproc->reset),
				     "failed to get reset\n");

	kproc->tsp = ti_sci_proc_of_get_tsp(dev, kproc->ti_sci);
	if (IS_ERR(kproc->tsp))
		return dev_err_probe(dev, PTR_ERR(kproc->tsp),
				     "failed to construct ti-sci proc control\n");

	ret = ti_sci_proc_request(kproc->tsp);
	if (ret < 0) {
		dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
		return ret;
	}
	ret = devm_add_action_or_reset(dev, k3_dsp_release_tsp, kproc->tsp);
	if (ret)
		return ret;

	ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
	if (ret)
		return ret;

	ret = k3_dsp_reserved_mem_init(kproc);
	if (ret)
		return dev_err_probe(dev, ret, "reserved memory init failed\n");

	ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
					       NULL, &p_state);
	if (ret)
		return dev_err_probe(dev, ret, "failed to get initial state, mode cannot be determined\n");

	/* configure devices for either remoteproc or IPC-only mode */
	if (p_state) {
		dev_info(dev, "configured DSP for IPC-only mode\n");
		rproc->state = RPROC_DETACHED;
		/* override rproc ops with only required IPC-only mode ops */
		rproc->ops->prepare = NULL;
		rproc->ops->unprepare = NULL;
		rproc->ops->start = NULL;
		rproc->ops->stop = NULL;
		rproc->ops->attach = k3_dsp_rproc_attach;
		rproc->ops->detach = k3_dsp_rproc_detach;
		rproc->ops->get_loaded_rsc_table = k3_dsp_get_loaded_rsc_table;
	} else {
		dev_info(dev, "configured DSP for remoteproc mode\n");
		/*
		 * ensure the DSP local reset is asserted so the DSP doesn't
		 * execute bogus code in .prepare() when the module reset is
		 * released.
		 */
		if (data->uses_lreset) {
			ret = reset_control_status(kproc->reset);
			if (ret < 0) {
				return dev_err_probe(dev, ret, "failed to get reset status\n");
			} else if (ret == 0) {
				dev_warn(dev, "local reset is deasserted for device\n");
				k3_dsp_rproc_reset(kproc);
			}
		}
	}

	ret = devm_rproc_add(dev, rproc);
	if (ret)
		return dev_err_probe(dev, ret, "failed to register device with remoteproc core\n");

	platform_set_drvdata(pdev, kproc);

	return 0;
}

static void k3_dsp_rproc_remove(struct platform_device *pdev)
{
	struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
	struct rproc *rproc = kproc->rproc;
	struct device *dev = &pdev->dev;
	int ret;

	if (rproc->state == RPROC_ATTACHED) {
		ret = rproc_detach(rproc);
		if (ret)
			dev_err(dev, "failed to detach proc (%pe)\n", ERR_PTR(ret));
	}

	mbox_free_channel(kproc->mbox);
}

static const struct k3_dsp_mem_data c66_mems[] = {
	{ .name = "l2sram", .dev_addr = 0x800000 },
	{ .name = "l1pram", .dev_addr = 0xe00000 },
	{ .name = "l1dram", .dev_addr = 0xf00000 },
};

/* C71x cores only have an L1P cache; there are no L1P SRAMs */
static const struct k3_dsp_mem_data c71_mems[] = {
	{ .name = "l2sram", .dev_addr = 0x800000 },
	{ .name = "l1dram", .dev_addr = 0xe00000 },
};

static const struct k3_dsp_mem_data c7xv_mems[] = {
	{ .name = "l2sram", .dev_addr = 0x800000 },
};

static const struct k3_dsp_dev_data c66_data = {
	.mems = c66_mems,
	.num_mems = ARRAY_SIZE(c66_mems),
	.boot_align_addr = SZ_1K,
	.uses_lreset = true,
};

static const struct k3_dsp_dev_data c71_data = {
	.mems = c71_mems,
	.num_mems = ARRAY_SIZE(c71_mems),
	.boot_align_addr = SZ_2M,
	.uses_lreset = false,
};

static const struct k3_dsp_dev_data c7xv_data = {
	.mems = c7xv_mems,
	.num_mems = ARRAY_SIZE(c7xv_mems),
	.boot_align_addr = SZ_2M,
	.uses_lreset = false,
};

static const struct of_device_id k3_dsp_of_match[] = {
	{ .compatible = "ti,j721e-c66-dsp", .data = &c66_data, },
	{ .compatible = "ti,j721e-c71-dsp", .data = &c71_data, },
	{ .compatible = "ti,j721s2-c71-dsp", .data = &c71_data, },
	{ .compatible = "ti,am62a-c7xv-dsp", .data = &c7xv_data, },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_dsp_of_match);

static struct platform_driver k3_dsp_rproc_driver = {
	.probe	= k3_dsp_rproc_probe,
	.remove_new = k3_dsp_rproc_remove,
	.driver	= {
		.name = "k3-dsp-rproc",
		.of_match_table = k3_dsp_of_match,
	},
};

module_platform_driver(k3_dsp_rproc_driver);

MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI K3 DSP Remoteproc driver");