// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>

/*
 * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
 * of a single 32-bit value between two processors.  Each value has a single
 * writer (the local side) and a single reader (the remote side). Values are
 * uniquely identified in the system by the directed edge (local processor ID
 * to remote processor ID) and a string identifier.
 *
 * Each processor is responsible for creating the outgoing SMEM items and each
 * item is writable by the local processor and readable by the remote
 * processor.  By using two separate SMEM items that are single-reader and
 * single-writer, SMP2P does not require any remote locking mechanisms.
 *
 * The driver uses the Linux GPIO and interrupt framework to expose a virtual
 * GPIO for each outbound entry and a virtual interrupt controller for each
 * inbound entry.
 */

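/*
 * Illustrative consumer-side usage (a sketch, not part of this driver): a
 * client obtains an outbound bit through the smem_state API and an inbound
 * bit through the interrupt controller exposed for each inbound entry.  The
 * "stop" con_id below is just an example name.
 *
 *	struct qcom_smem_state *state;
 *	unsigned int bit;
 *
 *	state = qcom_smem_state_get(dev, "stop", &bit);
 *	if (IS_ERR(state))
 *		return PTR_ERR(state);
 *
 *	qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));
 *
 * Inbound entries are regular interrupt providers, so a client simply
 * requests the interrupt (e.g. via platform_get_irq_byname()) and configures
 * IRQ_TYPE_EDGE_RISING or IRQ_TYPE_EDGE_FALLING as needed.
 */
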
#define SMP2P_MAX_ENTRY 16
#define SMP2P_MAX_ENTRY_NAME 16

#define SMP2P_FEATURE_SSR_ACK 0x1

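/* ASCII "$SMP" in little-endian byte order, identifying a valid smp2p item */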
#define SMP2P_MAGIC 0x504d5324

/**
 * struct smp2p_smem_item - in memory communication structure
 * @magic:		magic number
 * @version:		version - must be 1
 * @features:		features flag - currently unused
 * @local_pid:		processor id of sending end
 * @remote_pid:		processor id of receiving end
 * @total_entries:	number of entries - always SMP2P_MAX_ENTRY
 * @valid_entries:	number of allocated entries
 * @flags:		status flags - currently unused by this driver
 * @entries:		individual communication entries
 *     @name:		name of the entry
 *     @value:		content of the entry
 */
struct smp2p_smem_item {
	u32 magic;
	u8 version;
	unsigned features:24;
	u16 local_pid;
	u16 remote_pid;
	u16 total_entries;
	u16 valid_entries;
	u32 flags;

	struct {
		u8 name[SMP2P_MAX_ENTRY_NAME];
		u32 value;
	} entries[SMP2P_MAX_ENTRY];
} __packed;

/**
 * struct smp2p_entry - driver context matching one entry
 * @node:	list entry to keep track of allocated entries
 * @smp2p:	reference to the device driver context
 * @name:	name of the entry, to match against smp2p_smem_item
 * @value:	pointer to smp2p_smem_item entry value
 * @last_value:	last handled value
 * @domain:	irq_domain for inbound entries
 * @irq_enabled:	bitmap to track enabled irq bits
 * @irq_rising:	bitmap to mark irq bits for rising detection
 * @irq_falling:	bitmap to mark irq bits for falling detection
 * @state:	smem state handle
 * @lock:	spinlock to protect read-modify-write of the value
 */
struct smp2p_entry {
	struct list_head node;
	struct qcom_smp2p *smp2p;

	const char *name;
	u32 *value;
	u32 last_value;

	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);

	struct qcom_smem_state *state;

	spinlock_t lock;
};

#define SMP2P_INBOUND	0
#define SMP2P_OUTBOUND	1

/**
 * struct qcom_smp2p - device driver context
 * @dev:	device driver handle
 * @in:		pointer to the inbound smem item
 * @out:	pointer to the outbound smem item
 * @smem_items:	ids of the two smem items
 * @valid_entries: already scanned inbound entries
 * @local_pid:	processor id of the inbound edge
 * @remote_pid:	processor id of the outbound edge
 * @ipc_regmap:	regmap for the outbound ipc
 * @ipc_offset:	offset within the regmap
 * @ipc_bit:	bit in regmap@offset to kick to signal remote processor
 * @mbox_client: mailbox client handle
 * @mbox_chan:	apcs ipc mailbox channel handle
 * @inbound:	list of inbound entries
 * @outbound:	list of outbound entries
 */
struct qcom_smp2p {
	struct device *dev;

	struct smp2p_smem_item *in;
	struct smp2p_smem_item *out;

	unsigned smem_items[SMP2P_OUTBOUND + 1];

	unsigned valid_entries;

	unsigned local_pid;
	unsigned remote_pid;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	struct list_head inbound;
	struct list_head outbound;
};

static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
{
	/* Make sure any updated data is written before the kick */
	wmb();

	if (smp2p->mbox_chan) {
		mbox_send_message(smp2p->mbox_chan, NULL);
		mbox_client_txdone(smp2p->mbox_chan, 0);
	} else {
		regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
	}
}

/**
 * qcom_smp2p_intr() - interrupt handler for incoming notifications
 * @irq:	unused
 * @data:	smp2p driver context
 *
 * Handle notifications from the remote side to handle newly allocated entries
 * or any changes to the state bits of existing entries.
 */
static irqreturn_t qcom_smp2p_intr(int irq, void *data)
{
	struct smp2p_smem_item *in;
	struct smp2p_entry *entry;
	struct qcom_smp2p *smp2p = data;
	unsigned smem_id = smp2p->smem_items[SMP2P_INBOUND];
	unsigned pid = smp2p->remote_pid;
	size_t size;
	int irq_pin;
	u32 status;
	char buf[SMP2P_MAX_ENTRY_NAME];
	u32 val;
	int i;

	in = smp2p->in;

	/* Acquire smem item, if not already found */
	if (!in) {
		in = qcom_smem_get(pid, smem_id, &size);
		if (IS_ERR(in)) {
			dev_err(smp2p->dev,
				"Unable to acquire remote smp2p item\n");
			return IRQ_HANDLED;
		}

		smp2p->in = in;
	}

	/* Match newly created entries */
	for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
		list_for_each_entry(entry, &smp2p->inbound, node) {
			memcpy(buf, in->entries[i].name, sizeof(buf));
			if (!strcmp(buf, entry->name)) {
				entry->value = &in->entries[i].value;
				break;
			}
		}
	}
	smp2p->valid_entries = i;

	/* Fire interrupts based on any value changes */
	list_for_each_entry(entry, &smp2p->inbound, node) {
		/* Ignore entries not yet allocated by the remote side */
		if (!entry->value)
			continue;

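		/* Fetch the value most recently written by the remote side */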
		val = readl(entry->value);

		status = val ^ entry->last_value;
		entry->last_value = val;

		/* No changes to this entry? */
		if (!status)
			continue;

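		/* Deliver a nested interrupt for each enabled bit that changed in its configured direction */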
		for_each_set_bit(i, entry->irq_enabled, 32) {
			if (!(status & BIT(i)))
				continue;

			if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
			    (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		}
	}

	return IRQ_HANDLED;
}

static void smp2p_mask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	clear_bit(irq, entry->irq_enabled);
}

static void smp2p_unmask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	set_bit(irq, entry->irq_enabled);
}

static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

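	/* Only edge triggers are meaningful for these state bits */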
	if (!(type & IRQ_TYPE_EDGE_BOTH))
		return -EINVAL;

	if (type & IRQ_TYPE_EDGE_RISING)
		set_bit(irq, entry->irq_rising);
	else
		clear_bit(irq, entry->irq_rising);

	if (type & IRQ_TYPE_EDGE_FALLING)
		set_bit(irq, entry->irq_falling);
	else
		clear_bit(irq, entry->irq_falling);

	return 0;
}

static struct irq_chip smp2p_irq_chip = {
	.name           = "smp2p",
	.irq_mask       = smp2p_mask_irq,
	.irq_unmask     = smp2p_unmask_irq,
	.irq_set_type	= smp2p_set_irq_type,
};

static int smp2p_irq_map(struct irq_domain *d,
			 unsigned int irq,
			 irq_hw_number_t hw)
{
	struct smp2p_entry *entry = d->host_data;

	irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, entry);
	irq_set_nested_thread(irq, 1);
	irq_set_noprobe(irq);

	return 0;
}

static const struct irq_domain_ops smp2p_irq_ops = {
	.map = smp2p_irq_map,
	.xlate = irq_domain_xlate_twocell,
};

static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
				    struct smp2p_entry *entry,
				    struct device_node *node)
{
	entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
	if (!entry->domain) {
		dev_err(smp2p->dev, "failed to add irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}

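/*
 * smem_state update_bits op: read-modify-write the outbound entry value under
 * the entry lock and kick the remote processor if anything actually changed.
 */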
static int smp2p_update_bits(void *data, u32 mask, u32 value)
{
	struct smp2p_entry *entry = data;
	unsigned long flags;
	u32 orig;
	u32 val;

	spin_lock_irqsave(&entry->lock, flags);
	val = orig = readl(entry->value);
	val &= ~mask;
	val |= value;
	writel(val, entry->value);
	spin_unlock_irqrestore(&entry->lock, flags);

	if (val != orig)
		qcom_smp2p_kick(entry->smp2p);

	return 0;
}

static const struct qcom_smem_state_ops smp2p_state_ops = {
	.update_bits = smp2p_update_bits,
};

static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
				     struct smp2p_entry *entry,
				     struct device_node *node)
{
	struct smp2p_smem_item *out = smp2p->out;
	char buf[SMP2P_MAX_ENTRY_NAME] = {};

	/* Allocate an entry from the smem item */
	strlcpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
	memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);

	/* Make the logical entry reference the physical value */
	entry->value = &out->entries[out->valid_entries].value;

	out->valid_entries++;

	entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
	if (IS_ERR(entry->state)) {
		dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
		return PTR_ERR(entry->state);
	}

	return 0;
}

static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out;
	unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
	unsigned pid = smp2p->remote_pid;
	int ret;

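	/* Allocate the outbound item; -EEXIST simply means it already exists */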
	ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
	if (ret < 0 && ret != -EEXIST) {
		if (ret != -EPROBE_DEFER)
			dev_err(smp2p->dev,
				"unable to allocate local smp2p item\n");
		return ret;
	}

	out = qcom_smem_get(pid, smem_id, NULL);
	if (IS_ERR(out)) {
		dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
		return PTR_ERR(out);
	}

	memset(out, 0, sizeof(*out));
	out->magic = SMP2P_MAGIC;
	out->local_pid = smp2p->local_pid;
	out->remote_pid = smp2p->remote_pid;
	out->total_entries = SMP2P_MAX_ENTRY;
	out->valid_entries = 0;

	/*
	 * Make sure the rest of the header is written before we validate the
	 * item by writing a valid version number.
	 */
	wmb();
	out->version = 1;

	qcom_smp2p_kick(smp2p);

	smp2p->out = out;

	return 0;
}

static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
{
	struct device_node *syscon;
	struct device *dev = smp2p->dev;
	const char *key;
	int ret;

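	/* The qcom,ipc property is a <syscon-phandle, register offset, bit> triplet */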
	syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
	if (!syscon) {
		dev_err(dev, "no qcom,ipc node\n");
		return -ENODEV;
	}

	smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
	of_node_put(syscon);
	if (IS_ERR(smp2p->ipc_regmap))
		return PTR_ERR(smp2p->ipc_regmap);

	key = "qcom,ipc";
	ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
	if (ret < 0) {
		dev_err(dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
	if (ret < 0) {
		dev_err(dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}

static int qcom_smp2p_probe(struct platform_device *pdev)
{
	struct smp2p_entry *entry;
	struct device_node *node;
	struct qcom_smp2p *smp2p;
	const char *key;
	int irq;
	int ret;

	smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
	if (!smp2p)
		return -ENOMEM;

	smp2p->dev = &pdev->dev;
	INIT_LIST_HEAD(&smp2p->inbound);
	INIT_LIST_HEAD(&smp2p->outbound);

	platform_set_drvdata(pdev, smp2p);

	key = "qcom,smem";
	ret = of_property_read_u32_array(pdev->dev.of_node, key,
					 smp2p->smem_items, 2);
	if (ret)
		return ret;

	key = "qcom,local-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
	if (ret)
		goto report_read_failure;

	key = "qcom,remote-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
	if (ret)
		goto report_read_failure;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

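	/*
	 * Prefer a mailbox channel for the outgoing kick; fall back to writing
	 * the IPC register described by qcom,ipc when no mailbox is available.
	 */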
	smp2p->mbox_client.dev = &pdev->dev;
	smp2p->mbox_client.knows_txdone = true;
	smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
	if (IS_ERR(smp2p->mbox_chan)) {
		if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
			return PTR_ERR(smp2p->mbox_chan);

		smp2p->mbox_chan = NULL;

		ret = smp2p_parse_ipc(smp2p);
		if (ret)
			return ret;
	}

	ret = qcom_smp2p_alloc_outbound_item(smp2p);
	if (ret < 0)
		goto release_mbox;

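	/*
	 * Each child node becomes an entry: nodes marked as interrupt
	 * controllers are inbound, all others are outbound smem_state
	 * providers.
	 */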
	for_each_available_child_of_node(pdev->dev.of_node, node) {
		entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			ret = -ENOMEM;
			of_node_put(node);
			goto unwind_interfaces;
		}

		entry->smp2p = smp2p;
		spin_lock_init(&entry->lock);

		ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
		if (ret < 0) {
			of_node_put(node);
			goto unwind_interfaces;
		}

		if (of_property_read_bool(node, "interrupt-controller")) {
			ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
			if (ret < 0) {
				of_node_put(node);
				goto unwind_interfaces;
			}

			list_add(&entry->node, &smp2p->inbound);
		} else {
			ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
			if (ret < 0) {
				of_node_put(node);
				goto unwind_interfaces;
			}

			list_add(&entry->node, &smp2p->outbound);
		}
	}

	/* Kick the outgoing edge after allocating entries */
	qcom_smp2p_kick(smp2p);

	ret = devm_request_threaded_irq(&pdev->dev, irq,
					NULL, qcom_smp2p_intr,
					IRQF_ONESHOT,
					"smp2p", (void *)smp2p);
	if (ret) {
		dev_err(&pdev->dev, "failed to request interrupt\n");
		goto unwind_interfaces;
	}

	return 0;

unwind_interfaces:
	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	smp2p->out->valid_entries = 0;

release_mbox:
	mbox_free_channel(smp2p->mbox_chan);

	return ret;

report_read_failure:
	dev_err(&pdev->dev, "failed to read %s\n", key);
	return -EINVAL;
}

static int qcom_smp2p_remove(struct platform_device *pdev)
{
	struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
	struct smp2p_entry *entry;

	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	mbox_free_channel(smp2p->mbox_chan);

	smp2p->out->valid_entries = 0;

	return 0;
}

static const struct of_device_id qcom_smp2p_of_match[] = {
	{ .compatible = "qcom,smp2p" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);

static struct platform_driver qcom_smp2p_driver = {
	.probe = qcom_smp2p_probe,
	.remove = qcom_smp2p_remove,
	.driver  = {
		.name  = "qcom_smp2p",
		.of_match_table = qcom_smp2p_of_match,
	},
};
module_platform_driver(qcom_smp2p_driver);

MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
MODULE_LICENSE("GPL v2");