xref: /linux/drivers/staging/greybus/gpio.c (revision d6fd48ef)
// SPDX-License-Identifier: GPL-2.0
/*
 * GPIO Greybus driver.
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/gpio/driver.h>
#include <linux/mutex.h>
#include <linux/greybus.h>

#include "gbphy.h"

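/*
 * Host-side cache of per-line state.  The irq_chip callbacks only record
 * requested mask and trigger-type changes here; the *_pending flags mark
 * state that still has to be sent to the module from
 * gb_gpio_irq_bus_sync_unlock(), where sleeping is allowed.
 */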
struct gb_gpio_line {
	/* The following has to be an array of line_max entries */
	/* --> make them just a flags field */
	u8			active:    1,
				direction: 1,	/* 0 = output, 1 = input */
				value:     1;	/* 0 = low, 1 = high */
	u16			debounce_usec;

	u8			irq_type;
	bool			irq_type_pending;
	bool			masked;
	bool			masked_pending;
};

struct gb_gpio_controller {
	struct gbphy_device	*gbphy_dev;
	struct gb_connection	*connection;
	u8			line_max;	/* max line number */
	struct gb_gpio_line	*lines;

	struct gpio_chip	chip;
	struct irq_chip		irqc;
	struct mutex		irq_lock;
};
#define gpio_chip_to_gb_gpio_controller(chip) \
	container_of(chip, struct gb_gpio_controller, chip)

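/*
 * gpiolib creates the IRQ domain for a gpio_irq_chip with the gpio_chip as
 * the domain's host_data, so the chip can be looked up from the irq_data.
 */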
static struct gpio_chip *irq_data_to_gpio_chip(struct irq_data *d)
{
	return d->domain->host_data;
}

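/*
 * Query the module's line count; the response value is cached as the highest
 * valid line number (line_max), so the chip exposes line_max + 1 lines.
 */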
static int gb_gpio_line_count_operation(struct gb_gpio_controller *ggc)
{
	struct gb_gpio_line_count_response response;
	int ret;

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_LINE_COUNT,
				NULL, 0, &response, sizeof(response));
	if (!ret)
		ggc->line_max = response.count;
	return ret;
}

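/*
 * Activating a line takes a runtime PM reference on the gbphy device; it is
 * dropped again if activation fails or when the line is deactivated.
 */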
static int gb_gpio_activate_operation(struct gb_gpio_controller *ggc, u8 which)
{
	struct gb_gpio_activate_request request;
	struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		return ret;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_ACTIVATE,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		gbphy_runtime_put_autosuspend(gbphy_dev);
		return ret;
	}

	ggc->lines[which].active = true;

	return 0;
}

static void gb_gpio_deactivate_operation(struct gb_gpio_controller *ggc,
					 u8 which)
{
	struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
	struct device *dev = &gbphy_dev->dev;
	struct gb_gpio_deactivate_request request;
	int ret;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DEACTIVATE,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(dev, "failed to deactivate gpio %u\n", which);
		goto out_pm_put;
	}

	ggc->lines[which].active = false;

out_pm_put:
	gbphy_runtime_put_autosuspend(gbphy_dev);
}

static int gb_gpio_get_direction_operation(struct gb_gpio_controller *ggc,
					   u8 which)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_get_direction_request request;
	struct gb_gpio_get_direction_response response;
	int ret;
	u8 direction;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_DIRECTION,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret)
		return ret;

	direction = response.direction;
	if (direction && direction != 1) {
		dev_warn(dev, "gpio %u direction was %u (should be 0 or 1)\n",
			 which, direction);
	}
	ggc->lines[which].direction = direction ? 1 : 0;
	return 0;
}

static int gb_gpio_direction_in_operation(struct gb_gpio_controller *ggc,
					  u8 which)
{
	struct gb_gpio_direction_in_request request;
	int ret;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_IN,
				&request, sizeof(request), NULL, 0);
	if (!ret)
		ggc->lines[which].direction = 1;
	return ret;
}

static int gb_gpio_direction_out_operation(struct gb_gpio_controller *ggc,
					   u8 which, bool value_high)
{
	struct gb_gpio_direction_out_request request;
	int ret;

	request.which = which;
	request.value = value_high ? 1 : 0;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_OUT,
				&request, sizeof(request), NULL, 0);
	if (!ret)
		ggc->lines[which].direction = 0;
	return ret;
}

static int gb_gpio_get_value_operation(struct gb_gpio_controller *ggc,
				       u8 which)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_get_value_request request;
	struct gb_gpio_get_value_response response;
	int ret;
	u8 value;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_VALUE,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(dev, "failed to get value of gpio %u\n", which);
		return ret;
	}

	value = response.value;
	if (value && value != 1) {
		dev_warn(dev, "gpio %u value was %u (should be 0 or 1)\n",
			 which, value);
	}
	ggc->lines[which].value = value ? 1 : 0;
	return 0;
}

static void gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
					u8 which, bool value_high)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_set_value_request request;
	int ret;

	if (ggc->lines[which].direction == 1) {
		dev_warn(dev, "refusing to set value of input gpio %u\n",
			 which);
		return;
	}

	request.which = which;
	request.value = value_high ? 1 : 0;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_VALUE,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(dev, "failed to set value of gpio %u\n", which);
		return;
	}

	ggc->lines[which].value = request.value;
}

static int gb_gpio_set_debounce_operation(struct gb_gpio_controller *ggc,
					  u8 which, u16 debounce_usec)
{
	struct gb_gpio_set_debounce_request request;
	int ret;

	request.which = which;
	request.usec = cpu_to_le16(debounce_usec);
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_DEBOUNCE,
				&request, sizeof(request), NULL, 0);
	if (!ret)
		ggc->lines[which].debounce_usec = debounce_usec;
	return ret;
}

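/*
 * The _gb_gpio_irq_* helpers below issue the actual (sleeping) Greybus
 * operations.  The irq_chip callbacks themselves only record the requested
 * state, which gb_gpio_irq_bus_sync_unlock() then flushes via these helpers.
 */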
static void _gb_gpio_irq_mask(struct gb_gpio_controller *ggc, u8 hwirq)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_irq_mask_request request;
	int ret;

	request.which = hwirq;
	ret = gb_operation_sync(ggc->connection,
				GB_GPIO_TYPE_IRQ_MASK,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(dev, "failed to mask irq: %d\n", ret);
}

static void _gb_gpio_irq_unmask(struct gb_gpio_controller *ggc, u8 hwirq)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_irq_unmask_request request;
	int ret;

	request.which = hwirq;
	ret = gb_operation_sync(ggc->connection,
				GB_GPIO_TYPE_IRQ_UNMASK,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(dev, "failed to unmask irq: %d\n", ret);
}

static void _gb_gpio_irq_set_type(struct gb_gpio_controller *ggc,
				  u8 hwirq, u8 type)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_irq_type_request request;
	int ret;

	request.which = hwirq;
	request.type = type;

	ret = gb_operation_sync(ggc->connection,
				GB_GPIO_TYPE_IRQ_TYPE,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(dev, "failed to set irq type: %d\n", ret);
}

static void gb_gpio_irq_mask(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	line->masked = true;
	line->masked_pending = true;
}

static void gb_gpio_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	line->masked = false;
	line->masked_pending = true;
}

static int gb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];
	struct device *dev = &ggc->gbphy_dev->dev;
	u8 irq_type;

	switch (type) {
	case IRQ_TYPE_NONE:
		irq_type = GB_GPIO_IRQ_TYPE_NONE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_BOTH;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		irq_type = GB_GPIO_IRQ_TYPE_LEVEL_LOW;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		irq_type = GB_GPIO_IRQ_TYPE_LEVEL_HIGH;
		break;
	default:
		dev_err(dev, "unsupported irq type: %u\n", type);
		return -EINVAL;
	}

	line->irq_type = irq_type;
	line->irq_type_pending = true;

	return 0;
}

static void gb_gpio_irq_bus_lock(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	mutex_lock(&ggc->irq_lock);
}

static void gb_gpio_irq_bus_sync_unlock(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	if (line->irq_type_pending) {
		_gb_gpio_irq_set_type(ggc, d->hwirq, line->irq_type);
		line->irq_type_pending = false;
	}

	if (line->masked_pending) {
		if (line->masked)
			_gb_gpio_irq_mask(ggc, d->hwirq);
		else
			_gb_gpio_irq_unmask(ggc, d->hwirq);
		line->masked_pending = false;
	}

	mutex_unlock(&ggc->irq_lock);
}

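/*
 * Handler for unsolicited requests from the module: a GB_GPIO_TYPE_IRQ_EVENT
 * request reports that a line interrupt fired.  The hardware line number is
 * mapped to its Linux IRQ and the handler is invoked.
 */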
static int gb_gpio_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_gpio_controller *ggc = gb_connection_get_data(connection);
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_message *request;
	struct gb_gpio_irq_event_request *event;
	u8 type = op->type;
	int irq, ret;

	if (type != GB_GPIO_TYPE_IRQ_EVENT) {
		dev_err(dev, "unsupported unsolicited request: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*event)) {
		dev_err(dev, "short event received (%zu < %zu)\n",
			request->payload_size, sizeof(*event));
		return -EINVAL;
	}

	event = request->payload;
	if (event->which > ggc->line_max) {
		dev_err(dev, "invalid hw irq: %d\n", event->which);
		return -EINVAL;
	}

	irq = irq_find_mapping(ggc->chip.irq.domain, event->which);
	if (!irq) {
		dev_err(dev, "failed to find IRQ\n");
		return -EINVAL;
	}

	ret = generic_handle_irq_safe(irq);
	if (ret)
		dev_err(dev, "failed to invoke irq handler\n");

	return ret;
}

static int gb_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_activate_operation(ggc, (u8)offset);
}

static void gb_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	gb_gpio_deactivate_operation(ggc, (u8)offset);
}

static int gb_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	u8 which;
	int ret;

	which = (u8)offset;
	ret = gb_gpio_get_direction_operation(ggc, which);
	if (ret)
		return ret;

	return ggc->lines[which].direction ? 1 : 0;
}

static int gb_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_direction_in_operation(ggc, (u8)offset);
}

static int gb_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
				    int value)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_direction_out_operation(ggc, (u8)offset, !!value);
}

static int gb_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	u8 which;
	int ret;

	which = (u8)offset;
	ret = gb_gpio_get_value_operation(ggc, which);
	if (ret)
		return ret;

	return ggc->lines[which].value;
}

static void gb_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
}

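/* Only input debounce is supported, with periods of up to U16_MAX usec. */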
static int gb_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
			      unsigned long config)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	u32 debounce;

	if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
		return -ENOTSUPP;

	debounce = pinconf_to_config_argument(config);
	if (debounce > U16_MAX)
		return -EINVAL;

	return gb_gpio_set_debounce_operation(ggc, (u8)offset, (u16)debounce);
}

static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
{
	int ret;

	/* Now find out how many lines there are */
	ret = gb_gpio_line_count_operation(ggc);
	if (ret)
		return ret;

	ggc->lines = kcalloc(ggc->line_max + 1, sizeof(*ggc->lines),
			     GFP_KERNEL);
	if (!ggc->lines)
		return -ENOMEM;

	return ret;
}

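/*
 * Probe: create the connection, enable its TX path so the line count can be
 * queried, set up the gpio_chip and its irq_chip, then fully enable the
 * connection and register the chip.
 */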
static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct gb_gpio_controller *ggc;
	struct gpio_chip *gpio;
	struct gpio_irq_chip *girq;
	struct irq_chip *irqc;
	int ret;

	ggc = kzalloc(sizeof(*ggc), GFP_KERNEL);
	if (!ggc)
		return -ENOMEM;

	connection =
		gb_connection_create(gbphy_dev->bundle,
				     le16_to_cpu(gbphy_dev->cport_desc->id),
				     gb_gpio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_ggc_free;
	}

	ggc->connection = connection;
	gb_connection_set_data(connection, ggc);
	ggc->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, ggc);

	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_gpio_controller_setup(ggc);
	if (ret)
		goto exit_connection_disable;

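	/*
	 * The mask/unmask/set_type callbacks only cache state; it is
	 * flushed to the module in gb_gpio_irq_bus_sync_unlock().
	 */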
	irqc = &ggc->irqc;
	irqc->irq_mask = gb_gpio_irq_mask;
	irqc->irq_unmask = gb_gpio_irq_unmask;
	irqc->irq_set_type = gb_gpio_irq_set_type;
	irqc->irq_bus_lock = gb_gpio_irq_bus_lock;
	irqc->irq_bus_sync_unlock = gb_gpio_irq_bus_sync_unlock;
	irqc->name = "greybus_gpio";

	mutex_init(&ggc->irq_lock);

	gpio = &ggc->chip;

	gpio->label = "greybus_gpio";
	gpio->parent = &gbphy_dev->dev;
	gpio->owner = THIS_MODULE;

	gpio->request = gb_gpio_request;
	gpio->free = gb_gpio_free;
	gpio->get_direction = gb_gpio_get_direction;
	gpio->direction_input = gb_gpio_direction_input;
	gpio->direction_output = gb_gpio_direction_output;
	gpio->get = gb_gpio_get;
	gpio->set = gb_gpio_set;
	gpio->set_config = gb_gpio_set_config;
	gpio->base = -1;		/* Allocate base dynamically */
	gpio->ngpio = ggc->line_max + 1;
	gpio->can_sleep = true;

	girq = &gpio->irq;
	girq->chip = irqc;
	/* The event comes from the outside so no parent handler */
	girq->parent_handler = NULL;
	girq->num_parents = 0;
	girq->parents = NULL;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_level_irq;

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_line_free;

	ret = gpiochip_add(gpio);
	if (ret) {
		dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
		goto exit_line_free;
	}

	gbphy_runtime_put_autosuspend(gbphy_dev);
	return 0;

exit_line_free:
	kfree(ggc->lines);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_ggc_free:
	kfree(ggc);
	return ret;
}

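/*
 * Teardown mirrors probe: incoming requests are disabled before the gpio
 * chip is removed, then the connection is shut down and destroyed.
 */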
static void gb_gpio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_gpio_controller *ggc = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = ggc->connection;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	gb_connection_disable_rx(connection);
	gpiochip_remove(&ggc->chip);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	kfree(ggc->lines);
	kfree(ggc);
}

static const struct gbphy_device_id gb_gpio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_gpio_id_table);

static struct gbphy_driver gpio_driver = {
	.name		= "gpio",
	.probe		= gb_gpio_probe,
	.remove		= gb_gpio_remove,
	.id_table	= gb_gpio_id_table,
};

module_gbphy_driver(gpio_driver);
MODULE_LICENSE("GPL v2");