xref: /linux/drivers/staging/greybus/gpio.c (revision 84b9b44b)
// SPDX-License-Identifier: GPL-2.0
/*
 * GPIO Greybus driver.
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/gpio/driver.h>
#include <linux/mutex.h>
#include <linux/greybus.h>

#include "gbphy.h"

struct gb_gpio_line {
	/*
	 * One of these is allocated per line (line_max + 1 entries);
	 * the bitfields below could be collapsed into a single flags field.
	 */
	u8			active:    1,
				direction: 1,	/* 0 = output, 1 = input */
				value:     1;	/* 0 = low, 1 = high */
	u16			debounce_usec;

	u8			irq_type;
	bool			irq_type_pending;
	bool			masked;
	bool			masked_pending;
};

struct gb_gpio_controller {
	struct gbphy_device	*gbphy_dev;
	struct gb_connection	*connection;
	u8			line_max;	/* max line number */
	struct gb_gpio_line	*lines;

	struct gpio_chip	chip;
	struct irq_chip		irqc;
	struct mutex		irq_lock;
};

static inline struct gb_gpio_controller *gpio_chip_to_gb_gpio_controller(struct gpio_chip *chip)
{
	return container_of(chip, struct gb_gpio_controller, chip);
}

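/*
 * gpiolib registers the gpio_chip as the host_data of the chip's IRQ
 * domain, so it can be recovered directly from the irq_data.
 */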
static struct gpio_chip *irq_data_to_gpio_chip(struct irq_data *d)
{
	return d->domain->host_data;
}

static int gb_gpio_line_count_operation(struct gb_gpio_controller *ggc)
{
	struct gb_gpio_line_count_response response;
	int ret;

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_LINE_COUNT,
				NULL, 0, &response, sizeof(response));
	if (!ret)
		ggc->line_max = response.count;
	return ret;
}

static int gb_gpio_activate_operation(struct gb_gpio_controller *ggc, u8 which)
{
	struct gb_gpio_activate_request request;
	struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		return ret;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_ACTIVATE,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		gbphy_runtime_put_autosuspend(gbphy_dev);
		return ret;
	}

	ggc->lines[which].active = true;

	return 0;
}

static void gb_gpio_deactivate_operation(struct gb_gpio_controller *ggc,
					 u8 which)
{
	struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
	struct device *dev = &gbphy_dev->dev;
	struct gb_gpio_deactivate_request request;
	int ret;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DEACTIVATE,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(dev, "failed to deactivate gpio %u\n", which);
		goto out_pm_put;
	}

	ggc->lines[which].active = false;

out_pm_put:
	gbphy_runtime_put_autosuspend(gbphy_dev);
}

static int gb_gpio_get_direction_operation(struct gb_gpio_controller *ggc,
					   u8 which)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_get_direction_request request;
	struct gb_gpio_get_direction_response response;
	int ret;
	u8 direction;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_DIRECTION,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret)
		return ret;

	direction = response.direction;
	if (direction && direction != 1) {
		dev_warn(dev, "gpio %u direction was %u (should be 0 or 1)\n",
			 which, direction);
	}
	ggc->lines[which].direction = direction ? 1 : 0;
	return 0;
}

static int gb_gpio_direction_in_operation(struct gb_gpio_controller *ggc,
					  u8 which)
{
	struct gb_gpio_direction_in_request request;
	int ret;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_IN,
				&request, sizeof(request), NULL, 0);
	if (!ret)
		ggc->lines[which].direction = 1;
	return ret;
}

static int gb_gpio_direction_out_operation(struct gb_gpio_controller *ggc,
					   u8 which, bool value_high)
{
	struct gb_gpio_direction_out_request request;
	int ret;

	request.which = which;
	request.value = value_high ? 1 : 0;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_OUT,
				&request, sizeof(request), NULL, 0);
	if (!ret)
		ggc->lines[which].direction = 0;
	return ret;
}

static int gb_gpio_get_value_operation(struct gb_gpio_controller *ggc,
				       u8 which)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_get_value_request request;
	struct gb_gpio_get_value_response response;
	int ret;
	u8 value;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_VALUE,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(dev, "failed to get value of gpio %u\n", which);
		return ret;
	}

	value = response.value;
	if (value && value != 1) {
		dev_warn(dev, "gpio %u value was %u (should be 0 or 1)\n",
			 which, value);
	}
	ggc->lines[which].value = value ? 1 : 0;
	return 0;
}

static void gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
					u8 which, bool value_high)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_set_value_request request;
	int ret;

	if (ggc->lines[which].direction == 1) {
		dev_warn(dev, "refusing to set value of input gpio %u\n",
			 which);
		return;
	}

	request.which = which;
	request.value = value_high ? 1 : 0;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_VALUE,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(dev, "failed to set value of gpio %u\n", which);
		return;
	}

	ggc->lines[which].value = request.value;
}

static int gb_gpio_set_debounce_operation(struct gb_gpio_controller *ggc,
					  u8 which, u16 debounce_usec)
{
	struct gb_gpio_set_debounce_request request;
	int ret;

	request.which = which;
	request.usec = cpu_to_le16(debounce_usec);
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_DEBOUNCE,
				&request, sizeof(request), NULL, 0);
	if (!ret)
		ggc->lines[which].debounce_usec = debounce_usec;
	return ret;
}

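/*
 * The _gb_gpio_irq_* helpers below issue the actual Greybus operations.
 * They may sleep and are only called from gb_gpio_irq_bus_sync_unlock(),
 * with ggc->irq_lock held.
 */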
static void _gb_gpio_irq_mask(struct gb_gpio_controller *ggc, u8 hwirq)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_irq_mask_request request;
	int ret;

	request.which = hwirq;
	ret = gb_operation_sync(ggc->connection,
				GB_GPIO_TYPE_IRQ_MASK,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(dev, "failed to mask irq: %d\n", ret);
}

static void _gb_gpio_irq_unmask(struct gb_gpio_controller *ggc, u8 hwirq)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_irq_unmask_request request;
	int ret;

	request.which = hwirq;
	ret = gb_operation_sync(ggc->connection,
				GB_GPIO_TYPE_IRQ_UNMASK,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(dev, "failed to unmask irq: %d\n", ret);
}

static void _gb_gpio_irq_set_type(struct gb_gpio_controller *ggc,
				  u8 hwirq, u8 type)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_irq_type_request request;
	int ret;

	request.which = hwirq;
	request.type = type;

	ret = gb_operation_sync(ggc->connection,
				GB_GPIO_TYPE_IRQ_TYPE,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(dev, "failed to set irq type: %d\n", ret);
}

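/*
 * The irq_chip callbacks below run with the bus lock held; they only
 * record the requested mask/type state, which is pushed to the module
 * once the bus lock is released in gb_gpio_irq_bus_sync_unlock().
 */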
static void gb_gpio_irq_mask(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	line->masked = true;
	line->masked_pending = true;
}

static void gb_gpio_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	line->masked = false;
	line->masked_pending = true;
}

static int gb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];
	struct device *dev = &ggc->gbphy_dev->dev;
	u8 irq_type;

	switch (type) {
	case IRQ_TYPE_NONE:
		irq_type = GB_GPIO_IRQ_TYPE_NONE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_BOTH;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		irq_type = GB_GPIO_IRQ_TYPE_LEVEL_LOW;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		irq_type = GB_GPIO_IRQ_TYPE_LEVEL_HIGH;
		break;
	default:
		dev_err(dev, "unsupported irq type: %u\n", type);
		return -EINVAL;
	}

	line->irq_type = irq_type;
	line->irq_type_pending = true;

	return 0;
}

static void gb_gpio_irq_bus_lock(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	mutex_lock(&ggc->irq_lock);
}

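/* Flush any pending irq type and mask changes to the module, then unlock. */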
static void gb_gpio_irq_bus_sync_unlock(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	if (line->irq_type_pending) {
		_gb_gpio_irq_set_type(ggc, d->hwirq, line->irq_type);
		line->irq_type_pending = false;
	}

	if (line->masked_pending) {
		if (line->masked)
			_gb_gpio_irq_mask(ggc, d->hwirq);
		else
			_gb_gpio_irq_unmask(ggc, d->hwirq);
		line->masked_pending = false;
	}

	mutex_unlock(&ggc->irq_lock);
}

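/*
 * Handle unsolicited GB_GPIO_TYPE_IRQ_EVENT requests from the module:
 * validate the payload and line number, then hand the event to the
 * Linux IRQ mapped for that line.
 */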
static int gb_gpio_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_gpio_controller *ggc = gb_connection_get_data(connection);
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_message *request;
	struct gb_gpio_irq_event_request *event;
	u8 type = op->type;
	int irq, ret;

	if (type != GB_GPIO_TYPE_IRQ_EVENT) {
		dev_err(dev, "unsupported unsolicited request: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*event)) {
		dev_err(dev, "short event received (%zu < %zu)\n",
			request->payload_size, sizeof(*event));
		return -EINVAL;
	}

	event = request->payload;
	if (event->which > ggc->line_max) {
		dev_err(dev, "invalid hw irq: %d\n", event->which);
		return -EINVAL;
	}

	irq = irq_find_mapping(ggc->chip.irq.domain, event->which);
	if (!irq) {
		dev_err(dev, "failed to find IRQ\n");
		return -EINVAL;
	}

	ret = generic_handle_irq_safe(irq);
	if (ret)
		dev_err(dev, "failed to invoke irq handler\n");

	return ret;
}

static int gb_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_activate_operation(ggc, (u8)offset);
}

static void gb_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	gb_gpio_deactivate_operation(ggc, (u8)offset);
}

static int gb_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	u8 which;
	int ret;

	which = (u8)offset;
	ret = gb_gpio_get_direction_operation(ggc, which);
	if (ret)
		return ret;

	return ggc->lines[which].direction ? 1 : 0;
}

static int gb_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_direction_in_operation(ggc, (u8)offset);
}

static int gb_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
				    int value)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_direction_out_operation(ggc, (u8)offset, !!value);
}

static int gb_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	u8 which;
	int ret;

	which = (u8)offset;
	ret = gb_gpio_get_value_operation(ggc, which);
	if (ret)
		return ret;

	return ggc->lines[which].value;
}

static void gb_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
}

static int gb_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
			      unsigned long config)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	u32 debounce;

	if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
		return -ENOTSUPP;

	debounce = pinconf_to_config_argument(config);
	if (debounce > U16_MAX)
		return -EINVAL;

	return gb_gpio_set_debounce_operation(ggc, (u8)offset, (u16)debounce);
}

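/*
 * Query the module for its GPIO line count and allocate the per-line
 * state array (line_max is the highest valid line number, hence the
 * line_max + 1 entries).
 */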
static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
{
	int ret;

	/* Now find out how many lines there are */
	ret = gb_gpio_line_count_operation(ggc);
	if (ret)
		return ret;

	ggc->lines = kcalloc(ggc->line_max + 1, sizeof(*ggc->lines),
			     GFP_KERNEL);
	if (!ggc->lines)
		return -ENOMEM;

	return ret;
}

static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct gb_gpio_controller *ggc;
	struct gpio_chip *gpio;
	struct gpio_irq_chip *girq;
	struct irq_chip *irqc;
	int ret;

	ggc = kzalloc(sizeof(*ggc), GFP_KERNEL);
	if (!ggc)
		return -ENOMEM;

	connection =
		gb_connection_create(gbphy_dev->bundle,
				     le16_to_cpu(gbphy_dev->cport_desc->id),
				     gb_gpio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_ggc_free;
	}

	ggc->connection = connection;
	gb_connection_set_data(connection, ggc);
	ggc->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, ggc);

	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_gpio_controller_setup(ggc);
	if (ret)
		goto exit_connection_disable;

	irqc = &ggc->irqc;
	irqc->irq_mask = gb_gpio_irq_mask;
	irqc->irq_unmask = gb_gpio_irq_unmask;
	irqc->irq_set_type = gb_gpio_irq_set_type;
	irqc->irq_bus_lock = gb_gpio_irq_bus_lock;
	irqc->irq_bus_sync_unlock = gb_gpio_irq_bus_sync_unlock;
	irqc->name = "greybus_gpio";

	mutex_init(&ggc->irq_lock);

	gpio = &ggc->chip;

	gpio->label = "greybus_gpio";
	gpio->parent = &gbphy_dev->dev;
	gpio->owner = THIS_MODULE;

	gpio->request = gb_gpio_request;
	gpio->free = gb_gpio_free;
	gpio->get_direction = gb_gpio_get_direction;
	gpio->direction_input = gb_gpio_direction_input;
	gpio->direction_output = gb_gpio_direction_output;
	gpio->get = gb_gpio_get;
	gpio->set = gb_gpio_set;
	gpio->set_config = gb_gpio_set_config;
	gpio->base = -1;		/* Allocate base dynamically */
	gpio->ngpio = ggc->line_max + 1;
	gpio->can_sleep = true;

	girq = &gpio->irq;
	girq->chip = irqc;
	/* The event comes from the outside so no parent handler */
	girq->parent_handler = NULL;
	girq->num_parents = 0;
	girq->parents = NULL;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_level_irq;

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_line_free;

	ret = gpiochip_add(gpio);
	if (ret) {
		dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
		goto exit_line_free;
	}

	gbphy_runtime_put_autosuspend(gbphy_dev);
	return 0;

exit_line_free:
	kfree(ggc->lines);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_ggc_free:
	kfree(ggc);
	return ret;
}

static void gb_gpio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_gpio_controller *ggc = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = ggc->connection;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

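	/*
	 * Stop incoming IRQ events before the gpio chip goes away; TX stays
	 * enabled so line deactivation during gpiochip_remove() still works.
	 */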
	gb_connection_disable_rx(connection);
	gpiochip_remove(&ggc->chip);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	kfree(ggc->lines);
	kfree(ggc);
}

static const struct gbphy_device_id gb_gpio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_gpio_id_table);

static struct gbphy_driver gpio_driver = {
	.name		= "gpio",
	.probe		= gb_gpio_probe,
	.remove		= gb_gpio_remove,
	.id_table	= gb_gpio_id_table,
};

module_gbphy_driver(gpio_driver);
MODULE_LICENSE("GPL v2");