// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

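/*
 * Overview
 *
 * Bridge drivers describe the sub-devices they depend on as struct
 * v4l2_async_subdev descriptors on a notifier's asd_list and register that
 * notifier; sub-device drivers register their struct v4l2_subdev with
 * v4l2_async_register_subdev(). Whenever either side appears, the global
 * lists below are walked under list_lock, matching pairs are bound through
 * the notifier's .bound() callback, and once every descriptor in a notifier
 * tree is matched the root notifier's .complete() callback is called.
 */
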
static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
					  struct v4l2_subdev *subdev,
					  struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asd);
}

static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
					    struct v4l2_subdev *subdev,
					    struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asd);
}

static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}

static bool match_i2c(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

static bool match_fwnode(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	struct fwnode_handle *other_fwnode;
	struct fwnode_handle *dev_fwnode;
	bool asd_fwnode_is_ep;
	bool sd_fwnode_is_ep;
	struct device *dev;

	/*
	 * Both the subdev and the async subdev can provide either an endpoint
	 * fwnode or a device fwnode. Start with the simple case of direct
	 * fwnode matching.
	 */
	if (sd->fwnode == asd->match.fwnode)
		return true;

	/*
	 * Check the same situation for any possible secondary assigned to the
	 * subdev's fwnode
	 */
	if (!IS_ERR_OR_NULL(sd->fwnode->secondary) &&
	    sd->fwnode->secondary == asd->match.fwnode)
		return true;

	/*
	 * Otherwise, check if the sd fwnode and the asd fwnode refer to an
	 * endpoint or a device. If they're of the same type, there's no match.
	 * Technically speaking this checks if the nodes refer to a connected
	 * endpoint, which is the simplest check that works for both OF and
	 * ACPI. This won't make a difference, as drivers should not try to
	 * match unconnected endpoints.
	 */
	sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd->fwnode);
	asd_fwnode_is_ep = fwnode_graph_is_endpoint(asd->match.fwnode);

	if (sd_fwnode_is_ep == asd_fwnode_is_ep)
		return false;

	/*
	 * The sd and asd fwnodes are of different types. Get the device fwnode
	 * parent of the endpoint fwnode, and compare it with the other fwnode.
	 */
	if (sd_fwnode_is_ep) {
		dev_fwnode = fwnode_graph_get_port_parent(sd->fwnode);
		other_fwnode = asd->match.fwnode;
	} else {
		dev_fwnode = fwnode_graph_get_port_parent(asd->match.fwnode);
		other_fwnode = sd->fwnode;
	}

	fwnode_handle_put(dev_fwnode);

	if (dev_fwnode != other_fwnode)
		return false;

	/*
	 * We have a heterogeneous match. Retrieve the struct device of the side
	 * that matched on a device fwnode to print its driver name.
	 */
	if (sd_fwnode_is_ep)
		dev = notifier->v4l2_dev ? notifier->v4l2_dev->dev
		    : notifier->sd->dev;
	else
		dev = sd->dev;

	if (dev && dev->driver) {
		if (sd_fwnode_is_ep)
			dev_warn(dev, "Driver %s uses device fwnode, incorrect match may occur\n",
				 dev->driver->name);
		dev_notice(dev, "Consider updating driver %s to match on endpoints\n",
			   dev->driver->name);
	}

	return true;
}

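/*
 * Global state: sub-devices that no notifier has claimed yet, every
 * registered notifier (completed ones included), and the mutex that protects
 * both lists as well as each notifier's waiting and done lists.
 */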
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

static struct v4l2_async_subdev *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* bus_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(notifier, sd, asd))
			return asd;
	}

	return NULL;
}

/* Compare two async sub-device descriptors for equivalence */
static bool asd_equal(struct v4l2_async_subdev *asd_x,
		      struct v4l2_async_subdev *asd_y)
{
	if (asd_x->match_type != asd_y->match_type)
		return false;

	switch (asd_x->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		return asd_x->match.i2c.adapter_id ==
			asd_y->match.i2c.adapter_id &&
			asd_x->match.i2c.address ==
			asd_y->match.i2c.address;
	case V4L2_ASYNC_MATCH_FWNODE:
		return asd_x->match.fwnode == asd_y->match.fwnode;
	default:
		break;
	}

	return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, list)
		if (n->sd == sd)
			return n;

	return NULL;
}

/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
v4l2_async_notifier_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool
v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd;

	if (!list_empty(&notifier->waiting))
		return false;

	list_for_each_entry(sd, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier &&
		    !v4l2_async_notifier_can_complete(subdev_notifier))
			return false;
	}

	return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int
v4l2_async_notifier_try_complete(struct v4l2_async_notifier *notifier)
{
	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting))
		return 0;

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev)
		return 0;

	/* Is everything ready? */
	if (!v4l2_async_notifier_can_complete(notifier))
		return 0;

	return v4l2_async_notifier_call_complete(notifier);
}

static int
v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier);

static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_subdev *asd)
{
	struct v4l2_async_notifier *subdev_notifier;
	int ret;

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0)
		return ret;

	ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
	if (ret < 0) {
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/*
	 * See if the sub-device has a notifier. If not, return here.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
}

/* Test all async sub-devices in a notifier for a match. */
static int
v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_notifier_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}

static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/*
	 * Subdevice driver will reprobe and put the subdev back
	 * onto the list
	 */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}

/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier)
			v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
		v4l2_async_cleanup(sd);

		list_move(&sd->async_list, &subdev_list);
	}

	notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool
__v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				       struct v4l2_async_subdev *asd)
{
	struct v4l2_async_subdev *asd_y;
	struct v4l2_subdev *sd;

	list_for_each_entry(asd_y, &notifier->waiting, list)
		if (asd_equal(asd, asd_y))
			return true;

	list_for_each_entry(sd, &notifier->done, async_list) {
		if (WARN_ON(!sd->asd))
			continue;

		if (asd_equal(asd, sd->asd))
			return true;
	}

	return false;
}

/*
 * Find out whether an async sub-device was set up already or
 * whether it exists in a given notifier before @this_index.
 * If @this_index < 0, search the notifier's entire @asd_list.
 */
static bool
v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				     struct v4l2_async_subdev *asd,
				     int this_index)
{
	struct v4l2_async_subdev *asd_y;
	int j = 0;

	lockdep_assert_held(&list_lock);

	/* Check that an asd is not being added more than once. */
	list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
		if (this_index >= 0 && j++ >= this_index)
			break;
		if (asd_equal(asd, asd_y))
			return true;
	}

	/* Check that an asd does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, list)
		if (__v4l2_async_notifier_has_async_subdev(notifier, asd))
			return true;

	return false;
}

static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
					 struct v4l2_async_subdev *asd,
					 int this_index)
{
	struct device *dev =
		notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;

	if (!asd)
		return -EINVAL;

	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
	case V4L2_ASYNC_MATCH_FWNODE:
		if (v4l2_async_notifier_has_async_subdev(notifier, asd,
							 this_index)) {
			dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "Invalid match type %u on %p\n",
			asd->match_type, asd);
		return -EINVAL;
	}

	return 0;
}

void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier)
{
	INIT_LIST_HEAD(&notifier->asd_list);
}
EXPORT_SYMBOL(v4l2_async_notifier_init);

static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd;
	int ret, i = 0;

	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	mutex_lock(&list_lock);

	list_for_each_entry(asd, &notifier->asd_list, asd_list) {
		ret = v4l2_async_notifier_asd_valid(notifier, asd, i++);
		if (ret)
			goto err_unlock;

		list_add_tail(&asd->list, &notifier->waiting);
	}

	ret = v4l2_async_notifier_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_notifier_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Keep also completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_notifier_unbind_all_subdevs(notifier);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}

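/*
 * Register a notifier on behalf of a bridge driver. A minimal usage sketch
 * follows; the priv/ops/endpoint names are illustrative only and error
 * handling is abbreviated (typed wrapper macros around the
 * __v4l2_async_notifier_*() helpers are typically provided by
 * media/v4l2-async.h):
 *
 *	struct fwnode_handle *ep;
 *	struct v4l2_async_subdev *asd;
 *
 *	v4l2_async_notifier_init(&priv->notifier);
 *
 *	ep = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
 *	asd = __v4l2_async_notifier_add_fwnode_remote_subdev(&priv->notifier,
 *							      ep, sizeof(*asd));
 *	fwnode_handle_put(ep);
 *	if (IS_ERR(asd))
 *		goto err_cleanup;
 *
 *	priv->notifier.ops = &priv_notifier_ops;
 *	ret = v4l2_async_notifier_register(&priv->v4l2_dev, &priv->notifier);
 *	if (ret)
 *		goto err_cleanup;
 *
 * On teardown the driver calls v4l2_async_notifier_unregister() followed by
 * v4l2_async_notifier_cleanup().
 */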
int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
				 struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!v4l2_dev || notifier->sd))
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->v4l2_dev = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);

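/*
 * Variant used by sub-device drivers that themselves depend on further async
 * sub-devices (e.g. a CSI-2 bridge sub-device waiting for its sensor). The
 * notifier is tied to a sub-device instead of a v4l2_device; it joins the
 * parent notifier's tree once the owning sub-device is bound, and completion
 * is only reported on the root notifier that holds the v4l2_device.
 */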
int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
					struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!sd || notifier->v4l2_dev))
		return -EINVAL;

	notifier->sd = sd;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->sd = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);

static void
__v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_notifier_unbind_all_subdevs(notifier);

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;

	list_del(&notifier->list);
}

void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd, *tmp;

	if (!notifier || !notifier->asd_list.next)
		return;

	list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_FWNODE:
			fwnode_handle_put(asd->match.fwnode);
			break;
		default:
			break;
		}

		list_del(&asd->asd_list);
		kfree(asd);
	}
}

void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);

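/*
 * Add a caller-allocated descriptor to the notifier's asd_list. Drivers
 * normally go through the more specific helpers below (or the typed wrapper
 * macros built on top of them); in every case descriptors must be added
 * before the notifier is registered.
 */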
int __v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
				     struct v4l2_async_subdev *asd)
{
	int ret;

	mutex_lock(&list_lock);

	ret = v4l2_async_notifier_asd_valid(notifier, asd, -1);
	if (ret)
		goto unlock;

	list_add_tail(&asd->asd_list, &notifier->asd_list);

unlock:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_subdev);

struct v4l2_async_subdev *
__v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
					struct fwnode_handle *fwnode,
					unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = fwnode_handle_get(fwnode);

	ret = __v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		fwnode_handle_put(fwnode);
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_fwnode_subdev);

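/*
 * Convenience helper for the common graph-binding case: the caller passes one
 * of its own endpoints and a descriptor is created for the device node found
 * on the remote end of that link. A minimal sketch, with an illustrative
 * endpoint lookup:
 *
 *	ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0, 0);
 *	asd = __v4l2_async_notifier_add_fwnode_remote_subdev(notifier, ep,
 *							     sizeof(*asd));
 *	fwnode_handle_put(ep);
 */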
struct v4l2_async_subdev *
__v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
					       struct fwnode_handle *endpoint,
					       unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	struct fwnode_handle *remote;

	remote = fwnode_graph_get_remote_port_parent(endpoint);
	if (!remote)
		return ERR_PTR(-ENOTCONN);

	asd = __v4l2_async_notifier_add_fwnode_subdev(notif, remote,
						      asd_struct_size);
	/*
	 * Calling __v4l2_async_notifier_add_fwnode_subdev grabs a refcount,
	 * so drop the one we got in fwnode_graph_get_remote_port_parent.
	 */
	fwnode_handle_put(remote);
	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_fwnode_remote_subdev);

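/*
 * I2C match variant for platforms without usable firmware nodes: the
 * descriptor records an adapter number and client address, and match_i2c()
 * compares them against the i2c_client behind the registering sub-device.
 * For instance, a bridge expecting a sensor at address 0x10 on adapter 2
 * would pass adapter_id = 2 and address = 0x10 (values illustrative).
 */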
struct v4l2_async_subdev *
__v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
				     int adapter_id, unsigned short address,
				     unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_I2C;
	asd->match.i2c.adapter_id = adapter_id;
	asd->match.i2c.address = address;

	ret = __v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_i2c_subdev);

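/*
 * Sub-device driver side of the handshake, typically called at the end of
 * probe(). A minimal sketch for an I2C sensor, with illustrative names and
 * error handling omitted:
 *
 *	v4l2_i2c_subdev_init(&sensor->sd, client, &sensor_subdev_ops);
 *	ret = v4l2_async_register_subdev(&sensor->sd);
 *
 * sd->fwnode is taken from the underlying device below if the driver has not
 * set it explicitly. If no registered notifier matches yet, the sub-device
 * simply waits on the global subdev_list until one does.
 */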
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	int ret;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and async sub-device does not
	 * exist independently of the device at any point of time.
	 */
	if (!sd->fwnode && sd->dev)
		sd->fwnode = dev_fwnode(sd->dev);

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_notifier_find_v4l2_dev(notifier);
		struct v4l2_async_subdev *asd;

		if (!v4l2_dev)
			continue;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret)
			goto err_unbind;

		ret = v4l2_async_notifier_try_complete(notifier);
		if (ret)
			goto err_unbind;

		goto out_unlock;
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

out_unlock:
	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Complete failed. Unbind the sub-devices bound through registering
	 * this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

	if (sd->asd)
		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

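/*
 * Called from the sub-device driver's remove path. Any notifier the
 * sub-device registered for its own dependencies is unregistered and freed,
 * the parent notifier's .unbind() callback is called, and the async
 * descriptor is put back on that notifier's waiting list so the sub-device
 * can be matched again after a re-probe.
 */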
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	if (!sd->async_list.next)
		return;

	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(sd->subdev_notifier);
	__v4l2_async_notifier_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asd) {
		struct v4l2_async_notifier *notifier = sd->notifier;

		list_add(&sd->asd->list, &notifier->waiting);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	}

	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);

static void print_waiting_subdev(struct seq_file *s,
				 struct v4l2_async_subdev *asd)
{
	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		seq_printf(s, " [i2c] dev=%d-%04x\n", asd->match.i2c.adapter_id,
			   asd->match.i2c.address);
		break;
	case V4L2_ASYNC_MATCH_FWNODE: {
		struct fwnode_handle *devnode, *fwnode = asd->match.fwnode;

		devnode = fwnode_graph_is_endpoint(fwnode) ?
			  fwnode_graph_get_port_parent(fwnode) :
			  fwnode_handle_get(fwnode);

		seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
			   devnode->dev ? dev_name(devnode->dev) : "nil",
			   fwnode);

		fwnode_handle_put(devnode);
		break;
	}
	}
}

static const char *
v4l2_async_notifier_name(struct v4l2_async_notifier *notifier)
{
	if (notifier->v4l2_dev)
		return notifier->v4l2_dev->name;
	else if (notifier->sd)
		return notifier->sd->name;
	else
		return "nil";
}

static int pending_subdevs_show(struct seq_file *s, void *data)
{
	struct v4l2_async_notifier *notif;
	struct v4l2_async_subdev *asd;

	mutex_lock(&list_lock);

	list_for_each_entry(notif, &notifier_list, list) {
		seq_printf(s, "%s:\n", v4l2_async_notifier_name(notif));
		list_for_each_entry(asd, &notif->waiting, list)
			print_waiting_subdev(s, asd);
	}

	mutex_unlock(&list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pending_subdevs);

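/*
 * Create the "pending_async_subdevices" debugfs file in the directory the
 * V4L2 core passes in. Reading it lists, per notifier, every descriptor still
 * waiting for its sub-device, e.g. (output illustrative):
 *
 *	my-camera-pipeline:
 *	 [fwnode] dev=nil, node=/soc/i2c@0/camera@10
 *	 [i2c] dev=2-0010
 */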
void v4l2_async_debug_init(struct dentry *debugfs_dir)
{
	debugfs_create_file("pending_async_subdevices", 0444, debugfs_dir, NULL,
			    &pending_subdevs_fops);
}