1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
4  *
5  * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/device.h>
10 #include <linux/io.h>
11 #include <linux/pm.h>
12 #include <linux/pm_clock.h>
13 #include <linux/clk.h>
14 #include <linux/clkdev.h>
15 #include <linux/of_clk.h>
16 #include <linux/slab.h>
17 #include <linux/err.h>
18 #include <linux/pm_domain.h>
19 #include <linux/pm_runtime.h>
20 
21 #ifdef CONFIG_PM_CLK
22 
/*
 * Life-cycle states of a PM clock entry.  Advanced by pm_clk_acquire() and
 * __pm_clk_enable(), unwound by pm_clk_suspend() and __pm_clk_remove().
 */
enum pce_status {
	PCE_STATUS_NONE = 0,	/* entry allocated, clock not acquired yet */
	PCE_STATUS_ACQUIRED,	/* clk_get() done; clk_prepare() deferred
				 * because preparing would also enable it */
	PCE_STATUS_PREPARED,	/* clk_prepare() done, clock not enabled */
	PCE_STATUS_ENABLED,	/* clock prepared and enabled */
	PCE_STATUS_ERROR,	/* clk_get() or clk_prepare() failed */
};
30 
/* One clock managed for a device, linked into pm_subsys_data's clock_list. */
struct pm_clock_entry {
	struct list_head node;		/* link in psd->clock_list */
	char *con_id;			/* kstrdup()'d connection ID, may be NULL */
	struct clk *clk;		/* the clock, or an ERR_PTR on failure */
	enum pce_status status;		/* position in the pce_status life cycle */
	bool enabled_when_prepared;	/* clk_prepare() also enables this clock,
					 * so clock ops on it may sleep */
};
38 
/**
 * pm_clk_list_lock - ensure exclusive access for modifying the PM clock
 *		      entry list.
 * @psd: pm_subsys_data instance corresponding to the PM clock entry list
 *	 and clock_op_might_sleep count to be modified.
 *
 * Get exclusive access before modifying the PM clock entry list and the
 * clock_op_might_sleep count to guard against concurrent modifications.
 * This also protects against a concurrent clock_op_might_sleep and PM clock
 * entry list usage in pm_clk_suspend()/pm_clk_resume() that may or may not
 * happen in atomic context, hence both the mutex and the spinlock must be
 * taken here.
 */
static void pm_clk_list_lock(struct pm_subsys_data *psd)
	__acquires(&psd->lock)
{
	/* Lock order: mutex first, then spinlock — never the reverse. */
	mutex_lock(&psd->clock_mutex);
	spin_lock_irq(&psd->lock);
}
58 
/**
 * pm_clk_list_unlock - counterpart to pm_clk_list_lock().
 * @psd: the same pm_subsys_data instance previously passed to
 *	 pm_clk_list_lock().
 */
static void pm_clk_list_unlock(struct pm_subsys_data *psd)
	__releases(&psd->lock)
{
	/* Release in the reverse order of acquisition in pm_clk_list_lock(). */
	spin_unlock_irq(&psd->lock);
	mutex_unlock(&psd->clock_mutex);
}
70 
71 /**
72  * pm_clk_op_lock - ensure exclusive access for performing clock operations.
73  * @psd: pm_subsys_data instance corresponding to the PM clock entry list
74  *	 and clk_op_might_sleep count being used.
75  * @flags: stored irq flags.
76  * @fn: string for the caller function's name.
77  *
78  * This is used by pm_clk_suspend() and pm_clk_resume() to guard
79  * against concurrent modifications to the clock entry list and the
80  * clock_op_might_sleep count. If clock_op_might_sleep is != 0 then
81  * only the mutex can be locked and those functions can only be used in
82  * non atomic context. If clock_op_might_sleep == 0 then these functions
83  * may be used in any context and only the spinlock can be locked.
84  * Returns -EINVAL if called in atomic context when clock ops might sleep.
85  */
pm_clk_op_lock(struct pm_subsys_data * psd,unsigned long * flags,const char * fn)86 static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags,
87 			  const char *fn)
88 	/* sparse annotations don't work here as exit state isn't static */
89 {
90 	bool atomic_context = in_atomic() || irqs_disabled();
91 
92 try_again:
93 	spin_lock_irqsave(&psd->lock, *flags);
94 	if (!psd->clock_op_might_sleep) {
95 		/* the __release is there to work around sparse limitations */
96 		__release(&psd->lock);
97 		return 0;
98 	}
99 
100 	/* bail out if in atomic context */
101 	if (atomic_context) {
102 		pr_err("%s: atomic context with clock_ops_might_sleep = %d",
103 		       fn, psd->clock_op_might_sleep);
104 		spin_unlock_irqrestore(&psd->lock, *flags);
105 		might_sleep();
106 		return -EPERM;
107 	}
108 
109 	/* we must switch to the mutex */
110 	spin_unlock_irqrestore(&psd->lock, *flags);
111 	mutex_lock(&psd->clock_mutex);
112 
113 	/*
114 	 * There was a possibility for psd->clock_op_might_sleep
115 	 * to become 0 above. Keep the mutex only if not the case.
116 	 */
117 	if (likely(psd->clock_op_might_sleep))
118 		return 0;
119 
120 	mutex_unlock(&psd->clock_mutex);
121 	goto try_again;
122 }
123 
124 /**
125  * pm_clk_op_unlock - counterpart to pm_clk_op_lock().
126  * @psd: the same pm_subsys_data instance previously passed to
127  *	 pm_clk_op_lock().
128  * @flags: irq flags provided by pm_clk_op_lock().
129  */
pm_clk_op_unlock(struct pm_subsys_data * psd,unsigned long * flags)130 static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags)
131 	/* sparse annotations don't work here as entry state isn't static */
132 {
133 	if (psd->clock_op_might_sleep) {
134 		mutex_unlock(&psd->clock_mutex);
135 	} else {
136 		/* the __acquire is there to work around sparse limitations */
137 		__acquire(&psd->lock);
138 		spin_unlock_irqrestore(&psd->lock, *flags);
139 	}
140 }
141 
142 /**
143  * __pm_clk_enable - Enable a clock, reporting any errors
144  * @dev: The device for the given clock
145  * @ce: PM clock entry corresponding to the clock.
146  */
__pm_clk_enable(struct device * dev,struct pm_clock_entry * ce)147 static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
148 {
149 	int ret;
150 
151 	switch (ce->status) {
152 	case PCE_STATUS_ACQUIRED:
153 		ret = clk_prepare_enable(ce->clk);
154 		break;
155 	case PCE_STATUS_PREPARED:
156 		ret = clk_enable(ce->clk);
157 		break;
158 	default:
159 		return;
160 	}
161 	if (!ret)
162 		ce->status = PCE_STATUS_ENABLED;
163 	else
164 		dev_err(dev, "%s: failed to enable clk %p, error %d\n",
165 			__func__, ce->clk, ret);
166 }
167 
168 /**
169  * pm_clk_acquire - Acquire a device clock.
170  * @dev: Device whose clock is to be acquired.
171  * @ce: PM clock entry corresponding to the clock.
172  */
pm_clk_acquire(struct device * dev,struct pm_clock_entry * ce)173 static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
174 {
175 	if (!ce->clk)
176 		ce->clk = clk_get(dev, ce->con_id);
177 	if (IS_ERR(ce->clk)) {
178 		ce->status = PCE_STATUS_ERROR;
179 		return;
180 	} else if (clk_is_enabled_when_prepared(ce->clk)) {
181 		/* we defer preparing the clock in that case */
182 		ce->status = PCE_STATUS_ACQUIRED;
183 		ce->enabled_when_prepared = true;
184 	} else if (clk_prepare(ce->clk)) {
185 		ce->status = PCE_STATUS_ERROR;
186 		dev_err(dev, "clk_prepare() failed\n");
187 		return;
188 	} else {
189 		ce->status = PCE_STATUS_PREPARED;
190 	}
191 	dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
192 		ce->clk, ce->con_id);
193 }
194 
__pm_clk_add(struct device * dev,const char * con_id,struct clk * clk)195 static int __pm_clk_add(struct device *dev, const char *con_id,
196 			struct clk *clk)
197 {
198 	struct pm_subsys_data *psd = dev_to_psd(dev);
199 	struct pm_clock_entry *ce;
200 
201 	if (!psd)
202 		return -EINVAL;
203 
204 	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
205 	if (!ce)
206 		return -ENOMEM;
207 
208 	if (con_id) {
209 		ce->con_id = kstrdup(con_id, GFP_KERNEL);
210 		if (!ce->con_id) {
211 			kfree(ce);
212 			return -ENOMEM;
213 		}
214 	} else {
215 		if (IS_ERR(clk)) {
216 			kfree(ce);
217 			return -ENOENT;
218 		}
219 		ce->clk = clk;
220 	}
221 
222 	pm_clk_acquire(dev, ce);
223 
224 	pm_clk_list_lock(psd);
225 	list_add_tail(&ce->node, &psd->clock_list);
226 	if (ce->enabled_when_prepared)
227 		psd->clock_op_might_sleep++;
228 	pm_clk_list_unlock(psd);
229 	return 0;
230 }
231 
/**
 * pm_clk_add - Start using a device clock for power management.
 * @dev: Device whose clock is going to be used for power management.
 * @con_id: Connection ID of the clock.
 *
 * Add the clock represented by @con_id to the list of clocks used for
 * the power management of @dev.
 *
 * Returns 0 on success or a negative error code otherwise.
 */
int pm_clk_add(struct device *dev, const char *con_id)
{
	return __pm_clk_add(dev, con_id, NULL);
}
EXPORT_SYMBOL_GPL(pm_clk_add);
245 
/**
 * pm_clk_add_clk - Start using a device clock for power management.
 * @dev: Device whose clock is going to be used for power management.
 * @clk: Clock pointer
 *
 * Add the clock to the list of clocks used for the power management of @dev.
 * The power-management code will take control of the clock reference, so
 * callers should not call clk_put() on @clk after this function successfully
 * returned.
 *
 * Returns 0 on success or a negative error code otherwise.
 */
int pm_clk_add_clk(struct device *dev, struct clk *clk)
{
	return __pm_clk_add(dev, NULL, clk);
}
EXPORT_SYMBOL_GPL(pm_clk_add_clk);
261 
262 
263 /**
264  * of_pm_clk_add_clk - Start using a device clock for power management.
265  * @dev: Device whose clock is going to be used for power management.
266  * @name: Name of clock that is going to be used for power management.
267  *
268  * Add the clock described in the 'clocks' device-tree node that matches
269  * with the 'name' provided, to the list of clocks used for the power
270  * management of @dev. On success, returns 0. Returns a negative error
271  * code if the clock is not found or cannot be added.
272  */
of_pm_clk_add_clk(struct device * dev,const char * name)273 int of_pm_clk_add_clk(struct device *dev, const char *name)
274 {
275 	struct clk *clk;
276 	int ret;
277 
278 	if (!dev || !dev->of_node || !name)
279 		return -EINVAL;
280 
281 	clk = of_clk_get_by_name(dev->of_node, name);
282 	if (IS_ERR(clk))
283 		return PTR_ERR(clk);
284 
285 	ret = pm_clk_add_clk(dev, clk);
286 	if (ret) {
287 		clk_put(clk);
288 		return ret;
289 	}
290 
291 	return 0;
292 }
293 EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
294 
295 /**
296  * of_pm_clk_add_clks - Start using device clock(s) for power management.
297  * @dev: Device whose clock(s) is going to be used for power management.
298  *
299  * Add a series of clocks described in the 'clocks' device-tree node for
300  * a device to the list of clocks used for the power management of @dev.
301  * On success, returns the number of clocks added. Returns a negative
302  * error code if there are no clocks in the device node for the device
303  * or if adding a clock fails.
304  */
of_pm_clk_add_clks(struct device * dev)305 int of_pm_clk_add_clks(struct device *dev)
306 {
307 	struct clk **clks;
308 	int i, count;
309 	int ret;
310 
311 	if (!dev || !dev->of_node)
312 		return -EINVAL;
313 
314 	count = of_clk_get_parent_count(dev->of_node);
315 	if (count <= 0)
316 		return -ENODEV;
317 
318 	clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
319 	if (!clks)
320 		return -ENOMEM;
321 
322 	for (i = 0; i < count; i++) {
323 		clks[i] = of_clk_get(dev->of_node, i);
324 		if (IS_ERR(clks[i])) {
325 			ret = PTR_ERR(clks[i]);
326 			goto error;
327 		}
328 
329 		ret = pm_clk_add_clk(dev, clks[i]);
330 		if (ret) {
331 			clk_put(clks[i]);
332 			goto error;
333 		}
334 	}
335 
336 	kfree(clks);
337 
338 	return i;
339 
340 error:
341 	while (i--)
342 		pm_clk_remove_clk(dev, clks[i]);
343 
344 	kfree(clks);
345 
346 	return ret;
347 }
348 EXPORT_SYMBOL_GPL(of_pm_clk_add_clks);
349 
350 /**
351  * __pm_clk_remove - Destroy PM clock entry.
352  * @ce: PM clock entry to destroy.
353  */
__pm_clk_remove(struct pm_clock_entry * ce)354 static void __pm_clk_remove(struct pm_clock_entry *ce)
355 {
356 	if (!ce)
357 		return;
358 
359 	switch (ce->status) {
360 	case PCE_STATUS_ENABLED:
361 		clk_disable(ce->clk);
362 		fallthrough;
363 	case PCE_STATUS_PREPARED:
364 		clk_unprepare(ce->clk);
365 		fallthrough;
366 	case PCE_STATUS_ACQUIRED:
367 	case PCE_STATUS_ERROR:
368 		if (!IS_ERR(ce->clk))
369 			clk_put(ce->clk);
370 		break;
371 	default:
372 		break;
373 	}
374 
375 	kfree(ce->con_id);
376 	kfree(ce);
377 }
378 
379 /**
380  * pm_clk_remove - Stop using a device clock for power management.
381  * @dev: Device whose clock should not be used for PM any more.
382  * @con_id: Connection ID of the clock.
383  *
384  * Remove the clock represented by @con_id from the list of clocks used for
385  * the power management of @dev.
386  */
pm_clk_remove(struct device * dev,const char * con_id)387 void pm_clk_remove(struct device *dev, const char *con_id)
388 {
389 	struct pm_subsys_data *psd = dev_to_psd(dev);
390 	struct pm_clock_entry *ce;
391 
392 	if (!psd)
393 		return;
394 
395 	pm_clk_list_lock(psd);
396 
397 	list_for_each_entry(ce, &psd->clock_list, node) {
398 		if (!con_id && !ce->con_id)
399 			goto remove;
400 		else if (!con_id || !ce->con_id)
401 			continue;
402 		else if (!strcmp(con_id, ce->con_id))
403 			goto remove;
404 	}
405 
406 	pm_clk_list_unlock(psd);
407 	return;
408 
409  remove:
410 	list_del(&ce->node);
411 	if (ce->enabled_when_prepared)
412 		psd->clock_op_might_sleep--;
413 	pm_clk_list_unlock(psd);
414 
415 	__pm_clk_remove(ce);
416 }
417 EXPORT_SYMBOL_GPL(pm_clk_remove);
418 
419 /**
420  * pm_clk_remove_clk - Stop using a device clock for power management.
421  * @dev: Device whose clock should not be used for PM any more.
422  * @clk: Clock pointer
423  *
424  * Remove the clock pointed to by @clk from the list of clocks used for
425  * the power management of @dev.
426  */
pm_clk_remove_clk(struct device * dev,struct clk * clk)427 void pm_clk_remove_clk(struct device *dev, struct clk *clk)
428 {
429 	struct pm_subsys_data *psd = dev_to_psd(dev);
430 	struct pm_clock_entry *ce;
431 
432 	if (!psd || !clk)
433 		return;
434 
435 	pm_clk_list_lock(psd);
436 
437 	list_for_each_entry(ce, &psd->clock_list, node) {
438 		if (clk == ce->clk)
439 			goto remove;
440 	}
441 
442 	pm_clk_list_unlock(psd);
443 	return;
444 
445  remove:
446 	list_del(&ce->node);
447 	if (ce->enabled_when_prepared)
448 		psd->clock_op_might_sleep--;
449 	pm_clk_list_unlock(psd);
450 
451 	__pm_clk_remove(ce);
452 }
453 EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
454 
455 /**
456  * pm_clk_init - Initialize a device's list of power management clocks.
457  * @dev: Device to initialize the list of PM clocks for.
458  *
459  * Initialize the lock and clock_list members of the device's pm_subsys_data
460  * object, set the count of clocks that might sleep to 0.
461  */
pm_clk_init(struct device * dev)462 void pm_clk_init(struct device *dev)
463 {
464 	struct pm_subsys_data *psd = dev_to_psd(dev);
465 	if (psd) {
466 		INIT_LIST_HEAD(&psd->clock_list);
467 		mutex_init(&psd->clock_mutex);
468 		psd->clock_op_might_sleep = 0;
469 	}
470 }
471 EXPORT_SYMBOL_GPL(pm_clk_init);
472 
/**
 * pm_clk_create - Create and initialize a device's list of PM clocks.
 * @dev: Device to create and initialize the list of PM clocks for.
 *
 * Allocate a struct pm_subsys_data object, initialize its lock and clock_list
 * members and make the @dev's power.subsys_data field point to it.
 *
 * Returns the result of dev_pm_get_subsys_data().
 */
int pm_clk_create(struct device *dev)
{
	return dev_pm_get_subsys_data(dev);
}
EXPORT_SYMBOL_GPL(pm_clk_create);
485 
486 /**
487  * pm_clk_destroy - Destroy a device's list of power management clocks.
488  * @dev: Device to destroy the list of PM clocks for.
489  *
490  * Clear the @dev's power.subsys_data field, remove the list of clock entries
491  * from the struct pm_subsys_data object pointed to by it before and free
492  * that object.
493  */
pm_clk_destroy(struct device * dev)494 void pm_clk_destroy(struct device *dev)
495 {
496 	struct pm_subsys_data *psd = dev_to_psd(dev);
497 	struct pm_clock_entry *ce, *c;
498 	struct list_head list;
499 
500 	if (!psd)
501 		return;
502 
503 	INIT_LIST_HEAD(&list);
504 
505 	pm_clk_list_lock(psd);
506 
507 	list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
508 		list_move(&ce->node, &list);
509 	psd->clock_op_might_sleep = 0;
510 
511 	pm_clk_list_unlock(psd);
512 
513 	dev_pm_put_subsys_data(dev);
514 
515 	list_for_each_entry_safe_reverse(ce, c, &list, node) {
516 		list_del(&ce->node);
517 		__pm_clk_remove(ce);
518 	}
519 }
520 EXPORT_SYMBOL_GPL(pm_clk_destroy);
521 
522 /**
523  * pm_clk_suspend - Disable clocks in a device's PM clock list.
524  * @dev: Device to disable the clocks for.
525  */
pm_clk_suspend(struct device * dev)526 int pm_clk_suspend(struct device *dev)
527 {
528 	struct pm_subsys_data *psd = dev_to_psd(dev);
529 	struct pm_clock_entry *ce;
530 	unsigned long flags;
531 	int ret;
532 
533 	dev_dbg(dev, "%s()\n", __func__);
534 
535 	if (!psd)
536 		return 0;
537 
538 	ret = pm_clk_op_lock(psd, &flags, __func__);
539 	if (ret)
540 		return ret;
541 
542 	list_for_each_entry_reverse(ce, &psd->clock_list, node) {
543 		if (ce->status == PCE_STATUS_ENABLED) {
544 			if (ce->enabled_when_prepared) {
545 				clk_disable_unprepare(ce->clk);
546 				ce->status = PCE_STATUS_ACQUIRED;
547 			} else {
548 				clk_disable(ce->clk);
549 				ce->status = PCE_STATUS_PREPARED;
550 			}
551 		}
552 	}
553 
554 	pm_clk_op_unlock(psd, &flags);
555 
556 	return 0;
557 }
558 EXPORT_SYMBOL_GPL(pm_clk_suspend);
559 
560 /**
561  * pm_clk_resume - Enable clocks in a device's PM clock list.
562  * @dev: Device to enable the clocks for.
563  */
pm_clk_resume(struct device * dev)564 int pm_clk_resume(struct device *dev)
565 {
566 	struct pm_subsys_data *psd = dev_to_psd(dev);
567 	struct pm_clock_entry *ce;
568 	unsigned long flags;
569 	int ret;
570 
571 	dev_dbg(dev, "%s()\n", __func__);
572 
573 	if (!psd)
574 		return 0;
575 
576 	ret = pm_clk_op_lock(psd, &flags, __func__);
577 	if (ret)
578 		return ret;
579 
580 	list_for_each_entry(ce, &psd->clock_list, node)
581 		__pm_clk_enable(dev, ce);
582 
583 	pm_clk_op_unlock(psd, &flags);
584 
585 	return 0;
586 }
587 EXPORT_SYMBOL_GPL(pm_clk_resume);
588 
589 /**
590  * pm_clk_notify - Notify routine for device addition and removal.
591  * @nb: Notifier block object this function is a member of.
592  * @action: Operation being carried out by the caller.
593  * @data: Device the routine is being run for.
594  *
595  * For this function to work, @nb must be a member of an object of type
596  * struct pm_clk_notifier_block containing all of the requisite data.
597  * Specifically, the pm_domain member of that object is copied to the device's
598  * pm_domain field and its con_ids member is used to populate the device's list
599  * of PM clocks, depending on @action.
600  *
601  * If the device's pm_domain field is already populated with a value different
602  * from the one stored in the struct pm_clk_notifier_block object, the function
603  * does nothing.
604  */
pm_clk_notify(struct notifier_block * nb,unsigned long action,void * data)605 static int pm_clk_notify(struct notifier_block *nb,
606 				 unsigned long action, void *data)
607 {
608 	struct pm_clk_notifier_block *clknb;
609 	struct device *dev = data;
610 	char **con_id;
611 	int error;
612 
613 	dev_dbg(dev, "%s() %ld\n", __func__, action);
614 
615 	clknb = container_of(nb, struct pm_clk_notifier_block, nb);
616 
617 	switch (action) {
618 	case BUS_NOTIFY_ADD_DEVICE:
619 		if (dev->pm_domain)
620 			break;
621 
622 		error = pm_clk_create(dev);
623 		if (error)
624 			break;
625 
626 		dev_pm_domain_set(dev, clknb->pm_domain);
627 		if (clknb->con_ids[0]) {
628 			for (con_id = clknb->con_ids; *con_id; con_id++)
629 				pm_clk_add(dev, *con_id);
630 		} else {
631 			pm_clk_add(dev, NULL);
632 		}
633 
634 		break;
635 	case BUS_NOTIFY_DEL_DEVICE:
636 		if (dev->pm_domain != clknb->pm_domain)
637 			break;
638 
639 		dev_pm_domain_set(dev, NULL);
640 		pm_clk_destroy(dev);
641 		break;
642 	}
643 
644 	return 0;
645 }
646 
/**
 * pm_clk_runtime_suspend - Runtime-suspend a device and disable its PM clocks.
 * @dev: Device to suspend.
 */
int pm_clk_runtime_suspend(struct device *dev)
{
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	ret = pm_generic_runtime_suspend(dev);
	if (ret) {
		dev_err(dev, "failed to suspend device\n");
		return ret;
	}

	ret = pm_clk_suspend(dev);
	if (!ret)
		return 0;

	/* roll back: the device was suspended but its clocks could not be */
	dev_err(dev, "failed to suspend clock\n");
	pm_generic_runtime_resume(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_clk_runtime_suspend);
669 
/**
 * pm_clk_runtime_resume - Re-enable a device's PM clocks and resume it.
 * @dev: Device to resume.
 */
int pm_clk_runtime_resume(struct device *dev)
{
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	/* clocks first, then the generic resume path */
	ret = pm_clk_resume(dev);
	if (!ret)
		return pm_generic_runtime_resume(dev);

	dev_err(dev, "failed to resume clock\n");
	return ret;
}
EXPORT_SYMBOL_GPL(pm_clk_runtime_resume);
685 
686 #else /* !CONFIG_PM_CLK */
687 
/**
 * enable_clock - Enable a device clock.
 * @dev: Device whose clock is to be enabled.
 * @con_id: Connection ID of the clock.
 */
static void enable_clock(struct device *dev, const char *con_id)
{
	struct clk *clk = clk_get(dev, con_id);

	if (IS_ERR(clk))
		return;

	/* hold the clock on permanently; only the prepare/enable refs remain */
	clk_prepare_enable(clk);
	clk_put(clk);
	dev_info(dev, "Runtime PM disabled, clock forced on.\n");
}
704 
/**
 * disable_clock - Disable a device clock.
 * @dev: Device whose clock is to be disabled.
 * @con_id: Connection ID of the clock.
 */
static void disable_clock(struct device *dev, const char *con_id)
{
	struct clk *clk = clk_get(dev, con_id);

	if (IS_ERR(clk))
		return;

	/* drop the prepare/enable refs taken by enable_clock() */
	clk_disable_unprepare(clk);
	clk_put(clk);
	dev_info(dev, "Runtime PM disabled, clock forced off.\n");
}
721 
722 /**
723  * pm_clk_notify - Notify routine for device addition and removal.
724  * @nb: Notifier block object this function is a member of.
725  * @action: Operation being carried out by the caller.
726  * @data: Device the routine is being run for.
727  *
728  * For this function to work, @nb must be a member of an object of type
729  * struct pm_clk_notifier_block containing all of the requisite data.
730  * Specifically, the con_ids member of that object is used to enable or disable
731  * the device's clocks, depending on @action.
732  */
pm_clk_notify(struct notifier_block * nb,unsigned long action,void * data)733 static int pm_clk_notify(struct notifier_block *nb,
734 				 unsigned long action, void *data)
735 {
736 	struct pm_clk_notifier_block *clknb;
737 	struct device *dev = data;
738 	char **con_id;
739 
740 	dev_dbg(dev, "%s() %ld\n", __func__, action);
741 
742 	clknb = container_of(nb, struct pm_clk_notifier_block, nb);
743 
744 	switch (action) {
745 	case BUS_NOTIFY_BIND_DRIVER:
746 		if (clknb->con_ids[0]) {
747 			for (con_id = clknb->con_ids; *con_id; con_id++)
748 				enable_clock(dev, *con_id);
749 		} else {
750 			enable_clock(dev, NULL);
751 		}
752 		break;
753 	case BUS_NOTIFY_DRIVER_NOT_BOUND:
754 	case BUS_NOTIFY_UNBOUND_DRIVER:
755 		if (clknb->con_ids[0]) {
756 			for (con_id = clknb->con_ids; *con_id; con_id++)
757 				disable_clock(dev, *con_id);
758 		} else {
759 			disable_clock(dev, NULL);
760 		}
761 		break;
762 	}
763 
764 	return 0;
765 }
766 
767 #endif /* !CONFIG_PM_CLK */
768 
769 /**
770  * pm_clk_add_notifier - Add bus type notifier for power management clocks.
771  * @bus: Bus type to add the notifier to.
772  * @clknb: Notifier to be added to the given bus type.
773  *
774  * The nb member of @clknb is not expected to be initialized and its
775  * notifier_call member will be replaced with pm_clk_notify().  However,
776  * the remaining members of @clknb should be populated prior to calling this
777  * routine.
778  */
pm_clk_add_notifier(struct bus_type * bus,struct pm_clk_notifier_block * clknb)779 void pm_clk_add_notifier(struct bus_type *bus,
780 				 struct pm_clk_notifier_block *clknb)
781 {
782 	if (!bus || !clknb)
783 		return;
784 
785 	clknb->nb.notifier_call = pm_clk_notify;
786 	bus_register_notifier(bus, &clknb->nb);
787 }
788 EXPORT_SYMBOL_GPL(pm_clk_add_notifier);
789