xref: /linux/drivers/base/power/wakeup.c (revision 44f57d78)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/wakeup.c - System wakeup events framework
4  *
5  * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
6  */
7 #define pr_fmt(fmt) "PM: " fmt
8 
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/sched/signal.h>
12 #include <linux/capability.h>
13 #include <linux/export.h>
14 #include <linux/suspend.h>
15 #include <linux/seq_file.h>
16 #include <linux/debugfs.h>
17 #include <linux/pm_wakeirq.h>
18 #include <trace/events/power.h>
19 
20 #include "power.h"
21 
22 #ifndef CONFIG_SUSPEND
23 suspend_state_t pm_suspend_target_state;
24 #define pm_suspend_target_state	(PM_SUSPEND_ON)
25 #endif
26 
27 /*
28  * If set, the suspend/hibernate code will abort transitions to a sleep state
29  * if wakeup events are registered during or immediately before the transition.
30  */
31 bool events_check_enabled __read_mostly;
32 
33 /* First wakeup IRQ seen by the kernel in the last cycle. */
34 unsigned int pm_wakeup_irq __read_mostly;
35 
36 /* If greater than 0 and the system is suspending, terminate the suspend. */
37 static atomic_t pm_abort_suspend __read_mostly;
38 
39 /*
40  * Combined counters of registered wakeup events and wakeup events in progress.
41  * They need to be modified together atomically, so it's better to use one
42  * atomic variable to hold them both.
43  */
44 static atomic_t combined_event_count = ATOMIC_INIT(0);
45 
46 #define IN_PROGRESS_BITS	(sizeof(int) * 4)
47 #define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
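
/*
 * For illustration (assuming a 32-bit int, so IN_PROGRESS_BITS == 16), the
 * packed value is
 *
 *	comb = (cnt << IN_PROGRESS_BITS) | inpr
 *
 * Activating a wakeup source does atomic_inc(), i.e. inpr += 1, while
 * deactivating one adds MAX_IN_PROGRESS (0xffff == 0x10000 - 1), which is
 * equivalent to cnt += 1 and inpr -= 1 in a single atomic operation.
 */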
48 
49 static void split_counters(unsigned int *cnt, unsigned int *inpr)
50 {
51 	unsigned int comb = atomic_read(&combined_event_count);
52 
53 	*cnt = (comb >> IN_PROGRESS_BITS);
54 	*inpr = comb & MAX_IN_PROGRESS;
55 }
56 
57 /* A preserved old value of the events counter. */
58 static unsigned int saved_count;
59 
60 static DEFINE_RAW_SPINLOCK(events_lock);
61 
62 static void pm_wakeup_timer_fn(struct timer_list *t);
63 
64 static LIST_HEAD(wakeup_sources);
65 
66 static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
67 
68 DEFINE_STATIC_SRCU(wakeup_srcu);
69 
70 static struct wakeup_source deleted_ws = {
71 	.name = "deleted",
72 	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
73 };
74 
75 /**
76  * wakeup_source_prepare - Prepare a new wakeup source for initialization.
77  * @ws: Wakeup source to prepare.
78  * @name: Pointer to the name of the new wakeup source.
79  *
80  * Callers must ensure that the @name string won't be freed while @ws is still
81  * in use.
82  */
83 void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
84 {
85 	if (ws) {
86 		memset(ws, 0, sizeof(*ws));
87 		ws->name = name;
88 	}
89 }
90 EXPORT_SYMBOL_GPL(wakeup_source_prepare);
91 
92 /**
93  * wakeup_source_create - Create a struct wakeup_source object.
94  * @name: Name of the new wakeup source.
95  */
96 struct wakeup_source *wakeup_source_create(const char *name)
97 {
98 	struct wakeup_source *ws;
99 
100 	ws = kmalloc(sizeof(*ws), GFP_KERNEL);
101 	if (!ws)
102 		return NULL;
103 
104 	wakeup_source_prepare(ws, name ? kstrdup_const(name, GFP_KERNEL) : NULL);
105 	return ws;
106 }
107 EXPORT_SYMBOL_GPL(wakeup_source_create);
108 
109 /*
110  * Record the statistics of a wakeup source being deleted into a dummy wakeup source.
111  */
112 static void wakeup_source_record(struct wakeup_source *ws)
113 {
114 	unsigned long flags;
115 
116 	spin_lock_irqsave(&deleted_ws.lock, flags);
117 
118 	if (ws->event_count) {
119 		deleted_ws.total_time =
120 			ktime_add(deleted_ws.total_time, ws->total_time);
121 		deleted_ws.prevent_sleep_time =
122 			ktime_add(deleted_ws.prevent_sleep_time,
123 				  ws->prevent_sleep_time);
124 		deleted_ws.max_time =
125 			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
126 				deleted_ws.max_time : ws->max_time;
127 		deleted_ws.event_count += ws->event_count;
128 		deleted_ws.active_count += ws->active_count;
129 		deleted_ws.relax_count += ws->relax_count;
130 		deleted_ws.expire_count += ws->expire_count;
131 		deleted_ws.wakeup_count += ws->wakeup_count;
132 	}
133 
134 	spin_unlock_irqrestore(&deleted_ws.lock, flags);
135 }
136 
137 /**
138  * wakeup_source_destroy - Destroy a struct wakeup_source object.
139  * @ws: Wakeup source to destroy.
140  *
141  * Use only for wakeup source objects created with wakeup_source_create().
142  */
143 void wakeup_source_destroy(struct wakeup_source *ws)
144 {
145 	if (!ws)
146 		return;
147 
148 	__pm_relax(ws);
149 	wakeup_source_record(ws);
150 	kfree_const(ws->name);
151 	kfree(ws);
152 }
153 EXPORT_SYMBOL_GPL(wakeup_source_destroy);
154 
155 /**
156  * wakeup_source_add - Add given object to the list of wakeup sources.
157  * @ws: Wakeup source object to add to the list.
158  */
159 void wakeup_source_add(struct wakeup_source *ws)
160 {
161 	unsigned long flags;
162 
163 	if (WARN_ON(!ws))
164 		return;
165 
166 	spin_lock_init(&ws->lock);
167 	timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
168 	ws->active = false;
169 
170 	raw_spin_lock_irqsave(&events_lock, flags);
171 	list_add_rcu(&ws->entry, &wakeup_sources);
172 	raw_spin_unlock_irqrestore(&events_lock, flags);
173 }
174 EXPORT_SYMBOL_GPL(wakeup_source_add);
175 
176 /**
177  * wakeup_source_remove - Remove given object from the wakeup sources list.
178  * @ws: Wakeup source object to remove from the list.
179  */
180 void wakeup_source_remove(struct wakeup_source *ws)
181 {
182 	unsigned long flags;
183 
184 	if (WARN_ON(!ws))
185 		return;
186 
187 	raw_spin_lock_irqsave(&events_lock, flags);
188 	list_del_rcu(&ws->entry);
189 	raw_spin_unlock_irqrestore(&events_lock, flags);
190 	synchronize_srcu(&wakeup_srcu);
191 
192 	del_timer_sync(&ws->timer);
193 	/*
194 	 * Clear timer.function to make wakeup_source_not_registered() treat
195 	 * this wakeup source as not registered.
196 	 */
197 	ws->timer.function = NULL;
198 }
199 EXPORT_SYMBOL_GPL(wakeup_source_remove);
200 
201 /**
202  * wakeup_source_register - Create wakeup source and add it to the list.
203  * @name: Name of the wakeup source to register.
204  */
205 struct wakeup_source *wakeup_source_register(const char *name)
206 {
207 	struct wakeup_source *ws;
208 
209 	ws = wakeup_source_create(name);
210 	if (ws)
211 		wakeup_source_add(ws);
212 
213 	return ws;
214 }
215 EXPORT_SYMBOL_GPL(wakeup_source_register);
216 
217 /**
218  * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
219  * @ws: Wakeup source object to unregister.
220  */
221 void wakeup_source_unregister(struct wakeup_source *ws)
222 {
223 	if (ws) {
224 		wakeup_source_remove(ws);
225 		wakeup_source_destroy(ws);
226 	}
227 }
228 EXPORT_SYMBOL_GPL(wakeup_source_unregister);
229 
230 /**
231  * device_wakeup_attach - Attach a wakeup source object to a device object.
232  * @dev: Device to handle.
233  * @ws: Wakeup source object to attach to @dev.
234  *
235  * This causes @dev to be treated as a wakeup device.
236  */
237 static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
238 {
239 	spin_lock_irq(&dev->power.lock);
240 	if (dev->power.wakeup) {
241 		spin_unlock_irq(&dev->power.lock);
242 		return -EEXIST;
243 	}
244 	dev->power.wakeup = ws;
245 	if (dev->power.wakeirq)
246 		device_wakeup_attach_irq(dev, dev->power.wakeirq);
247 	spin_unlock_irq(&dev->power.lock);
248 	return 0;
249 }
250 
251 /**
252  * device_wakeup_enable - Enable given device to be a wakeup source.
253  * @dev: Device to handle.
254  *
255  * Create a wakeup source object, register it and attach it to @dev.
256  */
257 int device_wakeup_enable(struct device *dev)
258 {
259 	struct wakeup_source *ws;
260 	int ret;
261 
262 	if (!dev || !dev->power.can_wakeup)
263 		return -EINVAL;
264 
265 	if (pm_suspend_target_state != PM_SUSPEND_ON)
266 		dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
267 
268 	ws = wakeup_source_register(dev_name(dev));
269 	if (!ws)
270 		return -ENOMEM;
271 
272 	ret = device_wakeup_attach(dev, ws);
273 	if (ret)
274 		wakeup_source_unregister(ws);
275 
276 	return ret;
277 }
278 EXPORT_SYMBOL_GPL(device_wakeup_enable);
279 
280 /**
281  * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
282  * @dev: Device to handle
283  * @wakeirq: Device specific wakeirq entry
284  *
285  * Attach a device wakeirq to the wakeup source so the device
286  * wake IRQ can be configured automatically for suspend and
287  * resume.
288  *
289  * Call under the device's power.lock lock.
290  */
291 void device_wakeup_attach_irq(struct device *dev,
292 			     struct wake_irq *wakeirq)
293 {
294 	struct wakeup_source *ws;
295 
296 	ws = dev->power.wakeup;
297 	if (!ws)
298 		return;
299 
300 	if (ws->wakeirq)
301 		dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
302 
303 	ws->wakeirq = wakeirq;
304 }
305 
306 /**
307  * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
308  * @dev: Device to handle
309  *
310  * Removes a device wakeirq from the wakeup source.
311  *
312  * Call under the device's power.lock lock.
313  */
314 void device_wakeup_detach_irq(struct device *dev)
315 {
316 	struct wakeup_source *ws;
317 
318 	ws = dev->power.wakeup;
319 	if (ws)
320 		ws->wakeirq = NULL;
321 }
322 
323 /**
324  * device_wakeup_arm_wake_irqs(void)
325  *
326  * Iterates over the list of device wakeirqs to arm them.
327  */
328 void device_wakeup_arm_wake_irqs(void)
329 {
330 	struct wakeup_source *ws;
331 	int srcuidx;
332 
333 	srcuidx = srcu_read_lock(&wakeup_srcu);
334 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
335 		dev_pm_arm_wake_irq(ws->wakeirq);
336 	srcu_read_unlock(&wakeup_srcu, srcuidx);
337 }
338 
339 /**
340  * device_wakeup_disarm_wake_irqs(void)
341  *
342  * Iterates over the list of device wakeirqs to disarm them.
343  */
344 void device_wakeup_disarm_wake_irqs(void)
345 {
346 	struct wakeup_source *ws;
347 	int srcuidx;
348 
349 	srcuidx = srcu_read_lock(&wakeup_srcu);
350 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
351 		dev_pm_disarm_wake_irq(ws->wakeirq);
352 	srcu_read_unlock(&wakeup_srcu, srcuidx);
353 }
354 
355 /**
356  * device_wakeup_detach - Detach a device's wakeup source object from it.
357  * @dev: Device to detach the wakeup source object from.
358  *
359  * After it returns, @dev will not be treated as a wakeup device any more.
360  */
361 static struct wakeup_source *device_wakeup_detach(struct device *dev)
362 {
363 	struct wakeup_source *ws;
364 
365 	spin_lock_irq(&dev->power.lock);
366 	ws = dev->power.wakeup;
367 	dev->power.wakeup = NULL;
368 	spin_unlock_irq(&dev->power.lock);
369 	return ws;
370 }
371 
372 /**
373  * device_wakeup_disable - Do not regard a device as a wakeup source any more.
374  * @dev: Device to handle.
375  *
376  * Detach the @dev's wakeup source object from it, unregister this wakeup source
377  * object and destroy it.
378  */
379 int device_wakeup_disable(struct device *dev)
380 {
381 	struct wakeup_source *ws;
382 
383 	if (!dev || !dev->power.can_wakeup)
384 		return -EINVAL;
385 
386 	ws = device_wakeup_detach(dev);
387 	wakeup_source_unregister(ws);
388 	return 0;
389 }
390 EXPORT_SYMBOL_GPL(device_wakeup_disable);
391 
392 /**
393  * device_set_wakeup_capable - Set/reset device wakeup capability flag.
394  * @dev: Device to handle.
395  * @capable: Whether or not @dev is capable of waking up the system from sleep.
396  *
397  * If @capable is set, set the @dev's power.can_wakeup flag and add its
398  * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
399  * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
400  *
401  * This function may sleep and it can't be called from any context where
402  * sleeping is not allowed.
403  */
404 void device_set_wakeup_capable(struct device *dev, bool capable)
405 {
406 	if (!!dev->power.can_wakeup == !!capable)
407 		return;
408 
409 	dev->power.can_wakeup = capable;
410 	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
411 		if (capable) {
412 			int ret = wakeup_sysfs_add(dev);
413 
414 			if (ret)
415 				dev_info(dev, "Wakeup sysfs attributes not added\n");
416 		} else {
417 			wakeup_sysfs_remove(dev);
418 		}
419 	}
420 }
421 EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
422 
423 /**
424  * device_init_wakeup - Device wakeup initialization.
425  * @dev: Device to handle.
426  * @enable: Whether or not to enable @dev as a wakeup device.
427  *
428  * By default, most devices should leave wakeup disabled.  The exceptions are
429  * devices that everyone expects to be wakeup sources: keyboards, power buttons,
430  * possibly network interfaces, etc.  Also, devices that don't generate their
431  * own wakeup requests but merely forward requests from one bus to another
432  * (like PCI bridges) should have wakeup enabled by default.
433  */
434 int device_init_wakeup(struct device *dev, bool enable)
435 {
436 	int ret = 0;
437 
438 	if (!dev)
439 		return -EINVAL;
440 
441 	if (enable) {
442 		device_set_wakeup_capable(dev, true);
443 		ret = device_wakeup_enable(dev);
444 	} else {
445 		device_wakeup_disable(dev);
446 		device_set_wakeup_capable(dev, false);
447 	}
448 
449 	return ret;
450 }
451 EXPORT_SYMBOL_GPL(device_init_wakeup);
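
/*
 * For illustration only (hypothetical driver code, not part of this file),
 * a typical probe/remove pair for a device that should be a wakeup source
 * by default:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		device_init_wakeup(&pdev->dev, true);
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		device_init_wakeup(&pdev->dev, false);
 *		return 0;
 *	}
 */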
452 
453 /**
454  * device_set_wakeup_enable - Enable or disable a device to wake up the system.
455  * @dev: Device to handle.
456  */
457 int device_set_wakeup_enable(struct device *dev, bool enable)
458 {
459 	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
460 }
461 EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
462 
463 /**
464  * wakeup_source_not_registered - validate the given wakeup source.
465  * @ws: Wakeup source to be validated.
466  */
467 static bool wakeup_source_not_registered(struct wakeup_source *ws)
468 {
469 	/*
470 	 * Use timer struct to check if the given source is initialized
471 	 * by wakeup_source_add.
472 	 */
473 	return ws->timer.function != pm_wakeup_timer_fn;
474 }
475 
476 /*
477  * The functions below use the observation that each wakeup event starts a
478  * period in which the system should not be suspended.  The moment this period
479  * will end depends on how the wakeup event is going to be processed after being
480  * detected and all of the possible cases can be divided into two distinct
481  * groups.
482  *
483  * First, a wakeup event may be detected by the same functional unit that will
484  * carry out the entire processing of it and possibly will pass it to user space
485  * for further processing.  In that case the functional unit that has detected
486  * the event may later "close" the "no suspend" period associated with it
487  * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
488  * pm_relax(), balanced with each other, is supposed to be used in such
489  * situations.
490  *
491  * Second, a wakeup event may be detected by one functional unit and processed
492  * by another one.  In that case the unit that has detected it cannot really
493  * "close" the "no suspend" period associated with it, unless it knows in
494  * advance what's going to happen to the event during processing.  This
495  * knowledge, however, may not be available to it, so it can simply specify time
496  * to wait before the system can be suspended and pass it as the second
497  * argument of pm_wakeup_event().
498  *
499  * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
500  * "no suspend" period will be ended either by the pm_relax(), or by the timer
501  * function executed when the timer expires, whichever comes first.
502  */
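
/*
 * For illustration only (hypothetical driver code, not part of this file),
 * sketches of the two cases described above:
 *
 *	// Case 1: the unit that detects the event also finishes processing
 *	// it, so it can close the "no suspend" period itself.
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		pm_stay_awake(dev);
 *		foo_process_event(dev);	// assumed driver-specific helper
 *		pm_relax(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 *	// Case 2: the event is handed off for processing elsewhere, so give
 *	// the consumer up to 500 ms before the system may suspend again.
 *	static irqreturn_t bar_irq_handler(int irq, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		bar_queue_event(dev);	// assumed driver-specific helper
 *		pm_wakeup_dev_event(dev, 500, false);
 *		return IRQ_HANDLED;
 *	}
 */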
503 
504 /**
505  * wakeup_source_activate - Mark given wakeup source as active.
506  * @ws: Wakeup source to handle.
507  *
508  * Update the @ws' statistics and, if @ws has just been activated, notify the PM
509  * core of the event by incrementing the counter of wakeup events being
510  * processed.
511  */
512 static void wakeup_source_activate(struct wakeup_source *ws)
513 {
514 	unsigned int cec;
515 
516 	if (WARN_ONCE(wakeup_source_not_registered(ws),
517 			"unregistered wakeup source\n"))
518 		return;
519 
520 	ws->active = true;
521 	ws->active_count++;
522 	ws->last_time = ktime_get();
523 	if (ws->autosleep_enabled)
524 		ws->start_prevent_time = ws->last_time;
525 
526 	/* Increment the counter of events in progress. */
527 	cec = atomic_inc_return(&combined_event_count);
528 
529 	trace_wakeup_source_activate(ws->name, cec);
530 }
531 
532 /**
533  * wakeup_source_report_event - Report wakeup event using the given source.
534  * @ws: Wakeup source to report the event for.
535  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
536  */
537 static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
538 {
539 	ws->event_count++;
540 	/* This is racy, but the counter is approximate anyway. */
541 	if (events_check_enabled)
542 		ws->wakeup_count++;
543 
544 	if (!ws->active)
545 		wakeup_source_activate(ws);
546 
547 	if (hard)
548 		pm_system_wakeup();
549 }
550 
551 /**
552  * __pm_stay_awake - Notify the PM core of a wakeup event.
553  * @ws: Wakeup source object associated with the source of the event.
554  *
555  * It is safe to call this function from interrupt context.
556  */
557 void __pm_stay_awake(struct wakeup_source *ws)
558 {
559 	unsigned long flags;
560 
561 	if (!ws)
562 		return;
563 
564 	spin_lock_irqsave(&ws->lock, flags);
565 
566 	wakeup_source_report_event(ws, false);
567 	del_timer(&ws->timer);
568 	ws->timer_expires = 0;
569 
570 	spin_unlock_irqrestore(&ws->lock, flags);
571 }
572 EXPORT_SYMBOL_GPL(__pm_stay_awake);
573 
574 /**
575  * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
576  * @dev: Device the wakeup event is related to.
577  *
578  * Notify the PM core of a wakeup event (signaled by @dev) by calling
579  * __pm_stay_awake for the @dev's wakeup source object.
580  *
581  * Call this function after detecting a wakeup event if pm_relax() is going
582  * to be called directly after processing the event (and possibly passing it to
583  * user space for further processing).
584  */
585 void pm_stay_awake(struct device *dev)
586 {
587 	unsigned long flags;
588 
589 	if (!dev)
590 		return;
591 
592 	spin_lock_irqsave(&dev->power.lock, flags);
593 	__pm_stay_awake(dev->power.wakeup);
594 	spin_unlock_irqrestore(&dev->power.lock, flags);
595 }
596 EXPORT_SYMBOL_GPL(pm_stay_awake);
597 
598 #ifdef CONFIG_PM_AUTOSLEEP
599 static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
600 {
601 	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
602 	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
603 }
604 #else
605 static inline void update_prevent_sleep_time(struct wakeup_source *ws,
606 					     ktime_t now) {}
607 #endif
608 
609 /**
610  * wakeup_source_deactivate - Mark given wakeup source as inactive.
611  * @ws: Wakeup source to handle.
612  *
613  * Update the @ws' statistics and notify the PM core that the wakeup source has
614  * become inactive by decrementing the counter of wakeup events being processed
615  * and incrementing the counter of registered wakeup events.
616  */
617 static void wakeup_source_deactivate(struct wakeup_source *ws)
618 {
619 	unsigned int cnt, inpr, cec;
620 	ktime_t duration;
621 	ktime_t now;
622 
623 	ws->relax_count++;
624 	/*
625 	 * __pm_relax() may be called directly or from a timer function.
626 	 * If it is called directly right after the timer function has been
627 	 * started, but before the timer function calls __pm_relax(), it is
628 	 * possible that __pm_stay_awake() will be called in the meantime and
629 	 * will set ws->active.  Then, ws->active may be cleared immediately
630 	 * by the __pm_relax() called from the timer function, but in such a
631 	 * case ws->relax_count will be different from ws->active_count.
632 	 */
633 	if (ws->relax_count != ws->active_count) {
634 		ws->relax_count--;
635 		return;
636 	}
637 
638 	ws->active = false;
639 
640 	now = ktime_get();
641 	duration = ktime_sub(now, ws->last_time);
642 	ws->total_time = ktime_add(ws->total_time, duration);
643 	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
644 		ws->max_time = duration;
645 
646 	ws->last_time = now;
647 	del_timer(&ws->timer);
648 	ws->timer_expires = 0;
649 
650 	if (ws->autosleep_enabled)
651 		update_prevent_sleep_time(ws, now);
652 
653 	/*
654 	 * Increment the counter of registered wakeup events and decrement the
655 	 * counter of wakeup events in progress simultaneously.
656 	 */
657 	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
658 	trace_wakeup_source_deactivate(ws->name, cec);
659 
660 	split_counters(&cnt, &inpr);
661 	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
662 		wake_up(&wakeup_count_wait_queue);
663 }
664 
665 /**
666  * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
667  * @ws: Wakeup source object associated with the source of the event.
668  *
669  * Call this function for wakeup events whose processing started with calling
670  * __pm_stay_awake().
671  *
672  * It is safe to call it from interrupt context.
673  */
674 void __pm_relax(struct wakeup_source *ws)
675 {
676 	unsigned long flags;
677 
678 	if (!ws)
679 		return;
680 
681 	spin_lock_irqsave(&ws->lock, flags);
682 	if (ws->active)
683 		wakeup_source_deactivate(ws);
684 	spin_unlock_irqrestore(&ws->lock, flags);
685 }
686 EXPORT_SYMBOL_GPL(__pm_relax);
687 
688 /**
689  * pm_relax - Notify the PM core that processing of a wakeup event has ended.
690  * @dev: Device that signaled the event.
691  *
692  * Execute __pm_relax() for the @dev's wakeup source object.
693  */
694 void pm_relax(struct device *dev)
695 {
696 	unsigned long flags;
697 
698 	if (!dev)
699 		return;
700 
701 	spin_lock_irqsave(&dev->power.lock, flags);
702 	__pm_relax(dev->power.wakeup);
703 	spin_unlock_irqrestore(&dev->power.lock, flags);
704 }
705 EXPORT_SYMBOL_GPL(pm_relax);
706 
707 /**
708  * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
709  * @t: Timer used to schedule the delayed finalization of the event.
710  *
711  * Call wakeup_source_deactivate() for the wakeup source associated with the
712  * timer @t if it is currently active, its timer has not been canceled and the
713  * expiration time of the timer is not in the future.
714  */
715 static void pm_wakeup_timer_fn(struct timer_list *t)
716 {
717 	struct wakeup_source *ws = from_timer(ws, t, timer);
718 	unsigned long flags;
719 
720 	spin_lock_irqsave(&ws->lock, flags);
721 
722 	if (ws->active && ws->timer_expires
723 	    && time_after_eq(jiffies, ws->timer_expires)) {
724 		wakeup_source_deactivate(ws);
725 		ws->expire_count++;
726 	}
727 
728 	spin_unlock_irqrestore(&ws->lock, flags);
729 }
730 
731 /**
732  * pm_wakeup_ws_event - Notify the PM core of a wakeup event.
733  * @ws: Wakeup source object associated with the event source.
734  * @msec: Anticipated event processing time (in milliseconds).
735  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
736  *
737  * Notify the PM core of a wakeup event whose source is @ws that will take
738  * approximately @msec milliseconds to be processed by the kernel.  If @ws is
739  * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
740  * execute pm_wakeup_timer_fn() in the future.
741  *
742  * It is safe to call this function from interrupt context.
743  */
744 void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
745 {
746 	unsigned long flags;
747 	unsigned long expires;
748 
749 	if (!ws)
750 		return;
751 
752 	spin_lock_irqsave(&ws->lock, flags);
753 
754 	wakeup_source_report_event(ws, hard);
755 
756 	if (!msec) {
757 		wakeup_source_deactivate(ws);
758 		goto unlock;
759 	}
760 
761 	expires = jiffies + msecs_to_jiffies(msec);
762 	if (!expires)
763 		expires = 1;
764 
765 	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
766 		mod_timer(&ws->timer, expires);
767 		ws->timer_expires = expires;
768 	}
769 
770  unlock:
771 	spin_unlock_irqrestore(&ws->lock, flags);
772 }
773 EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
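
/*
 * For illustration only (hypothetical code, not part of this file), a
 * subsystem that owns its own wakeup source might report a timed event
 * like this:
 *
 *	struct wakeup_source *ws = wakeup_source_register("foo_events");
 *
 *	// On event detection: keep the system awake for roughly 200 ms so
 *	// that the consumer of the event has a chance to pick it up.
 *	pm_wakeup_ws_event(ws, 200, false);
 *
 *	// On teardown:
 *	wakeup_source_unregister(ws);
 */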
774 
775 /**
776  * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
777  * @dev: Device the wakeup event is related to.
778  * @msec: Anticipated event processing time (in milliseconds).
779  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
780  *
781  * Call pm_wakeup_ws_event() for the @dev's wakeup source object.
782  */
783 void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
784 {
785 	unsigned long flags;
786 
787 	if (!dev)
788 		return;
789 
790 	spin_lock_irqsave(&dev->power.lock, flags);
791 	pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
792 	spin_unlock_irqrestore(&dev->power.lock, flags);
793 }
794 EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
795 
796 void pm_print_active_wakeup_sources(void)
797 {
798 	struct wakeup_source *ws;
799 	int srcuidx, active = 0;
800 	struct wakeup_source *last_activity_ws = NULL;
801 
802 	srcuidx = srcu_read_lock(&wakeup_srcu);
803 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
804 		if (ws->active) {
805 			pm_pr_dbg("active wakeup source: %s\n", ws->name);
806 			active = 1;
807 		} else if (!active &&
808 			   (!last_activity_ws ||
809 			    ktime_to_ns(ws->last_time) >
810 			    ktime_to_ns(last_activity_ws->last_time))) {
811 			last_activity_ws = ws;
812 		}
813 	}
814 
815 	if (!active && last_activity_ws)
816 		pm_pr_dbg("last active wakeup source: %s\n",
817 			last_activity_ws->name);
818 	srcu_read_unlock(&wakeup_srcu, srcuidx);
819 }
820 EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
821 
822 /**
823  * pm_wakeup_pending - Check if power transition in progress should be aborted.
824  *
825  * Compare the current number of registered wakeup events with its preserved
826  * value from the past and return true if new wakeup events have been registered
827  * since the old value was stored.  Also return true if the current number of
828  * wakeup events being processed is different from zero.
829  */
830 bool pm_wakeup_pending(void)
831 {
832 	unsigned long flags;
833 	bool ret = false;
834 
835 	raw_spin_lock_irqsave(&events_lock, flags);
836 	if (events_check_enabled) {
837 		unsigned int cnt, inpr;
838 
839 		split_counters(&cnt, &inpr);
840 		ret = (cnt != saved_count || inpr > 0);
841 		events_check_enabled = !ret;
842 	}
843 	raw_spin_unlock_irqrestore(&events_lock, flags);
844 
845 	if (ret) {
846 		pm_pr_dbg("Wakeup pending, aborting suspend\n");
847 		pm_print_active_wakeup_sources();
848 	}
849 
850 	return ret || atomic_read(&pm_abort_suspend) > 0;
851 }
852 
853 void pm_system_wakeup(void)
854 {
855 	atomic_inc(&pm_abort_suspend);
856 	s2idle_wake();
857 }
858 EXPORT_SYMBOL_GPL(pm_system_wakeup);
859 
860 void pm_system_cancel_wakeup(void)
861 {
862 	atomic_dec(&pm_abort_suspend);
863 }
864 
865 void pm_wakeup_clear(bool reset)
866 {
867 	pm_wakeup_irq = 0;
868 	if (reset)
869 		atomic_set(&pm_abort_suspend, 0);
870 }
871 
872 void pm_system_irq_wakeup(unsigned int irq_number)
873 {
874 	if (pm_wakeup_irq == 0) {
875 		pm_wakeup_irq = irq_number;
876 		pm_system_wakeup();
877 	}
878 }
879 
880 /**
881  * pm_get_wakeup_count - Read the number of registered wakeup events.
882  * @count: Address to store the value at.
883  * @block: Whether or not to block.
884  *
885  * Store the number of registered wakeup events at the address in @count.  If
886  * @block is set, block until the current number of wakeup events being
887  * processed is zero.
888  *
889  * Return 'false' if the current number of wakeup events being processed is
890  * nonzero.  Otherwise return 'true'.
891  */
892 bool pm_get_wakeup_count(unsigned int *count, bool block)
893 {
894 	unsigned int cnt, inpr;
895 
896 	if (block) {
897 		DEFINE_WAIT(wait);
898 
899 		for (;;) {
900 			prepare_to_wait(&wakeup_count_wait_queue, &wait,
901 					TASK_INTERRUPTIBLE);
902 			split_counters(&cnt, &inpr);
903 			if (inpr == 0 || signal_pending(current))
904 				break;
905 			pm_print_active_wakeup_sources();
906 			schedule();
907 		}
908 		finish_wait(&wakeup_count_wait_queue, &wait);
909 	}
910 
911 	split_counters(&cnt, &inpr);
912 	*count = cnt;
913 	return !inpr;
914 }
915 
916 /**
917  * pm_save_wakeup_count - Save the current number of registered wakeup events.
918  * @count: Value to compare with the current number of registered wakeup events.
919  *
920  * If @count is equal to the current number of registered wakeup events and the
921  * current number of wakeup events being processed is zero, store @count as the
922  * old number of registered wakeup events for pm_wakeup_pending(), enable
923  * wakeup events detection and return 'true'.  Otherwise disable wakeup events
924  * detection and return 'false'.
925  */
926 bool pm_save_wakeup_count(unsigned int count)
927 {
928 	unsigned int cnt, inpr;
929 	unsigned long flags;
930 
931 	events_check_enabled = false;
932 	raw_spin_lock_irqsave(&events_lock, flags);
933 	split_counters(&cnt, &inpr);
934 	if (cnt == count && inpr == 0) {
935 		saved_count = count;
936 		events_check_enabled = true;
937 	}
938 	raw_spin_unlock_irqrestore(&events_lock, flags);
939 	return events_check_enabled;
940 }
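
/*
 * For illustration, the user space handshake that pm_get_wakeup_count() and
 * pm_save_wakeup_count() back (via /sys/power/wakeup_count), sketched as the
 * steps a suspend daemon would take:
 *
 *	1. Read /sys/power/wakeup_count; this blocks in pm_get_wakeup_count()
 *	   until no wakeup events are in progress.
 *	2. Process any wakeup events pending in user space.
 *	3. Write the value read in step 1 back to /sys/power/wakeup_count;
 *	   this fails in pm_save_wakeup_count() if new events have been
 *	   registered in the meantime.
 *	4. If the write succeeded, write to /sys/power/state to suspend.
 */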
941 
942 #ifdef CONFIG_PM_AUTOSLEEP
943 /**
944  * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
945  * @enabled: Whether to set or to clear the autosleep_enabled flags.
946  * @set: Whether to set or to clear the autosleep_enabled flags.
947 void pm_wakep_autosleep_enabled(bool set)
948 {
949 	struct wakeup_source *ws;
950 	ktime_t now = ktime_get();
951 	int srcuidx;
952 
953 	srcuidx = srcu_read_lock(&wakeup_srcu);
954 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
955 		spin_lock_irq(&ws->lock);
956 		if (ws->autosleep_enabled != set) {
957 			ws->autosleep_enabled = set;
958 			if (ws->active) {
959 				if (set)
960 					ws->start_prevent_time = now;
961 				else
962 					update_prevent_sleep_time(ws, now);
963 			}
964 		}
965 		spin_unlock_irq(&ws->lock);
966 	}
967 	srcu_read_unlock(&wakeup_srcu, srcuidx);
968 }
969 #endif /* CONFIG_PM_AUTOSLEEP */
970 
971 static struct dentry *wakeup_sources_stats_dentry;
972 
973 /**
974  * print_wakeup_source_stats - Print wakeup source statistics information.
975  * @m: seq_file to print the statistics into.
976  * @ws: Wakeup source object to print the statistics for.
977  */
978 static int print_wakeup_source_stats(struct seq_file *m,
979 				     struct wakeup_source *ws)
980 {
981 	unsigned long flags;
982 	ktime_t total_time;
983 	ktime_t max_time;
984 	unsigned long active_count;
985 	ktime_t active_time;
986 	ktime_t prevent_sleep_time;
987 
988 	spin_lock_irqsave(&ws->lock, flags);
989 
990 	total_time = ws->total_time;
991 	max_time = ws->max_time;
992 	prevent_sleep_time = ws->prevent_sleep_time;
993 	active_count = ws->active_count;
994 	if (ws->active) {
995 		ktime_t now = ktime_get();
996 
997 		active_time = ktime_sub(now, ws->last_time);
998 		total_time = ktime_add(total_time, active_time);
999 		if (active_time > max_time)
1000 			max_time = active_time;
1001 
1002 		if (ws->autosleep_enabled)
1003 			prevent_sleep_time = ktime_add(prevent_sleep_time,
1004 				ktime_sub(now, ws->start_prevent_time));
1005 	} else {
1006 		active_time = 0;
1007 	}
1008 
1009 	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
1010 		   ws->name, active_count, ws->event_count,
1011 		   ws->wakeup_count, ws->expire_count,
1012 		   ktime_to_ms(active_time), ktime_to_ms(total_time),
1013 		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
1014 		   ktime_to_ms(prevent_sleep_time));
1015 
1016 	spin_unlock_irqrestore(&ws->lock, flags);
1017 
1018 	return 0;
1019 }
1020 
1021 static void *wakeup_sources_stats_seq_start(struct seq_file *m,
1022 					loff_t *pos)
1023 {
1024 	struct wakeup_source *ws;
1025 	loff_t n = *pos;
1026 	int *srcuidx = m->private;
1027 
1028 	if (n == 0) {
1029 		seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
1030 			"expire_count\tactive_since\ttotal_time\tmax_time\t"
1031 			"last_change\tprevent_suspend_time\n");
1032 	}
1033 
1034 	*srcuidx = srcu_read_lock(&wakeup_srcu);
1035 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
1036 		if (n-- <= 0)
1037 			return ws;
1038 	}
1039 
1040 	return NULL;
1041 }
1042 
1043 static void *wakeup_sources_stats_seq_next(struct seq_file *m,
1044 					void *v, loff_t *pos)
1045 {
1046 	struct wakeup_source *ws = v;
1047 	struct wakeup_source *next_ws = NULL;
1048 
1049 	++(*pos);
1050 
1051 	list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
1052 		next_ws = ws;
1053 		break;
1054 	}
1055 
1056 	return next_ws;
1057 }
1058 
1059 static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
1060 {
1061 	int *srcuidx = m->private;
1062 
1063 	srcu_read_unlock(&wakeup_srcu, *srcuidx);
1064 }
1065 
1066 /**
1067  * wakeup_sources_stats_seq_show - Print wakeup sources statistics information.
1068  * @m: seq_file to print the statistics into.
1069  * @v: wakeup_source of each iteration
1070  */
1071 static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
1072 {
1073 	struct wakeup_source *ws = v;
1074 
1075 	print_wakeup_source_stats(m, ws);
1076 
1077 	return 0;
1078 }
1079 
1080 static const struct seq_operations wakeup_sources_stats_seq_ops = {
1081 	.start = wakeup_sources_stats_seq_start,
1082 	.next  = wakeup_sources_stats_seq_next,
1083 	.stop  = wakeup_sources_stats_seq_stop,
1084 	.show  = wakeup_sources_stats_seq_show,
1085 };
1086 
1087 static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
1088 {
1089 	return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
1090 }
1091 
1092 static const struct file_operations wakeup_sources_stats_fops = {
1093 	.owner = THIS_MODULE,
1094 	.open = wakeup_sources_stats_open,
1095 	.read = seq_read,
1096 	.llseek = seq_lseek,
1097 	.release = seq_release_private,
1098 };
1099 
1100 static int __init wakeup_sources_debugfs_init(void)
1101 {
1102 	wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
1103 			S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
1104 	return 0;
1105 }
1106 
1107 postcore_initcall(wakeup_sources_debugfs_init);
1108