xref: /linux/drivers/base/power/wakeup.c (revision f86fd32d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/wakeup.c - System wakeup events framework
4  *
5  * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
6  */
7 #define pr_fmt(fmt) "PM: " fmt
8 
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/sched/signal.h>
12 #include <linux/capability.h>
13 #include <linux/export.h>
14 #include <linux/suspend.h>
15 #include <linux/seq_file.h>
16 #include <linux/debugfs.h>
17 #include <linux/pm_wakeirq.h>
18 #include <trace/events/power.h>
19 
20 #include "power.h"
21 
22 #ifndef CONFIG_SUSPEND
23 suspend_state_t pm_suspend_target_state;
24 #define pm_suspend_target_state	(PM_SUSPEND_ON)
25 #endif
26 
27 /*
28  * If set, the suspend/hibernate code will abort transitions to a sleep state
29  * if wakeup events are registered during or immediately before the transition.
30  */
31 bool events_check_enabled __read_mostly;
32 
33 /* First wakeup IRQ seen by the kernel in the last cycle. */
34 unsigned int pm_wakeup_irq __read_mostly;
35 
36 /* If greater than 0 and the system is suspending, terminate the suspend. */
37 static atomic_t pm_abort_suspend __read_mostly;
38 
39 /*
40  * Combined counters of registered wakeup events and wakeup events in progress.
41  * They need to be modified together atomically, so it's better to use one
42  * atomic variable to hold them both.
43  */
44 static atomic_t combined_event_count = ATOMIC_INIT(0);
45 
46 #define IN_PROGRESS_BITS	(sizeof(int) * 4)
47 #define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
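
/*
 * Illustrative note (not part of the original code): with a 32-bit int,
 * IN_PROGRESS_BITS is 16, so the low 16 bits of combined_event_count hold
 * the count of wakeup events in progress and the high 16 bits hold the
 * count of registered wakeup events.  Two operations on the packed value
 * are used below:
 *
 *	atomic_inc_return(&combined_event_count);
 *		"in progress" += 1 (see wakeup_source_activate())
 *
 *	atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
 *		adds 0xffff = 0x10000 - 1, i.e. "registered" += 1 and
 *		"in progress" -= 1 in a single atomic step
 *		(see wakeup_source_deactivate())
 */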
48 
49 static void split_counters(unsigned int *cnt, unsigned int *inpr)
50 {
51 	unsigned int comb = atomic_read(&combined_event_count);
52 
53 	*cnt = (comb >> IN_PROGRESS_BITS);
54 	*inpr = comb & MAX_IN_PROGRESS;
55 }
56 
57 /* A preserved old value of the events counter. */
58 static unsigned int saved_count;
59 
60 static DEFINE_RAW_SPINLOCK(events_lock);
61 
62 static void pm_wakeup_timer_fn(struct timer_list *t);
63 
64 static LIST_HEAD(wakeup_sources);
65 
66 static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
67 
68 DEFINE_STATIC_SRCU(wakeup_srcu);
69 
70 static struct wakeup_source deleted_ws = {
71 	.name = "deleted",
72 	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
73 };
74 
75 static DEFINE_IDA(wakeup_ida);
76 
77 /**
78  * wakeup_source_create - Create a struct wakeup_source object.
79  * @name: Name of the new wakeup source.
80  */
81 struct wakeup_source *wakeup_source_create(const char *name)
82 {
83 	struct wakeup_source *ws;
84 	const char *ws_name;
85 	int id;
86 
87 	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
88 	if (!ws)
89 		goto err_ws;
90 
91 	ws_name = kstrdup_const(name, GFP_KERNEL);
92 	if (!ws_name)
93 		goto err_name;
94 	ws->name = ws_name;
95 
96 	id = ida_alloc(&wakeup_ida, GFP_KERNEL);
97 	if (id < 0)
98 		goto err_id;
99 	ws->id = id;
100 
101 	return ws;
102 
103 err_id:
104 	kfree_const(ws->name);
105 err_name:
106 	kfree(ws);
107 err_ws:
108 	return NULL;
109 }
110 EXPORT_SYMBOL_GPL(wakeup_source_create);
111 
112 /*
113  * Record the statistics of a wakeup source being deleted into a dummy wakeup_source.
114  */
115 static void wakeup_source_record(struct wakeup_source *ws)
116 {
117 	unsigned long flags;
118 
119 	spin_lock_irqsave(&deleted_ws.lock, flags);
120 
121 	if (ws->event_count) {
122 		deleted_ws.total_time =
123 			ktime_add(deleted_ws.total_time, ws->total_time);
124 		deleted_ws.prevent_sleep_time =
125 			ktime_add(deleted_ws.prevent_sleep_time,
126 				  ws->prevent_sleep_time);
127 		deleted_ws.max_time =
128 			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
129 				deleted_ws.max_time : ws->max_time;
130 		deleted_ws.event_count += ws->event_count;
131 		deleted_ws.active_count += ws->active_count;
132 		deleted_ws.relax_count += ws->relax_count;
133 		deleted_ws.expire_count += ws->expire_count;
134 		deleted_ws.wakeup_count += ws->wakeup_count;
135 	}
136 
137 	spin_unlock_irqrestore(&deleted_ws.lock, flags);
138 }
139 
140 static void wakeup_source_free(struct wakeup_source *ws)
141 {
142 	ida_free(&wakeup_ida, ws->id);
143 	kfree_const(ws->name);
144 	kfree(ws);
145 }
146 
147 /**
148  * wakeup_source_destroy - Destroy a struct wakeup_source object.
149  * @ws: Wakeup source to destroy.
150  *
151  * Use only for wakeup source objects created with wakeup_source_create().
152  */
153 void wakeup_source_destroy(struct wakeup_source *ws)
154 {
155 	if (!ws)
156 		return;
157 
158 	__pm_relax(ws);
159 	wakeup_source_record(ws);
160 	wakeup_source_free(ws);
161 }
162 EXPORT_SYMBOL_GPL(wakeup_source_destroy);
163 
164 /**
165  * wakeup_source_add - Add given object to the list of wakeup sources.
166  * @ws: Wakeup source object to add to the list.
167  */
168 void wakeup_source_add(struct wakeup_source *ws)
169 {
170 	unsigned long flags;
171 
172 	if (WARN_ON(!ws))
173 		return;
174 
175 	spin_lock_init(&ws->lock);
176 	timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
177 	ws->active = false;
178 
179 	raw_spin_lock_irqsave(&events_lock, flags);
180 	list_add_rcu(&ws->entry, &wakeup_sources);
181 	raw_spin_unlock_irqrestore(&events_lock, flags);
182 }
183 EXPORT_SYMBOL_GPL(wakeup_source_add);
184 
185 /**
186  * wakeup_source_remove - Remove given object from the wakeup sources list.
187  * @ws: Wakeup source object to remove from the list.
188  */
189 void wakeup_source_remove(struct wakeup_source *ws)
190 {
191 	unsigned long flags;
192 
193 	if (WARN_ON(!ws))
194 		return;
195 
196 	raw_spin_lock_irqsave(&events_lock, flags);
197 	list_del_rcu(&ws->entry);
198 	raw_spin_unlock_irqrestore(&events_lock, flags);
199 	synchronize_srcu(&wakeup_srcu);
200 
201 	del_timer_sync(&ws->timer);
202 	/*
203 	 * Clear timer.function to make wakeup_source_not_registered() treat
204 	 * this wakeup source as not registered.
205 	 */
206 	ws->timer.function = NULL;
207 }
208 EXPORT_SYMBOL_GPL(wakeup_source_remove);
209 
210 /**
211  * wakeup_source_register - Create wakeup source and add it to the list.
212  * @dev: Device this wakeup source is associated with (or NULL if virtual).
213  * @name: Name of the wakeup source to register.
214  */
215 struct wakeup_source *wakeup_source_register(struct device *dev,
216 					     const char *name)
217 {
218 	struct wakeup_source *ws;
219 	int ret;
220 
221 	ws = wakeup_source_create(name);
222 	if (ws) {
223 		if (!dev || device_is_registered(dev)) {
224 			ret = wakeup_source_sysfs_add(dev, ws);
225 			if (ret) {
226 				wakeup_source_free(ws);
227 				return NULL;
228 			}
229 		}
230 		wakeup_source_add(ws);
231 	}
232 	return ws;
233 }
234 EXPORT_SYMBOL_GPL(wakeup_source_register);
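
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a "virtual" wakeup source can be registered without a device and driven
 * directly with the __pm_* helpers defined below:
 *
 *	struct wakeup_source *my_ws;
 *
 *	my_ws = wakeup_source_register(NULL, "my_virtual_source");
 *	if (!my_ws)
 *		return -ENOMEM;
 *
 *	__pm_stay_awake(my_ws);		start a "no suspend" period
 *	...				handle the wakeup event
 *	__pm_relax(my_ws);		end the "no suspend" period
 *
 *	wakeup_source_unregister(my_ws);
 */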
235 
236 /**
237  * wakeup_source_unregister - Remove the wakeup source from the list and destroy it.
238  * @ws: Wakeup source object to unregister.
239  */
240 void wakeup_source_unregister(struct wakeup_source *ws)
241 {
242 	if (ws) {
243 		wakeup_source_remove(ws);
244 		wakeup_source_sysfs_remove(ws);
245 		wakeup_source_destroy(ws);
246 	}
247 }
248 EXPORT_SYMBOL_GPL(wakeup_source_unregister);
249 
250 /**
251  * wakeup_sources_read_lock - Lock wakeup source list for read.
252  *
253  * Returns an SRCU read lock index for wakeup_srcu.
254  * This index must be passed to the matching wakeup_sources_read_unlock().
255  */
256 int wakeup_sources_read_lock(void)
257 {
258 	return srcu_read_lock(&wakeup_srcu);
259 }
260 EXPORT_SYMBOL_GPL(wakeup_sources_read_lock);
261 
262 /**
263  * wakeup_sources_read_unlock - Unlock wakeup source list.
264  * @idx: return value from corresponding wakeup_sources_read_lock()
265  */
266 void wakeup_sources_read_unlock(int idx)
267 {
268 	srcu_read_unlock(&wakeup_srcu, idx);
269 }
270 EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock);
271 
272 /**
273  * wakeup_sources_walk_start - Begin a walk on wakeup source list
274  *
275  * Returns first object of the list of wakeup sources.
276  *
277  * Note that to be safe, the wakeup sources list needs to be locked by calling
278  * wakeup_sources_read_lock() before using this function.
279  */
280 struct wakeup_source *wakeup_sources_walk_start(void)
281 {
282 	struct list_head *ws_head = &wakeup_sources;
283 
284 	return list_entry_rcu(ws_head->next, struct wakeup_source, entry);
285 }
286 EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);
287 
288 /**
289  * wakeup_sources_walk_next - Get next wakeup source from the list
290  * @ws: Previous wakeup source object
291  *
292  * Note that to be safe, the wakeup sources list needs to be locked by calling
293  * wakeup_sources_read_lock() before using this function.
294  */
295 struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws)
296 {
297 	struct list_head *ws_head = &wakeup_sources;
298 
299 	return list_next_or_null_rcu(ws_head, &ws->entry,
300 				struct wakeup_source, entry);
301 }
302 EXPORT_SYMBOL_GPL(wakeup_sources_walk_next);
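
/*
 * Illustrative walk sketch (hypothetical caller): the SRCU read lock must be
 * held across the whole walk, so the walk helpers above are meant to be used
 * between wakeup_sources_read_lock() and wakeup_sources_read_unlock():
 *
 *	struct wakeup_source *ws;
 *	int idx;
 *
 *	idx = wakeup_sources_read_lock();
 *	for (ws = wakeup_sources_walk_start(); ws;
 *	     ws = wakeup_sources_walk_next(ws))
 *		pr_info("wakeup source: %s\n", ws->name);
 *	wakeup_sources_read_unlock(idx);
 */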
303 
304 /**
305  * device_wakeup_attach - Attach a wakeup source object to a device object.
306  * @dev: Device to handle.
307  * @ws: Wakeup source object to attach to @dev.
308  *
309  * This causes @dev to be treated as a wakeup device.
310  */
311 static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
312 {
313 	spin_lock_irq(&dev->power.lock);
314 	if (dev->power.wakeup) {
315 		spin_unlock_irq(&dev->power.lock);
316 		return -EEXIST;
317 	}
318 	dev->power.wakeup = ws;
319 	if (dev->power.wakeirq)
320 		device_wakeup_attach_irq(dev, dev->power.wakeirq);
321 	spin_unlock_irq(&dev->power.lock);
322 	return 0;
323 }
324 
325 /**
326  * device_wakeup_enable - Enable given device to be a wakeup source.
327  * @dev: Device to handle.
328  *
329  * Create a wakeup source object, register it and attach it to @dev.
330  */
331 int device_wakeup_enable(struct device *dev)
332 {
333 	struct wakeup_source *ws;
334 	int ret;
335 
336 	if (!dev || !dev->power.can_wakeup)
337 		return -EINVAL;
338 
339 	if (pm_suspend_target_state != PM_SUSPEND_ON)
340 		dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
341 
342 	ws = wakeup_source_register(dev, dev_name(dev));
343 	if (!ws)
344 		return -ENOMEM;
345 
346 	ret = device_wakeup_attach(dev, ws);
347 	if (ret)
348 		wakeup_source_unregister(ws);
349 
350 	return ret;
351 }
352 EXPORT_SYMBOL_GPL(device_wakeup_enable);
353 
354 /**
355  * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
356  * @dev: Device to handle
357  * @wakeirq: Device specific wakeirq entry
358  *
359  * Attach a device wakeirq to the wakeup source so the device
360  * wake IRQ can be configured automatically for suspend and
361  * resume.
362  *
363  * Call under the device's power.lock lock.
364  */
365 void device_wakeup_attach_irq(struct device *dev,
366 			     struct wake_irq *wakeirq)
367 {
368 	struct wakeup_source *ws;
369 
370 	ws = dev->power.wakeup;
371 	if (!ws)
372 		return;
373 
374 	if (ws->wakeirq)
375 		dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
376 
377 	ws->wakeirq = wakeirq;
378 }
379 
380 /**
381  * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
382  * @dev: Device to handle
383  *
384  * Removes a device wakeirq from the wakeup source.
385  *
386  * Call under the device's power.lock lock.
387  */
388 void device_wakeup_detach_irq(struct device *dev)
389 {
390 	struct wakeup_source *ws;
391 
392 	ws = dev->power.wakeup;
393 	if (ws)
394 		ws->wakeirq = NULL;
395 }
396 
397 /**
398  * device_wakeup_arm_wake_irqs(void)
399  *
400  * Iterates over the list of device wakeirqs to arm them.
401  */
402 void device_wakeup_arm_wake_irqs(void)
403 {
404 	struct wakeup_source *ws;
405 	int srcuidx;
406 
407 	srcuidx = srcu_read_lock(&wakeup_srcu);
408 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
409 		dev_pm_arm_wake_irq(ws->wakeirq);
410 	srcu_read_unlock(&wakeup_srcu, srcuidx);
411 }
412 
413 /**
414  * device_wakeup_disarm_wake_irqs(void)
415  *
416  * Iterates over the list of device wakeirqs to disarm them.
417  */
418 void device_wakeup_disarm_wake_irqs(void)
419 {
420 	struct wakeup_source *ws;
421 	int srcuidx;
422 
423 	srcuidx = srcu_read_lock(&wakeup_srcu);
424 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
425 		dev_pm_disarm_wake_irq(ws->wakeirq);
426 	srcu_read_unlock(&wakeup_srcu, srcuidx);
427 }
428 
429 /**
430  * device_wakeup_detach - Detach a device's wakeup source object from it.
431  * @dev: Device to detach the wakeup source object from.
432  *
433  * After it returns, @dev will not be treated as a wakeup device any more.
434  */
435 static struct wakeup_source *device_wakeup_detach(struct device *dev)
436 {
437 	struct wakeup_source *ws;
438 
439 	spin_lock_irq(&dev->power.lock);
440 	ws = dev->power.wakeup;
441 	dev->power.wakeup = NULL;
442 	spin_unlock_irq(&dev->power.lock);
443 	return ws;
444 }
445 
446 /**
447  * device_wakeup_disable - Do not regard a device as a wakeup source any more.
448  * @dev: Device to handle.
449  *
450  * Detach the @dev's wakeup source object from it, unregister this wakeup source
451  * object and destroy it.
452  */
453 int device_wakeup_disable(struct device *dev)
454 {
455 	struct wakeup_source *ws;
456 
457 	if (!dev || !dev->power.can_wakeup)
458 		return -EINVAL;
459 
460 	ws = device_wakeup_detach(dev);
461 	wakeup_source_unregister(ws);
462 	return 0;
463 }
464 EXPORT_SYMBOL_GPL(device_wakeup_disable);
465 
466 /**
467  * device_set_wakeup_capable - Set/reset device wakeup capability flag.
468  * @dev: Device to handle.
469  * @capable: Whether or not @dev is capable of waking up the system from sleep.
470  *
471  * If @capable is set, set the @dev's power.can_wakeup flag and add its
472  * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
473  * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
474  *
475  * This function may sleep and it can't be called from any context where
476  * sleeping is not allowed.
477  */
478 void device_set_wakeup_capable(struct device *dev, bool capable)
479 {
480 	if (!!dev->power.can_wakeup == !!capable)
481 		return;
482 
483 	dev->power.can_wakeup = capable;
484 	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
485 		if (capable) {
486 			int ret = wakeup_sysfs_add(dev);
487 
488 			if (ret)
489 				dev_info(dev, "Wakeup sysfs attributes not added\n");
490 		} else {
491 			wakeup_sysfs_remove(dev);
492 		}
493 	}
494 }
495 EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
496 
497 /**
498  * device_init_wakeup - Device wakeup initialization.
499  * @dev: Device to handle.
500  * @enable: Whether or not to enable @dev as a wakeup device.
501  *
502  * By default, most devices should leave wakeup disabled.  The exceptions are
503  * devices that everyone expects to be wakeup sources: keyboards, power buttons,
504  * possibly network interfaces, etc.  Also, devices that don't generate their
505  * own wakeup requests but merely forward requests from one bus to another
506  * (like PCI bridges) should have wakeup enabled by default.
507  */
508 int device_init_wakeup(struct device *dev, bool enable)
509 {
510 	int ret = 0;
511 
512 	if (!dev)
513 		return -EINVAL;
514 
515 	if (enable) {
516 		device_set_wakeup_capable(dev, true);
517 		ret = device_wakeup_enable(dev);
518 	} else {
519 		device_wakeup_disable(dev);
520 		device_set_wakeup_capable(dev, false);
521 	}
522 
523 	return ret;
524 }
525 EXPORT_SYMBOL_GPL(device_init_wakeup);
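
/*
 * Illustrative driver-side sketch (hypothetical "foo" platform driver):
 * probe() typically marks the device wakeup-capable and enables it, and
 * remove() undoes both via the same helper:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		...
 *		return device_init_wakeup(&pdev->dev, true);
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		device_init_wakeup(&pdev->dev, false);
 *		return 0;
 *	}
 */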
526 
527 /**
528  * device_set_wakeup_enable - Enable or disable a device to wake up the system.
529  * @dev: Device to handle.
530  */
531 int device_set_wakeup_enable(struct device *dev, bool enable)
532 {
533 	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
534 }
535 EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
536 
537 /**
538  * wakeup_source_not_registered - validate the given wakeup source.
539  * @ws: Wakeup source to be validated.
540  */
541 static bool wakeup_source_not_registered(struct wakeup_source *ws)
542 {
543 	/*
544 	 * Use timer struct to check if the given source is initialized
545 	 * by wakeup_source_add.
546 	 */
547 	return ws->timer.function != pm_wakeup_timer_fn;
548 }
549 
550 /*
551  * The functions below use the observation that each wakeup event starts a
552  * period in which the system should not be suspended.  The moment this period
553  * will end depends on how the wakeup event is going to be processed after being
554  * detected and all of the possible cases can be divided into two distinct
555  * groups.
556  *
557  * First, a wakeup event may be detected by the same functional unit that will
558  * carry out the entire processing of it and possibly will pass it to user space
559  * for further processing.  In that case the functional unit that has detected
560  * the event may later "close" the "no suspend" period associated with it
561  * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
562  * pm_relax(), balanced with each other, is supposed to be used in such
563  * situations.
564  *
565  * Second, a wakeup event may be detected by one functional unit and processed
566  * by another one.  In that case the unit that has detected it cannot really
567  * "close" the "no suspend" period associated with it, unless it knows in
568  * advance what's going to happen to the event during processing.  This
569  * knowledge, however, may not be available to it, so it can simply specify time
570  * to wait before the system can be suspended and pass it as the second
571  * argument of pm_wakeup_event().
572  *
573  * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
574  * "no suspend" period will be ended either by the pm_relax(), or by the timer
575  * function executed when the timer expires, whichever comes first.
576  */
577 
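/*
 * Illustrative sketch of the two cases described above (hypothetical driver
 * code, not part of this file):
 *
 * Case 1 - the unit that detects the event also finishes its processing:
 *
 *	pm_stay_awake(dev);		wakeup event detected
 *	...				handle the event
 *	pm_relax(dev);			processing complete
 *
 * Case 2 - the event is handed off (e.g. to user space), so only an upper
 * bound on the processing time can be given:
 *
 *	pm_wakeup_event(dev, 500);	keep the system awake for up to 500 ms
 */
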
578 /**
579  * wakeup_source_activate - Mark given wakeup source as active.
580  * @ws: Wakeup source to handle.
581  *
582  * Update the @ws' statistics and, if @ws has just been activated, notify the PM
583  * core of the event by incrementing the counter of wakeup events being
584  * processed.
585  */
586 static void wakeup_source_activate(struct wakeup_source *ws)
587 {
588 	unsigned int cec;
589 
590 	if (WARN_ONCE(wakeup_source_not_registered(ws),
591 			"unregistered wakeup source\n"))
592 		return;
593 
594 	ws->active = true;
595 	ws->active_count++;
596 	ws->last_time = ktime_get();
597 	if (ws->autosleep_enabled)
598 		ws->start_prevent_time = ws->last_time;
599 
600 	/* Increment the counter of events in progress. */
601 	cec = atomic_inc_return(&combined_event_count);
602 
603 	trace_wakeup_source_activate(ws->name, cec);
604 }
605 
606 /**
607  * wakeup_source_report_event - Report wakeup event using the given source.
608  * @ws: Wakeup source to report the event for.
609  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
610  */
611 static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
612 {
613 	ws->event_count++;
614 	/* This is racy, but the counter is approximate anyway. */
615 	if (events_check_enabled)
616 		ws->wakeup_count++;
617 
618 	if (!ws->active)
619 		wakeup_source_activate(ws);
620 
621 	if (hard)
622 		pm_system_wakeup();
623 }
624 
625 /**
626  * __pm_stay_awake - Notify the PM core of a wakeup event.
627  * @ws: Wakeup source object associated with the source of the event.
628  *
629  * It is safe to call this function from interrupt context.
630  */
631 void __pm_stay_awake(struct wakeup_source *ws)
632 {
633 	unsigned long flags;
634 
635 	if (!ws)
636 		return;
637 
638 	spin_lock_irqsave(&ws->lock, flags);
639 
640 	wakeup_source_report_event(ws, false);
641 	del_timer(&ws->timer);
642 	ws->timer_expires = 0;
643 
644 	spin_unlock_irqrestore(&ws->lock, flags);
645 }
646 EXPORT_SYMBOL_GPL(__pm_stay_awake);
647 
648 /**
649  * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
650  * @dev: Device the wakeup event is related to.
651  *
652  * Notify the PM core of a wakeup event (signaled by @dev) by calling
653  * __pm_stay_awake for the @dev's wakeup source object.
654  *
655  * Call this function after detecting of a wakeup event if pm_relax() is going
656  * to be called directly after processing the event (and possibly passing it to
657  * user space for further processing).
658  */
659 void pm_stay_awake(struct device *dev)
660 {
661 	unsigned long flags;
662 
663 	if (!dev)
664 		return;
665 
666 	spin_lock_irqsave(&dev->power.lock, flags);
667 	__pm_stay_awake(dev->power.wakeup);
668 	spin_unlock_irqrestore(&dev->power.lock, flags);
669 }
670 EXPORT_SYMBOL_GPL(pm_stay_awake);
671 
672 #ifdef CONFIG_PM_AUTOSLEEP
673 static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
674 {
675 	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
676 	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
677 }
678 #else
679 static inline void update_prevent_sleep_time(struct wakeup_source *ws,
680 					     ktime_t now) {}
681 #endif
682 
683 /**
684  * wakeup_source_deactivate - Mark given wakeup source as inactive.
685  * @ws: Wakeup source to handle.
686  *
687  * Update the @ws' statistics and notify the PM core that the wakeup source has
688  * become inactive by decrementing the counter of wakeup events being processed
689  * and incrementing the counter of registered wakeup events.
690  */
691 static void wakeup_source_deactivate(struct wakeup_source *ws)
692 {
693 	unsigned int cnt, inpr, cec;
694 	ktime_t duration;
695 	ktime_t now;
696 
697 	ws->relax_count++;
698 	/*
699 	 * __pm_relax() may be called directly or from a timer function.
700 	 * If it is called directly right after the timer function has been
701 	 * started, but before the timer function calls __pm_relax(), it is
702 	 * possible that __pm_stay_awake() will be called in the meantime and
703 	 * will set ws->active.  Then, ws->active may be cleared immediately
704 	 * by the __pm_relax() called from the timer function, but in such a
705 	 * case ws->relax_count will be different from ws->active_count.
706 	 */
707 	if (ws->relax_count != ws->active_count) {
708 		ws->relax_count--;
709 		return;
710 	}
711 
712 	ws->active = false;
713 
714 	now = ktime_get();
715 	duration = ktime_sub(now, ws->last_time);
716 	ws->total_time = ktime_add(ws->total_time, duration);
717 	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
718 		ws->max_time = duration;
719 
720 	ws->last_time = now;
721 	del_timer(&ws->timer);
722 	ws->timer_expires = 0;
723 
724 	if (ws->autosleep_enabled)
725 		update_prevent_sleep_time(ws, now);
726 
727 	/*
728 	 * Increment the counter of registered wakeup events and decrement the
729 	 * counter of wakeup events in progress simultaneously.
730 	 */
731 	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
732 	trace_wakeup_source_deactivate(ws->name, cec);
733 
734 	split_counters(&cnt, &inpr);
735 	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
736 		wake_up(&wakeup_count_wait_queue);
737 }
738 
739 /**
740  * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
741  * @ws: Wakeup source object associated with the source of the event.
742  *
743  * Call this function for wakeup events whose processing started with calling
744  * __pm_stay_awake().
745  *
746  * It is safe to call it from interrupt context.
747  */
748 void __pm_relax(struct wakeup_source *ws)
749 {
750 	unsigned long flags;
751 
752 	if (!ws)
753 		return;
754 
755 	spin_lock_irqsave(&ws->lock, flags);
756 	if (ws->active)
757 		wakeup_source_deactivate(ws);
758 	spin_unlock_irqrestore(&ws->lock, flags);
759 }
760 EXPORT_SYMBOL_GPL(__pm_relax);
761 
762 /**
763  * pm_relax - Notify the PM core that processing of a wakeup event has ended.
764  * @dev: Device that signaled the event.
765  *
766  * Execute __pm_relax() for the @dev's wakeup source object.
767  */
768 void pm_relax(struct device *dev)
769 {
770 	unsigned long flags;
771 
772 	if (!dev)
773 		return;
774 
775 	spin_lock_irqsave(&dev->power.lock, flags);
776 	__pm_relax(dev->power.wakeup);
777 	spin_unlock_irqrestore(&dev->power.lock, flags);
778 }
779 EXPORT_SYMBOL_GPL(pm_relax);
780 
781 /**
782  * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
783  * @t: Timer used to access the wakeup source object associated with the event.
784  *
785  * Call wakeup_source_deactivate() for the wakeup source containing @t if it is
786  * currently active, its timer has not been canceled and the expiration time of
787  * the timer is not in the future.
788  */
789 static void pm_wakeup_timer_fn(struct timer_list *t)
790 {
791 	struct wakeup_source *ws = from_timer(ws, t, timer);
792 	unsigned long flags;
793 
794 	spin_lock_irqsave(&ws->lock, flags);
795 
796 	if (ws->active && ws->timer_expires
797 	    && time_after_eq(jiffies, ws->timer_expires)) {
798 		wakeup_source_deactivate(ws);
799 		ws->expire_count++;
800 	}
801 
802 	spin_unlock_irqrestore(&ws->lock, flags);
803 }
804 
805 /**
806  * pm_wakeup_ws_event - Notify the PM core of a wakeup event.
807  * @ws: Wakeup source object associated with the event source.
808  * @msec: Anticipated event processing time (in milliseconds).
809  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
810  *
811  * Notify the PM core of a wakeup event whose source is @ws that will take
812  * approximately @msec milliseconds to be processed by the kernel.  If @ws is
813  * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
814  * execute pm_wakeup_timer_fn() in future.
815  *
816  * It is safe to call this function from interrupt context.
817  */
818 void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
819 {
820 	unsigned long flags;
821 	unsigned long expires;
822 
823 	if (!ws)
824 		return;
825 
826 	spin_lock_irqsave(&ws->lock, flags);
827 
828 	wakeup_source_report_event(ws, hard);
829 
830 	if (!msec) {
831 		wakeup_source_deactivate(ws);
832 		goto unlock;
833 	}
834 
835 	expires = jiffies + msecs_to_jiffies(msec);
836 	if (!expires)
837 		expires = 1;
838 
839 	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
840 		mod_timer(&ws->timer, expires);
841 		ws->timer_expires = expires;
842 	}
843 
844  unlock:
845 	spin_unlock_irqrestore(&ws->lock, flags);
846 }
847 EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
848 
849 /**
850  * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
851  * @dev: Device the wakeup event is related to.
852  * @msec: Anticipated event processing time (in milliseconds).
853  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
854  *
855  * Call pm_wakeup_ws_event() for the @dev's wakeup source object.
856  */
857 void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
858 {
859 	unsigned long flags;
860 
861 	if (!dev)
862 		return;
863 
864 	spin_lock_irqsave(&dev->power.lock, flags);
865 	pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
866 	spin_unlock_irqrestore(&dev->power.lock, flags);
867 }
868 EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
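
/*
 * Illustrative interrupt-handler sketch (hypothetical driver): report a
 * wakeup event whose further processing is expected to take at most about
 * 200 ms, without forcing an abort of a suspend in progress:
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct foo_device *foo = data;
 *
 *		pm_wakeup_dev_event(foo->dev, 200, false);
 *		...				queue the real work
 *		return IRQ_HANDLED;
 *	}
 */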
869 
870 void pm_print_active_wakeup_sources(void)
871 {
872 	struct wakeup_source *ws;
873 	int srcuidx, active = 0;
874 	struct wakeup_source *last_activity_ws = NULL;
875 
876 	srcuidx = srcu_read_lock(&wakeup_srcu);
877 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
878 		if (ws->active) {
879 			pm_pr_dbg("active wakeup source: %s\n", ws->name);
880 			active = 1;
881 		} else if (!active &&
882 			   (!last_activity_ws ||
883 			    ktime_to_ns(ws->last_time) >
884 			    ktime_to_ns(last_activity_ws->last_time))) {
885 			last_activity_ws = ws;
886 		}
887 	}
888 
889 	if (!active && last_activity_ws)
890 		pm_pr_dbg("last active wakeup source: %s\n",
891 			last_activity_ws->name);
892 	srcu_read_unlock(&wakeup_srcu, srcuidx);
893 }
894 EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
895 
896 /**
897  * pm_wakeup_pending - Check if power transition in progress should be aborted.
898  *
899  * Compare the current number of registered wakeup events with its preserved
900  * value from the past and return true if new wakeup events have been registered
901  * since the old value was stored.  Also return true if the current number of
902  * wakeup events being processed is different from zero.
903  */
904 bool pm_wakeup_pending(void)
905 {
906 	unsigned long flags;
907 	bool ret = false;
908 
909 	raw_spin_lock_irqsave(&events_lock, flags);
910 	if (events_check_enabled) {
911 		unsigned int cnt, inpr;
912 
913 		split_counters(&cnt, &inpr);
914 		ret = (cnt != saved_count || inpr > 0);
915 		events_check_enabled = !ret;
916 	}
917 	raw_spin_unlock_irqrestore(&events_lock, flags);
918 
919 	if (ret) {
920 		pm_pr_dbg("Wakeup pending, aborting suspend\n");
921 		pm_print_active_wakeup_sources();
922 	}
923 
924 	return ret || atomic_read(&pm_abort_suspend) > 0;
925 }
926 
927 void pm_system_wakeup(void)
928 {
929 	atomic_inc(&pm_abort_suspend);
930 	s2idle_wake();
931 }
932 EXPORT_SYMBOL_GPL(pm_system_wakeup);
933 
934 void pm_system_cancel_wakeup(void)
935 {
936 	atomic_dec_if_positive(&pm_abort_suspend);
937 }
938 
939 void pm_wakeup_clear(bool reset)
940 {
941 	pm_wakeup_irq = 0;
942 	if (reset)
943 		atomic_set(&pm_abort_suspend, 0);
944 }
945 
946 void pm_system_irq_wakeup(unsigned int irq_number)
947 {
948 	if (pm_wakeup_irq == 0) {
949 		pm_wakeup_irq = irq_number;
950 		pm_system_wakeup();
951 	}
952 }
953 
954 /**
955  * pm_get_wakeup_count - Read the number of registered wakeup events.
956  * @count: Address to store the value at.
957  * @block: Whether or not to block.
958  *
959  * Store the number of registered wakeup events at the address in @count.  If
960  * @block is set, block until the current number of wakeup events being
961  * processed is zero.
962  *
963  * Return 'false' if the current number of wakeup events being processed is
964  * nonzero.  Otherwise return 'true'.
965  */
966 bool pm_get_wakeup_count(unsigned int *count, bool block)
967 {
968 	unsigned int cnt, inpr;
969 
970 	if (block) {
971 		DEFINE_WAIT(wait);
972 
973 		for (;;) {
974 			prepare_to_wait(&wakeup_count_wait_queue, &wait,
975 					TASK_INTERRUPTIBLE);
976 			split_counters(&cnt, &inpr);
977 			if (inpr == 0 || signal_pending(current))
978 				break;
979 			pm_print_active_wakeup_sources();
980 			schedule();
981 		}
982 		finish_wait(&wakeup_count_wait_queue, &wait);
983 	}
984 
985 	split_counters(&cnt, &inpr);
986 	*count = cnt;
987 	return !inpr;
988 }
989 
990 /**
991  * pm_save_wakeup_count - Save the current number of registered wakeup events.
992  * @count: Value to compare with the current number of registered wakeup events.
993  *
994  * If @count is equal to the current number of registered wakeup events and the
995  * current number of wakeup events being processed is zero, store @count as the
996  * old number of registered wakeup events for pm_wakeup_pending(), enable
997  * wakeup events detection and return 'true'.  Otherwise disable wakeup events
998  * detection and return 'false'.
999  */
1000 bool pm_save_wakeup_count(unsigned int count)
1001 {
1002 	unsigned int cnt, inpr;
1003 	unsigned long flags;
1004 
1005 	events_check_enabled = false;
1006 	raw_spin_lock_irqsave(&events_lock, flags);
1007 	split_counters(&cnt, &inpr);
1008 	if (cnt == count && inpr == 0) {
1009 		saved_count = count;
1010 		events_check_enabled = true;
1011 	}
1012 	raw_spin_unlock_irqrestore(&events_lock, flags);
1013 	return events_check_enabled;
1014 }
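
/*
 * Illustrative sketch of the user-space protocol built on the two functions
 * above (exposed as /sys/power/wakeup_count, see kernel/power/main.c):
 *
 *	1. Read /sys/power/wakeup_count; this ends up in
 *	   pm_get_wakeup_count(&cnt, true) and may block until no wakeup
 *	   events are in progress.
 *	2. Write the value back; this calls pm_save_wakeup_count(cnt) and
 *	   fails if new wakeup events have been registered in the meantime.
 *	3. Write the target state to /sys/power/state; pm_wakeup_pending()
 *	   then aborts the transition if further events show up.
 */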
1015 
1016 #ifdef CONFIG_PM_AUTOSLEEP
1017 /**
1018  * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
1019  * @set: Whether to set or to clear the autosleep_enabled flags.
1020  */
1021 void pm_wakep_autosleep_enabled(bool set)
1022 {
1023 	struct wakeup_source *ws;
1024 	ktime_t now = ktime_get();
1025 	int srcuidx;
1026 
1027 	srcuidx = srcu_read_lock(&wakeup_srcu);
1028 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
1029 		spin_lock_irq(&ws->lock);
1030 		if (ws->autosleep_enabled != set) {
1031 			ws->autosleep_enabled = set;
1032 			if (ws->active) {
1033 				if (set)
1034 					ws->start_prevent_time = now;
1035 				else
1036 					update_prevent_sleep_time(ws, now);
1037 			}
1038 		}
1039 		spin_unlock_irq(&ws->lock);
1040 	}
1041 	srcu_read_unlock(&wakeup_srcu, srcuidx);
1042 }
1043 #endif /* CONFIG_PM_AUTOSLEEP */
1044 
1045 /**
1046  * print_wakeup_source_stats - Print wakeup source statistics information.
1047  * @m: seq_file to print the statistics into.
1048  * @ws: Wakeup source object to print the statistics for.
1049  */
1050 static int print_wakeup_source_stats(struct seq_file *m,
1051 				     struct wakeup_source *ws)
1052 {
1053 	unsigned long flags;
1054 	ktime_t total_time;
1055 	ktime_t max_time;
1056 	unsigned long active_count;
1057 	ktime_t active_time;
1058 	ktime_t prevent_sleep_time;
1059 
1060 	spin_lock_irqsave(&ws->lock, flags);
1061 
1062 	total_time = ws->total_time;
1063 	max_time = ws->max_time;
1064 	prevent_sleep_time = ws->prevent_sleep_time;
1065 	active_count = ws->active_count;
1066 	if (ws->active) {
1067 		ktime_t now = ktime_get();
1068 
1069 		active_time = ktime_sub(now, ws->last_time);
1070 		total_time = ktime_add(total_time, active_time);
1071 		if (active_time > max_time)
1072 			max_time = active_time;
1073 
1074 		if (ws->autosleep_enabled)
1075 			prevent_sleep_time = ktime_add(prevent_sleep_time,
1076 				ktime_sub(now, ws->start_prevent_time));
1077 	} else {
1078 		active_time = 0;
1079 	}
1080 
1081 	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
1082 		   ws->name, active_count, ws->event_count,
1083 		   ws->wakeup_count, ws->expire_count,
1084 		   ktime_to_ms(active_time), ktime_to_ms(total_time),
1085 		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
1086 		   ktime_to_ms(prevent_sleep_time));
1087 
1088 	spin_unlock_irqrestore(&ws->lock, flags);
1089 
1090 	return 0;
1091 }
1092 
1093 static void *wakeup_sources_stats_seq_start(struct seq_file *m,
1094 					loff_t *pos)
1095 {
1096 	struct wakeup_source *ws;
1097 	loff_t n = *pos;
1098 	int *srcuidx = m->private;
1099 
1100 	if (n == 0) {
1101 		seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
1102 			"expire_count\tactive_since\ttotal_time\tmax_time\t"
1103 			"last_change\tprevent_suspend_time\n");
1104 	}
1105 
1106 	*srcuidx = srcu_read_lock(&wakeup_srcu);
1107 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
1108 		if (n-- <= 0)
1109 			return ws;
1110 	}
1111 
1112 	return NULL;
1113 }
1114 
1115 static void *wakeup_sources_stats_seq_next(struct seq_file *m,
1116 					void *v, loff_t *pos)
1117 {
1118 	struct wakeup_source *ws = v;
1119 	struct wakeup_source *next_ws = NULL;
1120 
1121 	++(*pos);
1122 
1123 	list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
1124 		next_ws = ws;
1125 		break;
1126 	}
1127 
1128 	if (!next_ws)
1129 		print_wakeup_source_stats(m, &deleted_ws);
1130 
1131 	return next_ws;
1132 }
1133 
1134 static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
1135 {
1136 	int *srcuidx = m->private;
1137 
1138 	srcu_read_unlock(&wakeup_srcu, *srcuidx);
1139 }
1140 
1141 /**
1142  * wakeup_sources_stats_seq_show - Print wakeup sources statistics information.
1143  * @m: seq_file to print the statistics into.
1144  * @v: wakeup_source of each iteration
1145  */
1146 static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
1147 {
1148 	struct wakeup_source *ws = v;
1149 
1150 	print_wakeup_source_stats(m, ws);
1151 
1152 	return 0;
1153 }
1154 
1155 static const struct seq_operations wakeup_sources_stats_seq_ops = {
1156 	.start = wakeup_sources_stats_seq_start,
1157 	.next  = wakeup_sources_stats_seq_next,
1158 	.stop  = wakeup_sources_stats_seq_stop,
1159 	.show  = wakeup_sources_stats_seq_show,
1160 };
1161 
1162 static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
1163 {
1164 	return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
1165 }
1166 
1167 static const struct file_operations wakeup_sources_stats_fops = {
1168 	.owner = THIS_MODULE,
1169 	.open = wakeup_sources_stats_open,
1170 	.read = seq_read,
1171 	.llseek = seq_lseek,
1172 	.release = seq_release_private,
1173 };
1174 
1175 static int __init wakeup_sources_debugfs_init(void)
1176 {
1177 	debugfs_create_file("wakeup_sources", S_IRUGO, NULL, NULL,
1178 			    &wakeup_sources_stats_fops);
1179 	return 0;
1180 }
1181 
1182 postcore_initcall(wakeup_sources_debugfs_init);
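
/*
 * The collected statistics can then be inspected from user space, e.g.
 * (illustrative, column values omitted):
 *
 *	# cat /sys/kernel/debug/wakeup_sources
 *	name          active_count  event_count  wakeup_count  expire_count ...
 */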
1183