xref: /linux/kernel/sched/completion.c (revision 801c1419)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2*801c1419SIngo Molnar 
3b8a21626SPeter Zijlstra /*
4b8a21626SPeter Zijlstra  * Generic wait-for-completion handler;
5b8a21626SPeter Zijlstra  *
6b8a21626SPeter Zijlstra  * It differs from semaphores in that their default case is the opposite,
7b8a21626SPeter Zijlstra  * wait_for_completion default blocks whereas semaphore default non-block. The
8b8a21626SPeter Zijlstra  * interface also makes it easy to 'complete' multiple waiting threads,
9b8a21626SPeter Zijlstra  * something which isn't entirely natural for semaphores.
10b8a21626SPeter Zijlstra  *
11b8a21626SPeter Zijlstra  * But more importantly, the primitive documents the usage. Semaphores would
12b8a21626SPeter Zijlstra  * typically be used for exclusion which gives rise to priority inversion.
13b8a21626SPeter Zijlstra  * Waiting for completion is typically a sync point, but not an exclusion point.
14b8a21626SPeter Zijlstra  */
15b8a21626SPeter Zijlstra 
/**
 * complete: - signals a single thread waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up a single thread waiting on this completion. Threads will be
 * awakened in the same order in which they were queued.
 *
 * See also complete_all(), wait_for_completion() and related routines.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void complete(struct completion *x)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&x->wait.lock, flags);

	/*
	 * ->done == UINT_MAX is the complete_all() sentinel ("done forever");
	 * saturate instead of incrementing so it is never disturbed here.
	 */
	if (x->done != UINT_MAX)
		x->done++;
	/* Wake at most one waiter, under ->wait.lock. */
	swake_up_locked(&x->wait);
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);
40b8a21626SPeter Zijlstra 
/**
 * complete_all: - signals all threads waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up all threads waiting on this particular completion event.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 *
 * Since complete_all() sets the completion of @x permanently to done
 * to allow multiple waiters to finish, a call to reinit_completion()
 * must be used on @x if @x is to be used again. The code must make
 * sure that all waiters have woken and finished before reinitializing
 * @x. Also note that the function completion_done() can not be used
 * to know if there are still waiters after complete_all() has been called.
 */
void complete_all(struct completion *x)
{
	unsigned long flags;

	/* On PREEMPT_RT, a mass wakeup must not run from hard-IRQ context. */
	lockdep_assert_RT_in_threaded_ctx();

	raw_spin_lock_irqsave(&x->wait.lock, flags);
	/* UINT_MAX == "done forever": waiters will not decrement this. */
	x->done = UINT_MAX;
	swake_up_all_locked(&x->wait);
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
69b8a21626SPeter Zijlstra 
/*
 * Core wait loop. Called with ->wait.lock held and IRQs disabled; drops and
 * re-takes the lock around each sleep performed by @action (typically
 * schedule_timeout() or io_schedule_timeout()).
 *
 * Returns -ERESTARTSYS if a pending signal is significant for @state,
 * the remaining @timeout if it expired before completion, or at least 1
 * on success (so a completion that lands exactly at expiry still reads
 * as success, not timeout).
 */
static inline long __sched
do_wait_for_common(struct completion *x,
		   long (*action)(long), long timeout, int state)
{
	if (!x->done) {
		DECLARE_SWAITQUEUE(wait);

		do {
			/* Bail before queuing if a relevant signal is pending. */
			if (signal_pending_state(state, current)) {
				timeout = -ERESTARTSYS;
				break;
			}
			/*
			 * Queue and change task state under the lock, then
			 * release it for the actual sleep; complete() takes
			 * the same lock, so no wakeup can be missed.
			 */
			__prepare_to_swait(&x->wait, &wait);
			__set_current_state(state);
			raw_spin_unlock_irq(&x->wait.lock);
			timeout = action(timeout);
			raw_spin_lock_irq(&x->wait.lock);
		} while (!x->done && timeout);
		__finish_swait(&x->wait, &wait);
		if (!x->done)
			return timeout;
	}
	/* Consume one completion, unless complete_all() made it permanent. */
	if (x->done != UINT_MAX)
		x->done--;
	return timeout ?: 1;
}
96b8a21626SPeter Zijlstra 
/*
 * Common entry for all wait_for_completion*() variants: performs the
 * sleeping-context check and lockdep cross-release annotations, and
 * brackets do_wait_for_common() with ->wait.lock held, IRQs disabled.
 */
static inline long __sched
__wait_for_common(struct completion *x,
		  long (*action)(long), long timeout, int state)
{
	might_sleep();

	complete_acquire(x);

	raw_spin_lock_irq(&x->wait.lock);
	timeout = do_wait_for_common(x, action, timeout, state);
	raw_spin_unlock_irq(&x->wait.lock);

	complete_release(x);

	return timeout;
}
113b8a21626SPeter Zijlstra 
/* Non-IO-accounted wait: sleep via plain schedule_timeout(). */
static long __sched
wait_for_common(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, schedule_timeout, timeout, state);
}
119b8a21626SPeter Zijlstra 
/* IO-accounted wait: sleep via io_schedule_timeout() so the task counts as iowait. */
static long __sched
wait_for_common_io(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, io_schedule_timeout, timeout, state);
}
125b8a21626SPeter Zijlstra 
/**
 * wait_for_completion: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout.
 *
 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
 * and interrupt capability. Also see complete().
 */
void __sched wait_for_completion(struct completion *x)
{
	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion);
141b8a21626SPeter Zijlstra 
/**
 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible.
 *
 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
 * till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_timeout);
160b8a21626SPeter Zijlstra 
/**
 * wait_for_completion_io: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout. The caller is accounted as waiting
 * for IO (which traditionally means blkio only).
 */
void __sched wait_for_completion_io(struct completion *x)
{
	wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io);
174b8a21626SPeter Zijlstra 
/**
 * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible. The caller is accounted as waiting for IO (which traditionally
 * means blkio only).
 *
 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
 * till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io_timeout);
194b8a21626SPeter Zijlstra 
195b8a21626SPeter Zijlstra /**
196b8a21626SPeter Zijlstra  * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
197b8a21626SPeter Zijlstra  * @x:  holds the state of this particular completion
198b8a21626SPeter Zijlstra  *
199b8a21626SPeter Zijlstra  * This waits for completion of a specific task to be signaled. It is
200b8a21626SPeter Zijlstra  * interruptible.
201b8a21626SPeter Zijlstra  *
202b8a21626SPeter Zijlstra  * Return: -ERESTARTSYS if interrupted, 0 if completed.
203b8a21626SPeter Zijlstra  */
204b8a21626SPeter Zijlstra int __sched wait_for_completion_interruptible(struct completion *x)
205b8a21626SPeter Zijlstra {
206b8a21626SPeter Zijlstra 	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
207b8a21626SPeter Zijlstra 	if (t == -ERESTARTSYS)
208b8a21626SPeter Zijlstra 		return t;
209b8a21626SPeter Zijlstra 	return 0;
210b8a21626SPeter Zijlstra }
211b8a21626SPeter Zijlstra EXPORT_SYMBOL(wait_for_completion_interruptible);
212b8a21626SPeter Zijlstra 
/**
 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
 * or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
					  unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
231b8a21626SPeter Zijlstra 
232b8a21626SPeter Zijlstra /**
233b8a21626SPeter Zijlstra  * wait_for_completion_killable: - waits for completion of a task (killable)
234b8a21626SPeter Zijlstra  * @x:  holds the state of this particular completion
235b8a21626SPeter Zijlstra  *
236b8a21626SPeter Zijlstra  * This waits to be signaled for completion of a specific task. It can be
237b8a21626SPeter Zijlstra  * interrupted by a kill signal.
238b8a21626SPeter Zijlstra  *
239b8a21626SPeter Zijlstra  * Return: -ERESTARTSYS if interrupted, 0 if completed.
240b8a21626SPeter Zijlstra  */
241b8a21626SPeter Zijlstra int __sched wait_for_completion_killable(struct completion *x)
242b8a21626SPeter Zijlstra {
243b8a21626SPeter Zijlstra 	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
244b8a21626SPeter Zijlstra 	if (t == -ERESTARTSYS)
245b8a21626SPeter Zijlstra 		return t;
246b8a21626SPeter Zijlstra 	return 0;
247b8a21626SPeter Zijlstra }
248b8a21626SPeter Zijlstra EXPORT_SYMBOL(wait_for_completion_killable);
249b8a21626SPeter Zijlstra 
/**
 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be
 * signaled or for a specified timeout to expire. It can be
 * interrupted by a kill signal. The timeout is in jiffies.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
 * or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_killable_timeout(struct completion *x,
				     unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL(wait_for_completion_killable_timeout);
269b8a21626SPeter Zijlstra 
270b8a21626SPeter Zijlstra /**
271b8a21626SPeter Zijlstra  *	try_wait_for_completion - try to decrement a completion without blocking
272b8a21626SPeter Zijlstra  *	@x:	completion structure
273b8a21626SPeter Zijlstra  *
274b8a21626SPeter Zijlstra  *	Return: 0 if a decrement cannot be done without blocking
275b8a21626SPeter Zijlstra  *		 1 if a decrement succeeded.
276b8a21626SPeter Zijlstra  *
277b8a21626SPeter Zijlstra  *	If a completion is being used as a counting completion,
278b8a21626SPeter Zijlstra  *	attempt to decrement the counter without blocking. This
279b8a21626SPeter Zijlstra  *	enables us to avoid waiting if the resource the completion
280b8a21626SPeter Zijlstra  *	is protecting is not available.
281b8a21626SPeter Zijlstra  */
282b8a21626SPeter Zijlstra bool try_wait_for_completion(struct completion *x)
283b8a21626SPeter Zijlstra {
284b8a21626SPeter Zijlstra 	unsigned long flags;
285d17067e4Sgaurav jindal 	bool ret = true;
286b8a21626SPeter Zijlstra 
2877c34e318SNicholas Mc Guire 	/*
2887c34e318SNicholas Mc Guire 	 * Since x->done will need to be locked only
2897c34e318SNicholas Mc Guire 	 * in the non-blocking case, we check x->done
2907c34e318SNicholas Mc Guire 	 * first without taking the lock so we can
2917c34e318SNicholas Mc Guire 	 * return early in the blocking case.
2927c34e318SNicholas Mc Guire 	 */
293bc956015SOleg Nesterov 	if (!READ_ONCE(x->done))
294d17067e4Sgaurav jindal 		return false;
2957c34e318SNicholas Mc Guire 
296a5c6234eSThomas Gleixner 	raw_spin_lock_irqsave(&x->wait.lock, flags);
297b8a21626SPeter Zijlstra 	if (!x->done)
298d17067e4Sgaurav jindal 		ret = false;
299da9647e0SPeter Zijlstra 	else if (x->done != UINT_MAX)
300b8a21626SPeter Zijlstra 		x->done--;
301a5c6234eSThomas Gleixner 	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
302b8a21626SPeter Zijlstra 	return ret;
303b8a21626SPeter Zijlstra }
304b8a21626SPeter Zijlstra EXPORT_SYMBOL(try_wait_for_completion);
305b8a21626SPeter Zijlstra 
/**
 *	completion_done - Test to see if a completion has any waiters
 *	@x:	completion structure
 *
 *	Return: 0 if there are waiters (wait_for_completion() in progress)
 *		 1 if there are no waiters.
 *
 *	Note, this will always return true if complete_all() was called on @X.
 */
bool completion_done(struct completion *x)
{
	unsigned long flags;

	if (!READ_ONCE(x->done))
		return false;

	/*
	 * If ->done, we need to wait for complete() to release ->wait.lock
	 * otherwise we can end up freeing the completion before complete()
	 * is done referencing it.
	 */
	raw_spin_lock_irqsave(&x->wait.lock, flags);
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
	return true;
}
EXPORT_SYMBOL(completion_done);
332