xref: /linux/kernel/sched/completion.c (revision 929659ac)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2801c1419SIngo Molnar 
3b8a21626SPeter Zijlstra /*
4b8a21626SPeter Zijlstra  * Generic wait-for-completion handler;
5b8a21626SPeter Zijlstra  *
6b8a21626SPeter Zijlstra  * It differs from semaphores in that their default case is the opposite,
7b8a21626SPeter Zijlstra  * wait_for_completion blocks by default whereas a semaphore does not. The
8b8a21626SPeter Zijlstra  * interface also makes it easy to 'complete' multiple waiting threads,
9b8a21626SPeter Zijlstra  * something which isn't entirely natural for semaphores.
10b8a21626SPeter Zijlstra  *
11b8a21626SPeter Zijlstra  * But more importantly, the primitive documents the usage. Semaphores would
12b8a21626SPeter Zijlstra  * typically be used for exclusion which gives rise to priority inversion.
13b8a21626SPeter Zijlstra  * Waiting for completion is typically a sync point, but not an exclusion point.
14b8a21626SPeter Zijlstra  */
15b8a21626SPeter Zijlstra 
/**
 * complete: - signals a single thread waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up a single thread waiting on this completion. Threads will be
 * awakened in the same order in which they were queued.
 *
 * See also complete_all(), wait_for_completion() and related routines.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void complete(struct completion *x)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&x->wait.lock, flags);

	/*
	 * UINT_MAX is the "permanently done" sentinel set by complete_all();
	 * do not increment past it, so such a completion stays saturated.
	 */
	if (x->done != UINT_MAX)
		x->done++;
	/* Wake exactly one waiter, while still holding ->wait.lock. */
	swake_up_locked(&x->wait);
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);
40b8a21626SPeter Zijlstra 
/**
 * complete_all: - signals all threads waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up all threads waiting on this particular completion event.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 *
 * Since complete_all() sets the completion of @x permanently to done
 * to allow multiple waiters to finish, a call to reinit_completion()
 * must be used on @x if @x is to be used again. The code must make
 * sure that all waiters have woken and finished before reinitializing
 * @x. Also note that the function completion_done() can not be used
 * to know if there are still waiters after complete_all() has been called.
 */
void complete_all(struct completion *x)
{
	unsigned long flags;

	/*
	 * NOTE(review): the assert's name indicates the mass wakeup below is
	 * only valid from threaded context on PREEMPT_RT — the assert body is
	 * defined elsewhere; confirm against its definition.
	 */
	lockdep_assert_RT_in_threaded_ctx();

	raw_spin_lock_irqsave(&x->wait.lock, flags);
	/* Saturate ->done so every present and future waiter proceeds. */
	x->done = UINT_MAX;
	swake_up_all_locked(&x->wait);
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
69b8a21626SPeter Zijlstra 
/*
 * Core wait loop for all wait_for_completion*() variants.
 *
 * Called by __wait_for_common() with x->wait.lock held and IRQs disabled;
 * the lock is dropped and re-acquired around each sleep so complete() can
 * make progress.
 *
 * @x:       completion to wait on
 * @action:  sleep primitive (schedule_timeout() or io_schedule_timeout())
 * @timeout: remaining jiffies budget (MAX_SCHEDULE_TIMEOUT for "no limit")
 * @state:   task state to sleep in (TASK_UNINTERRUPTIBLE, TASK_KILLABLE, ...)
 *
 * Returns -ERESTARTSYS when a signal permitted by @state is pending,
 * 0 when the timeout expired before completion, otherwise the remaining
 * timeout (at least 1) after a successful completion.
 */
static inline long __sched
do_wait_for_common(struct completion *x,
		   long (*action)(long), long timeout, int state)
{
	if (!x->done) {
		DECLARE_SWAITQUEUE(wait);

		do {
			/* Signals abort the wait for interruptible states. */
			if (signal_pending_state(state, current)) {
				timeout = -ERESTARTSYS;
				break;
			}
			__prepare_to_swait(&x->wait, &wait);
			__set_current_state(state);
			/* Sleep with the lock released; complete() may run. */
			raw_spin_unlock_irq(&x->wait.lock);
			timeout = action(timeout);
			raw_spin_lock_irq(&x->wait.lock);
		} while (!x->done && timeout);
		__finish_swait(&x->wait, &wait);
		if (!x->done)
			return timeout;
	}
	/* Consume one count unless complete_all() saturated it to UINT_MAX. */
	if (x->done != UINT_MAX)
		x->done--;
	return timeout ?: 1;
}
96b8a21626SPeter Zijlstra 
/*
 * Common entry point for the wait_for_completion*() family: performs the
 * sleep sanity check, wraps the wait in the complete_acquire()/
 * complete_release() annotation hooks, and runs do_wait_for_common()
 * under x->wait.lock with IRQs disabled.
 *
 * Return value semantics are those of do_wait_for_common().
 */
static inline long __sched
__wait_for_common(struct completion *x,
		  long (*action)(long), long timeout, int state)
{
	might_sleep();

	complete_acquire(x);

	raw_spin_lock_irq(&x->wait.lock);
	timeout = do_wait_for_common(x, action, timeout, state);
	raw_spin_unlock_irq(&x->wait.lock);

	complete_release(x);

	return timeout;
}
113b8a21626SPeter Zijlstra 
114b8a21626SPeter Zijlstra static long __sched
115b8a21626SPeter Zijlstra wait_for_common(struct completion *x, long timeout, int state)
116b8a21626SPeter Zijlstra {
117b8a21626SPeter Zijlstra 	return __wait_for_common(x, schedule_timeout, timeout, state);
118b8a21626SPeter Zijlstra }
119b8a21626SPeter Zijlstra 
120b8a21626SPeter Zijlstra static long __sched
121b8a21626SPeter Zijlstra wait_for_common_io(struct completion *x, long timeout, int state)
122b8a21626SPeter Zijlstra {
123b8a21626SPeter Zijlstra 	return __wait_for_common(x, io_schedule_timeout, timeout, state);
124b8a21626SPeter Zijlstra }
125b8a21626SPeter Zijlstra 
/**
 * wait_for_completion: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout.
 *
 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
 * and interrupt capability. Also see complete().
 */
void __sched wait_for_completion(struct completion *x)
{
	/* Unbounded, uninterruptible wait; the return value is discarded. */
	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion);
141b8a21626SPeter Zijlstra 
/**
 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible.
 *
 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
 * till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
	/* TASK_UNINTERRUPTIBLE: signals cannot abort the wait. */
	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_timeout);
160b8a21626SPeter Zijlstra 
/**
 * wait_for_completion_io: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout. The caller is accounted as waiting
 * for IO (which traditionally means blkio only).
 */
void __sched wait_for_completion_io(struct completion *x)
{
	/* Same as wait_for_completion() but sleeps via io_schedule_timeout(). */
	wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io);
174b8a21626SPeter Zijlstra 
/**
 * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible. The caller is accounted as waiting for IO (which traditionally
 * means blkio only).
 *
 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
 * till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
{
	/* IO-accounted variant of wait_for_completion_timeout(). */
	return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io_timeout);
194b8a21626SPeter Zijlstra 
195b8a21626SPeter Zijlstra /**
196b8a21626SPeter Zijlstra  * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
197b8a21626SPeter Zijlstra  * @x:  holds the state of this particular completion
198b8a21626SPeter Zijlstra  *
199b8a21626SPeter Zijlstra  * This waits for completion of a specific task to be signaled. It is
200b8a21626SPeter Zijlstra  * interruptible.
201b8a21626SPeter Zijlstra  *
202b8a21626SPeter Zijlstra  * Return: -ERESTARTSYS if interrupted, 0 if completed.
203b8a21626SPeter Zijlstra  */
204b8a21626SPeter Zijlstra int __sched wait_for_completion_interruptible(struct completion *x)
205b8a21626SPeter Zijlstra {
206b8a21626SPeter Zijlstra 	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
207*929659acSPeter Zijlstra 
208b8a21626SPeter Zijlstra 	if (t == -ERESTARTSYS)
209b8a21626SPeter Zijlstra 		return t;
210b8a21626SPeter Zijlstra 	return 0;
211b8a21626SPeter Zijlstra }
212b8a21626SPeter Zijlstra EXPORT_SYMBOL(wait_for_completion_interruptible);
213b8a21626SPeter Zijlstra 
/**
 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
 * or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
					  unsigned long timeout)
{
	/* TASK_INTERRUPTIBLE: any pending signal aborts the wait. */
	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
232b8a21626SPeter Zijlstra 
233b8a21626SPeter Zijlstra /**
234b8a21626SPeter Zijlstra  * wait_for_completion_killable: - waits for completion of a task (killable)
235b8a21626SPeter Zijlstra  * @x:  holds the state of this particular completion
236b8a21626SPeter Zijlstra  *
237b8a21626SPeter Zijlstra  * This waits to be signaled for completion of a specific task. It can be
238b8a21626SPeter Zijlstra  * interrupted by a kill signal.
239b8a21626SPeter Zijlstra  *
240b8a21626SPeter Zijlstra  * Return: -ERESTARTSYS if interrupted, 0 if completed.
241b8a21626SPeter Zijlstra  */
242b8a21626SPeter Zijlstra int __sched wait_for_completion_killable(struct completion *x)
243b8a21626SPeter Zijlstra {
244b8a21626SPeter Zijlstra 	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
245*929659acSPeter Zijlstra 
246b8a21626SPeter Zijlstra 	if (t == -ERESTARTSYS)
247b8a21626SPeter Zijlstra 		return t;
248b8a21626SPeter Zijlstra 	return 0;
249b8a21626SPeter Zijlstra }
250b8a21626SPeter Zijlstra EXPORT_SYMBOL(wait_for_completion_killable);
251b8a21626SPeter Zijlstra 
/**
 * wait_for_completion_state: - waits for completion of a task in a given state
 * @x:  holds the state of this particular completion
 * @state:  task state bits used while waiting, passed through to the
 *          wait loop (cf. TASK_INTERRUPTIBLE/TASK_KILLABLE variants above)
 *
 * This waits to be signaled for completion of a specific task, sleeping in
 * the caller-supplied @state. There is no timeout.
 *
 * Return: -ERESTARTSYS if interrupted by a signal permitted by @state,
 * 0 if completed.
 */
int __sched wait_for_completion_state(struct completion *x, unsigned int state)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, state);

	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_state);
261*929659acSPeter Zijlstra 
/**
 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be
 * signaled or for a specified timeout to expire. It can be
 * interrupted by a kill signal. The timeout is in jiffies.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
 * or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_killable_timeout(struct completion *x,
				     unsigned long timeout)
{
	/* TASK_KILLABLE: only fatal signals abort the wait. */
	return wait_for_common(x, timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL(wait_for_completion_killable_timeout);
281b8a21626SPeter Zijlstra 
/**
 *	try_wait_for_completion - try to decrement a completion without blocking
 *	@x:	completion structure
 *
 *	Return: 0 if a decrement cannot be done without blocking
 *		 1 if a decrement succeeded.
 *
 *	If a completion is being used as a counting completion,
 *	attempt to decrement the counter without blocking. This
 *	enables us to avoid waiting if the resource the completion
 *	is protecting is not available.
 */
bool try_wait_for_completion(struct completion *x)
{
	unsigned long flags;
	bool ret = true;

	/*
	 * Since x->done will need to be locked only
	 * in the non-blocking case, we check x->done
	 * first without taking the lock so we can
	 * return early in the blocking case.
	 */
	if (!READ_ONCE(x->done))
		return false;

	raw_spin_lock_irqsave(&x->wait.lock, flags);
	/* Re-check under the lock: ->done may have been consumed meanwhile. */
	if (!x->done)
		ret = false;
	else if (x->done != UINT_MAX)
		/* Consume one count; UINT_MAX (complete_all()) stays put. */
		x->done--;
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
317b8a21626SPeter Zijlstra 
/**
 *	completion_done - Test to see if a completion has any waiters
 *	@x:	completion structure
 *
 *	Return: 0 if there are waiters (wait_for_completion() in progress)
 *		 1 if there are no waiters.
 *
 *	Note, this will always return true if complete_all() was called on @X.
 */
bool completion_done(struct completion *x)
{
	unsigned long flags;

	/* Lockless fast path: an unfinished completion needs no lock. */
	if (!READ_ONCE(x->done))
		return false;

	/*
	 * If ->done, we need to wait for complete() to release ->wait.lock
	 * otherwise we can end up freeing the completion before complete()
	 * is done referencing it.
	 */
	raw_spin_lock_irqsave(&x->wait.lock, flags);
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
	return true;
}
EXPORT_SYMBOL(completion_done);
344