xref: /linux/kernel/sched/completion.c (revision 6f63904c)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2801c1419SIngo Molnar 
3b8a21626SPeter Zijlstra /*
4b8a21626SPeter Zijlstra  * Generic wait-for-completion handler;
5b8a21626SPeter Zijlstra  *
6b8a21626SPeter Zijlstra  * It differs from semaphores in that their default case is the opposite,
7b8a21626SPeter Zijlstra  * wait_for_completion default blocks whereas semaphore default non-block. The
8b8a21626SPeter Zijlstra  * interface also makes it easy to 'complete' multiple waiting threads,
9b8a21626SPeter Zijlstra  * something which isn't entirely natural for semaphores.
10b8a21626SPeter Zijlstra  *
11b8a21626SPeter Zijlstra  * But more importantly, the primitive documents the usage. Semaphores would
12b8a21626SPeter Zijlstra  * typically be used for exclusion which gives rise to priority inversion.
13b8a21626SPeter Zijlstra  * Waiting for completion is typically a sync point, but not an exclusion point.
14b8a21626SPeter Zijlstra  */
15b8a21626SPeter Zijlstra 
/*
 * complete_with_flags - bump ->done and wake one waiter
 * @x:          completion to signal
 * @wake_flags: WF_* flags forwarded to swake_up_locked() (e.g. WF_CURRENT_CPU)
 *
 * ->done saturates at UINT_MAX: that value is what complete_all() sets, and
 * it must stay there so the completion remains "done forever".
 */
static void complete_with_flags(struct completion *x, int wake_flags)
{
	unsigned long flags;

	/* ->wait.lock serializes ->done against do_wait_for_common() */
	raw_spin_lock_irqsave(&x->wait.lock, flags);

	if (x->done != UINT_MAX)
		x->done++;
	swake_up_locked(&x->wait, wake_flags);
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
27*6f63904cSAndrei Vagin 
/**
 * complete_on_current_cpu: - variant of complete() passing WF_CURRENT_CPU
 * @x:  holds the state of this particular completion
 *
 * Same as complete(), but the wakeup is flagged with WF_CURRENT_CPU so the
 * scheduler may prefer placing the woken task on the current CPU.
 */
void complete_on_current_cpu(struct completion *x)
{
	return complete_with_flags(x, WF_CURRENT_CPU);
}
32*6f63904cSAndrei Vagin 
/**
 * complete: - signals a single thread waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up a single thread waiting on this completion. Threads will be
 * awakened in the same order in which they were queued.
 *
 * See also complete_all(), wait_for_completion() and related routines.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void complete(struct completion *x)
{
	/* No wake flags: default wakeup placement. */
	complete_with_flags(x, 0);
}
EXPORT_SYMBOL(complete);
50b8a21626SPeter Zijlstra 
/**
 * complete_all: - signals all threads waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up all threads waiting on this particular completion event.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 *
 * Since complete_all() sets the completion of @x permanently to done
 * to allow multiple waiters to finish, a call to reinit_completion()
 * must be used on @x if @x is to be used again. The code must make
 * sure that all waiters have woken and finished before reinitializing
 * @x. Also note that the function completion_done() can not be used
 * to know if there are still waiters after complete_all() has been called.
 */
void complete_all(struct completion *x)
{
	unsigned long flags;

	lockdep_assert_RT_in_threaded_ctx();

	raw_spin_lock_irqsave(&x->wait.lock, flags);
	/* UINT_MAX marks "done forever"; complete*() will not increment past it */
	x->done = UINT_MAX;
	swake_up_all_locked(&x->wait);
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
79b8a21626SPeter Zijlstra 
/*
 * do_wait_for_common - core wait loop; called with ->wait.lock held, irqs off
 * @x:       completion to wait on
 * @action:  sleep primitive (schedule_timeout() or io_schedule_timeout())
 * @timeout: jiffies budget, or MAX_SCHEDULE_TIMEOUT
 * @state:   task state to sleep in (TASK_UNINTERRUPTIBLE, TASK_KILLABLE, ...)
 *
 * Drops and re-takes ->wait.lock around each @action call so complete()
 * can get in. Returns -ERESTARTSYS on signal, 0 on timeout, otherwise the
 * remaining timeout (at least 1) after consuming one ->done count.
 */
static inline long __sched
do_wait_for_common(struct completion *x,
		   long (*action)(long), long timeout, int state)
{
	if (!x->done) {
		DECLARE_SWAITQUEUE(wait);

		do {
			if (signal_pending_state(state, current)) {
				timeout = -ERESTARTSYS;
				break;
			}
			__prepare_to_swait(&x->wait, &wait);
			__set_current_state(state);
			/* sleep with the lock released so complete() can run */
			raw_spin_unlock_irq(&x->wait.lock);
			timeout = action(timeout);
			raw_spin_lock_irq(&x->wait.lock);
		} while (!x->done && timeout);
		__finish_swait(&x->wait, &wait);
		if (!x->done)
			return timeout;
	}
	/* UINT_MAX == complete_all(): stays permanently done */
	if (x->done != UINT_MAX)
		x->done--;
	/* timeout may have hit 0 exactly as ->done was set: still success */
	return timeout ?: 1;
}
106b8a21626SPeter Zijlstra 
/*
 * __wait_for_common - take ->wait.lock and run the wait loop
 *
 * Wraps do_wait_for_common() with the lock acquisition and the
 * complete_acquire()/complete_release() lockdep annotations.
 */
static inline long __sched
__wait_for_common(struct completion *x,
		  long (*action)(long), long timeout, int state)
{
	might_sleep();

	complete_acquire(x);

	raw_spin_lock_irq(&x->wait.lock);
	timeout = do_wait_for_common(x, action, timeout, state);
	raw_spin_unlock_irq(&x->wait.lock);

	complete_release(x);

	return timeout;
}
123b8a21626SPeter Zijlstra 
/* Wait using the plain scheduler path (no IO-wait accounting). */
static long __sched
wait_for_common(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, schedule_timeout, timeout, state);
}
129b8a21626SPeter Zijlstra 
/* Wait while being accounted as blocked on IO (io_schedule_timeout()). */
static long __sched
wait_for_common_io(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, io_schedule_timeout, timeout, state);
}
135b8a21626SPeter Zijlstra 
/**
 * wait_for_completion: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout.
 *
 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
 * and interrupt capability. Also see complete().
 */
void __sched wait_for_completion(struct completion *x)
{
	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion);
151b8a21626SPeter Zijlstra 
/**
 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible.
 *
 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
 * till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
	/* cannot return -ERESTARTSYS: TASK_UNINTERRUPTIBLE ignores signals */
	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_timeout);
170b8a21626SPeter Zijlstra 
/**
 * wait_for_completion_io: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout. The caller is accounted as waiting
 * for IO (which traditionally means blkio only).
 */
void __sched wait_for_completion_io(struct completion *x)
{
	wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io);
184b8a21626SPeter Zijlstra 
/**
 * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible. The caller is accounted as waiting for IO (which traditionally
 * means blkio only).
 *
 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
 * till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io_timeout);
204b8a21626SPeter Zijlstra 
205b8a21626SPeter Zijlstra /**
206b8a21626SPeter Zijlstra  * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
207b8a21626SPeter Zijlstra  * @x:  holds the state of this particular completion
208b8a21626SPeter Zijlstra  *
209b8a21626SPeter Zijlstra  * This waits for completion of a specific task to be signaled. It is
210b8a21626SPeter Zijlstra  * interruptible.
211b8a21626SPeter Zijlstra  *
212b8a21626SPeter Zijlstra  * Return: -ERESTARTSYS if interrupted, 0 if completed.
213b8a21626SPeter Zijlstra  */
wait_for_completion_interruptible(struct completion * x)214b8a21626SPeter Zijlstra int __sched wait_for_completion_interruptible(struct completion *x)
215b8a21626SPeter Zijlstra {
216b8a21626SPeter Zijlstra 	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
217929659acSPeter Zijlstra 
218b8a21626SPeter Zijlstra 	if (t == -ERESTARTSYS)
219b8a21626SPeter Zijlstra 		return t;
220b8a21626SPeter Zijlstra 	return 0;
221b8a21626SPeter Zijlstra }
222b8a21626SPeter Zijlstra EXPORT_SYMBOL(wait_for_completion_interruptible);
223b8a21626SPeter Zijlstra 
/**
 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
 * or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
					  unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
242b8a21626SPeter Zijlstra 
243b8a21626SPeter Zijlstra /**
244b8a21626SPeter Zijlstra  * wait_for_completion_killable: - waits for completion of a task (killable)
245b8a21626SPeter Zijlstra  * @x:  holds the state of this particular completion
246b8a21626SPeter Zijlstra  *
247b8a21626SPeter Zijlstra  * This waits to be signaled for completion of a specific task. It can be
248b8a21626SPeter Zijlstra  * interrupted by a kill signal.
249b8a21626SPeter Zijlstra  *
250b8a21626SPeter Zijlstra  * Return: -ERESTARTSYS if interrupted, 0 if completed.
251b8a21626SPeter Zijlstra  */
wait_for_completion_killable(struct completion * x)252b8a21626SPeter Zijlstra int __sched wait_for_completion_killable(struct completion *x)
253b8a21626SPeter Zijlstra {
254b8a21626SPeter Zijlstra 	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
255929659acSPeter Zijlstra 
256b8a21626SPeter Zijlstra 	if (t == -ERESTARTSYS)
257b8a21626SPeter Zijlstra 		return t;
258b8a21626SPeter Zijlstra 	return 0;
259b8a21626SPeter Zijlstra }
260b8a21626SPeter Zijlstra EXPORT_SYMBOL(wait_for_completion_killable);
261b8a21626SPeter Zijlstra 
/**
 * wait_for_completion_state: - waits for completion with caller-chosen state
 * @x:  holds the state of this particular completion
 * @state:  task state to sleep in (e.g. TASK_INTERRUPTIBLE | TASK_FREEZABLE)
 *
 * Like wait_for_completion_interruptible()/_killable(), but lets the caller
 * pick the exact sleep state.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if completed.
 */
int __sched wait_for_completion_state(struct completion *x, unsigned int state)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, state);

	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_state);
271929659acSPeter Zijlstra 
/**
 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be
 * signaled or for a specified timeout to expire. It can be
 * interrupted by a kill signal. The timeout is in jiffies.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
 * or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_killable_timeout(struct completion *x,
				     unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL(wait_for_completion_killable_timeout);
291b8a21626SPeter Zijlstra 
/**
 *	try_wait_for_completion - try to decrement a completion without blocking
 *	@x:	completion structure
 *
 *	Return: 0 if a decrement cannot be done without blocking
 *		 1 if a decrement succeeded.
 *
 *	If a completion is being used as a counting completion,
 *	attempt to decrement the counter without blocking. This
 *	enables us to avoid waiting if the resource the completion
 *	is protecting is not available.
 */
bool try_wait_for_completion(struct completion *x)
{
	unsigned long flags;
	bool ret = true;

	/*
	 * Since x->done will need to be locked only
	 * in the non-blocking case, we check x->done
	 * first without taking the lock so we can
	 * return early in the blocking case.
	 */
	if (!READ_ONCE(x->done))
		return false;

	/* re-check under the lock: the lockless peek may have raced */
	raw_spin_lock_irqsave(&x->wait.lock, flags);
	if (!x->done)
		ret = false;
	else if (x->done != UINT_MAX)	/* don't consume a complete_all() */
		x->done--;
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
327b8a21626SPeter Zijlstra 
/**
 *	completion_done - Test to see if a completion has any waiters
 *	@x:	completion structure
 *
 *	Return: 0 if there are waiters (wait_for_completion() in progress)
 *		 1 if there are no waiters.
 *
 *	Note, this will always return true if complete_all() was called on @X.
 */
bool completion_done(struct completion *x)
{
	unsigned long flags;

	if (!READ_ONCE(x->done))
		return false;

	/*
	 * If ->done, we need to wait for complete() to release ->wait.lock
	 * otherwise we can end up freeing the completion before complete()
	 * is done referencing it.
	 */
	raw_spin_lock_irqsave(&x->wait.lock, flags);
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
	return true;
}
EXPORT_SYMBOL(completion_done);
354