xref: /dragonfly/sys/dev/drm/linux_fence.c (revision 6a3cbbc2)
1 /*	$NetBSD: linux_fence.c,v 1.14 2019/01/05 22:24:24 tnn Exp $	*/
2 
3 /*-
4  * Copyright (c) 2018 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Taylor R. Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 
34 #include <sys/condvar.h>
35 #include <sys/queue.h>
36 
37 #include <linux/compiler.h>
38 
39 #include <linux/atomic.h>
40 #include <linux/errno.h>
41 #include <linux/kref.h>
42 #include <linux/fence.h>
43 #include <linux/sched.h>
44 #include <linux/slab.h>
45 #include <linux/spinlock.h>
46 
47 /*
48  * linux_fence_trace
49  *
50  *	True if we print FENCE_TRACE messages, false if not.  These are
51  *	extremely noisy, too much even for AB_VERBOSE and AB_DEBUG in
52  *	boothowto.
53  */
54 int	linux_fence_trace = 0;
55 
56 /*
57  * fence_init(fence, ops, lock, context, seqno)
58  *
59  *	Initialize fence.  Caller should call fence_destroy when done,
60  *	after all references have been released.
61  */
62 void
63 fence_init(struct fence *fence, const struct fence_ops *ops, struct lock *lock,
64     u64 context, unsigned seqno)
65 {
66 
67 	kref_init(&fence->refcount);
68 	fence->lock = lock;
69 	fence->flags = 0;
70 	fence->context = context;
71 	fence->seqno = seqno;
72 	fence->ops = ops;
73 	TAILQ_INIT(&fence->f_callbacks);
74 	cv_init(&fence->f_cv, "fence");
75 }
76 
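/*
 * Illustrative sketch only (not used by this file): a driver would
 * typically embed a struct fence in its own job object and pair
 * fence_init with a final fence_put roughly as follows.  The mydrv_*
 * names here are hypothetical.
 *
 *	struct mydrv_job *job = kmalloc(sizeof(*job), GFP_KERNEL);
 *
 *	fence_init(&job->fence, &mydrv_fence_ops, &job->fence_lock,
 *	    job->ring->fence_context, ++job->ring->seqno);
 *	...
 *	fence_put(&job->fence);	-- last reference runs ops->release
 */
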
77 /*
78  * fence_destroy(fence)
79  *
80  *	Clean up memory initialized with fence_init.  This is meant to
81  *	be used after a fence release callback.
82  */
83 void
84 fence_destroy(struct fence *fence)
85 {
86 
87 #if 0
88 	KASSERT(!fence_referenced_p(fence));
89 
90 	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
91 #endif
92 	cv_destroy(&fence->f_cv);
93 }
94 
95 #ifdef __NetBSD__
96 static void
97 fence_free_cb(struct rcu_head *rcu)
98 {
99 	struct fence *fence = container_of(rcu, struct fence, f_rcu);
100 
101 	fence_destroy(fence);
102 	kfree(fence);
103 }
104 #endif
105 
106 /*
107  * fence_free(fence)
108  *
109  *	Schedule fence to be destroyed and then freed with kfree once
110  *	any pending RCU read sections on all CPUs have completed.  (On
111  *	DragonFly the fence is simply kfreed immediately.)  Caller must
112  *	guarantee all references have been released.  This is meant to
113  *	be used after a fence release callback.
114  *
115  *	NOTE: Callers assume kfree will be used, so the caller must
116  *	have allocated the fence with kmalloc before calling fence_init.
117  */
118 void
119 fence_free(struct fence *fence)
120 {
121 #ifdef __NetBSD__
122 	/* fence_free_cb will destroy and kfree the fence after RCU.  */
123 	call_rcu(&fence->f_rcu, &fence_free_cb);
124 #else
125 	kfree(fence);
126 #endif
127 }
128 
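/*
 * atomic_add_int_nv(target, delta)
 *
 *	Local helper: atomically add delta to *target and return the
 *	new ("nv") value, built on DragonFly's atomic_fetchadd_32.
 */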
129 static inline uint32_t
130 atomic_add_int_nv(volatile uint32_t *target, int32_t delta)
131 {
132 	return (atomic_fetchadd_32(target, delta) + delta);
133 }
134 
135 /*
136  * fence_context_alloc(n)
137  *
138  *	Return the first of a contiguous block of n unique fence
139  *	context identifiers (unique until the 64-bit counter wraps).
140  */
141 u64
142 fence_context_alloc(unsigned n)
143 {
144 	static atomic64_t next_context = { 0 };
145 
146 	return atomic64_add_return(n, &next_context) - n;
147 }
148 
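/*
 * For example (illustrative only; NRINGS and ring[] are hypothetical),
 * a driver with several command rings might reserve one context per
 * ring:
 *
 *	u64 base = fence_context_alloc(NRINGS);
 *	for (i = 0; i < NRINGS; i++)
 *		ring[i].fence_context = base + i;
 */
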
149 /*
150  * fence_is_later(a, b)
151  *
152  *	True if the sequence number of fence a is later than the
153  *	sequence number of fence b.  Since sequence numbers wrap
154  *	around, we define this to mean that the sequence number of
155  *	fence a is less than INT_MAX past the sequence number of
156  *	fence b.
157  *
158  *	The two fences must have the same context.
159  */
160 bool
161 fence_is_later(struct fence *a, struct fence *b)
162 {
163 #if 0
164 	KASSERTMSG(a->context == b->context, "incommensurate fences"
165 	    ": %u @ %p =/= %u @ %p", a->context, a, b->context, b);
166 #endif
167 
168 	return a->seqno - b->seqno < INT_MAX;
169 }
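
/*
 * Worked example of the wrap-around rule above: with 32-bit sequence
 * numbers, a fence with seqno 3 is "later" than one with seqno
 * 0xfffffffe, because (uint32_t)(3 - 0xfffffffe) == 5 < INT_MAX.
 */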
170 
171 /*
172  * fence_get(fence)
173  *
174  *	Acquire a reference to fence.  The fence must not be being
175  *	destroyed.  Return the fence.
176  */
177 struct fence *
178 fence_get(struct fence *fence)
179 {
180 
181 	if (fence)
182 		kref_get(&fence->refcount);
183 	return fence;
184 }
185 
186 /*
187  * fence_get_rcu(fence)
188  *
189  *	Attempt to acquire a reference to a fence that may be about to
190  *	be destroyed, during a read section.  Return the fence on
191  *	success, or NULL on failure.
192  */
193 struct fence *
194 fence_get_rcu(struct fence *fence)
195 {
196 
197 	if (!kref_get_unless_zero(&fence->refcount))
198 		return NULL;
199 	return fence;
200 }
201 
202 static void
203 fence_release(struct kref *refcount)
204 {
205 	struct fence *fence = container_of(refcount, struct fence, refcount);
206 
207 	if (fence->ops->release)
208 		(*fence->ops->release)(fence);
209 	else
210 		fence_free(fence);
211 }
212 
213 /*
214  * fence_put(fence)
215  *
216  *	Release a reference to fence.  If this was the last one, call
217  *	the fence's release callback.
218  */
219 void
220 fence_put(struct fence *fence)
221 {
222 
223 	if (fence == NULL)
224 		return;
225 	kref_put(&fence->refcount, &fence_release);
226 }
227 
228 /*
229  * fence_ensure_signal_enabled(fence)
230  *
231  *	Internal subroutine.  If the fence was already signalled,
232  *	return -ENOENT.  Otherwise, if the enable signalling callback
233  *	has not been called yet, call it.  If fails, signal the fence
234  *	has not been called yet, call it.  If it fails, signal the fence
235  *	called, return zero to indicate success.
236  *
237  *	Caller must hold the fence's lock.
238  */
239 static int
240 fence_ensure_signal_enabled(struct fence *fence)
241 {
242 #if 0
243 	KKASSERT(spin_is_locked(fence->lock));
244 #endif
245 
246 	/* If the fence was already signalled, fail with -ENOENT.  */
247 	if (fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT))
248 		return -ENOENT;
249 
250 	/*
251 	 * If the enable-signalling callback has already been called,
252 	 * we're done; otherwise mark it called and fall through.
253 	 */
254 	if (test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags))
255 		return 0;
256 
257 	/* Call the enable-signalling callback.  */
258 	if (!(*fence->ops->enable_signaling)(fence)) {
259 		/* If it failed, signal and return -ENOENT.  */
260 		fence_signal_locked(fence);
261 		return -ENOENT;
262 	}
263 
264 	/* Success!  */
265 	return 0;
266 }
267 
268 /*
269  * fence_add_callback(fence, fcb, fn)
270  *
271  *	If fence has been signalled, return -ENOENT.  If the enable
272  *	signalling callback hasn't been called yet, call it; if it
273  *	fails, return -ENOENT.  Otherwise, arrange to call fn(fence,
274  *	fcb) when it is signalled, and return 0.
275  *
276  *	The fence uses the caller-allocated memory in fcb from the
277  *	time of fence_add_callback until either fence_remove_callback
278  *	is called or fn has been invoked, whichever comes first.
279  */
280 int
281 fence_add_callback(struct fence *fence, struct fence_cb *fcb, fence_func_t fn)
282 {
283 	int ret;
284 
286 	/* Optimistically try to skip the lock if it's already signalled.  */
287 	if (fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT)) {
288 		ret = -ENOENT;
289 		goto out0;
290 	}
291 
292 	/* Acquire the lock.  */
293 	mutex_lock(fence->lock);
294 
295 	/* Ensure signalling is enabled, or fail if we can't.  */
296 	ret = fence_ensure_signal_enabled(fence);
297 	if (ret)
298 		goto out1;
299 
300 	/* Insert the callback.  */
301 	fcb->func = fn;
302 	TAILQ_INSERT_TAIL(&fence->f_callbacks, fcb, fcb_entry);
303 	fcb->fcb_onqueue = true;
304 
305 	/* Release the lock and we're done.  */
306 out1:	mutex_unlock(fence->lock);
307 out0:	return ret;
308 }
309 
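/*
 * Illustrative use only (the mydrv_* names are hypothetical): since
 * -ENOENT here always means the fence is already signalled, callers
 * commonly just run the callback themselves in that case:
 *
 *	ret = fence_add_callback(fence, &job->fcb, mydrv_job_done);
 *	if (ret == -ENOENT)
 *		mydrv_job_done(fence, &job->fcb);
 */
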
310 /*
311  * fence_remove_callback(fence, fcb)
312  *
313  *	Remove the callback fcb from fence.  Return true if it was
314  *	removed from the list, or false if it had already run and so
315  *	was no longer queued anyway.  Caller must have already called
316  *	fence_add_callback(fence, fcb).
317  */
318 bool
319 fence_remove_callback(struct fence *fence, struct fence_cb *fcb)
320 {
321 	bool onqueue;
322 
324 	mutex_lock(fence->lock);
325 	onqueue = fcb->fcb_onqueue;
326 	if (onqueue) {
327 		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
328 		fcb->fcb_onqueue = false;
329 	}
330 	mutex_unlock(fence->lock);
331 
332 	return onqueue;
333 }
334 
335 /*
336  * fence_enable_sw_signaling(fence)
337  *
338  *	If it hasn't been called yet and the fence hasn't been
339  *	If the fence's enable_signaling callback hasn't been called
340  *	yet and the fence hasn't been signalled yet, call it.  If the
341  *	callback indicates failure by returning false, signal the
342  *	fence.
343 void
344 fence_enable_sw_signaling(struct fence *fence)
345 {
346 	mutex_lock(fence->lock);
347 	(void)fence_ensure_signal_enabled(fence);
348 	mutex_unlock(fence->lock);
349 }
350 
351 /*
352  * fence_is_signaled(fence)
353  *
354  *	Test whether the fence has been signalled.  If it has been
355  *	signalled by fence_signal(_locked), return true.  If the
356  *	signalled callback returns true indicating that some implicit
357  *	external condition has changed, call the callbacks as if with
358  *	fence_signal.
359  */
360 bool
361 fence_is_signaled(struct fence *fence)
362 {
363 	bool signaled;
364 
365 	mutex_lock(fence->lock);
366 	signaled = fence_is_signaled_locked(fence);
367 	mutex_unlock(fence->lock);
368 
369 	return signaled;
370 }
371 
372 /*
373  * fence_is_signaled_locked(fence)
374  *
375  *	Test whether the fence has been signalled.  Like
376  *	fence_is_signaled, but the caller already holds the fence's lock.
377  */
378 bool
379 fence_is_signaled_locked(struct fence *fence)
380 {
381 
382 #if 0
383 	KKASSERT(spin_is_locked(fence->lock));
384 #endif
385 
386 	/* Check whether we already set the signalled bit.  */
387 	if (fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT))
388 		return true;
389 
390 	/* If there's a signalled callback, test it.  */
391 	if (fence->ops->signaled) {
392 		if ((*fence->ops->signaled)(fence)) {
393 			/*
394 			 * It's been signalled implicitly by some
395 			 * external phenomenon.  Act as though someone
396 			 * has called fence_signal.
397 			 */
398 			fence_signal_locked(fence);
399 			return true;
400 		}
401 	}
402 
403 	return false;
404 }
405 
406 /*
407  * fence_signal(fence)
408  *
409  *	Signal the fence.  If it has already been signalled, return
410  *	-EINVAL.  If it has not been signalled, set the signalled
411  *	bit, wake any waiters sleeping on the internal condvar, and
412  *	remove each registered callback from the queue and call it;
413  *	then return 0.
414  */
415 int
416 fence_signal(struct fence *fence)
417 {
418 	int ret;
419 
420 	mutex_lock(fence->lock);
421 	ret = fence_signal_locked(fence);
422 	mutex_unlock(fence->lock);
423 
424 	return ret;
425 }
426 
427 /*
428  * fence_signal_locked(fence)
429  *
430  *	Signal the fence.  Like fence_signal, but caller already holds
431  *	the fence's lock.
432  */
433 int
434 fence_signal_locked(struct fence *fence)
435 {
436 	struct fence_cb *fcb, *next;
437 
438 #if 0
439 	KKASSERT(spin_is_locked(fence->lock));
440 #endif
441 
442 	/* If it's been signalled, fail; otherwise set the signalled bit.  */
443 	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
444 		return -EINVAL;
445 
446 	/* Wake waiters.  */
447 	cv_broadcast(&fence->f_cv);
448 
449 	/* Remove and call the callbacks.  */
450 	TAILQ_FOREACH_MUTABLE(fcb, &fence->f_callbacks, fcb_entry, next) {
451 		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
452 		fcb->fcb_onqueue = false;
453 		(*fcb->func)(fence, fcb);
454 	}
455 
456 	/* Success! */
457 	return 0;
458 }
459 
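/*
 * wait_any, wait_any1
 *
 *	Callback records for fence_wait_any_timeout.  One wait_any is
 *	registered per fence; each points at a single shared wait_any1
 *	holding the lock, condvar, and done flag that the first
 *	callback to fire sets before broadcasting.
 */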
460 struct wait_any {
461 	struct fence_cb	fcb;
462 	struct wait_any1 {
463 		struct lock lock;
464 		struct cv cv;
465 		bool		done;
466 	}		*common;
467 };
468 
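/*
 * wait_any_cb(fence, fcb)
 *
 *	Fence callback for fence_wait_any_timeout: mark the shared wait
 *	state as done and wake the waiter.
 */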
469 static void
470 wait_any_cb(struct fence *fence, struct fence_cb *fcb)
471 {
472 	struct wait_any *cb = container_of(fcb, struct wait_any, fcb);
473 
474 	mutex_lock(&cb->common->lock);
475 	cb->common->done = true;
476 	cv_broadcast(&cb->common->cv);
477 	mutex_unlock(&cb->common->lock);
478 }
479 
480 /*
481  * fence_wait_any_timeout(fence, nfences, intr, timeout)
482  *
483  *	Wait for any of fences[0], fences[1], fences[2], ...,
484  *	fences[nfences-1] to be signaled.
485  */
486 long
487 fence_wait_any_timeout(struct fence **fences, uint32_t nfences, bool intr,
488     long timeout)
489 {
490 	struct wait_any1 common;
491 	struct wait_any *cb;
492 	uint32_t i, j;
493 	int start, end;
494 	long ret = 0;
495 
496 	/* Allocate an array of callback records.  */
497 	cb = kcalloc(nfences, sizeof(cb[0]), GFP_KERNEL);
498 	if (cb == NULL) {
499 		ret = -ENOMEM;
500 		goto out0;
501 	}
502 
503 	/* Initialize a mutex and condvar for the common wait.  */
504 	lockinit(&common.lock, "drmfcl", 0, LK_CANRECURSE);
505 	cv_init(&common.cv, "fence");
506 	common.done = false;
507 
508 	/* Add a callback to each of the fences, or stop here if we can't.  */
509 	for (i = 0; i < nfences; i++) {
510 		cb[i].common = &common;
511 		ret = fence_add_callback(fences[i], &cb[i].fcb, &wait_any_cb);
512 		if (ret)
513 			goto out1;
514 	}
515 
516 	/*
517 	 * Test whether any of the fences has been signalled.  If they
518 	 * have, stop here.  If they haven't, we are guaranteed to be
519 	 * notified by one of the callbacks when they have.
520 	 */
521 	for (j = 0; j < nfences; j++) {
522 		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fences[j]->flags))
523 			goto out1;
524 	}
525 
526 	/*
527 	 * None of them was ready immediately.  Wait for one of the
528 	 * callbacks to notify us when it is done.
529 	 */
530 	mutex_lock(&common.lock);
531 	while (timeout > 0 && !common.done) {
532 		start = ticks;
533 		cpu_ccfence();
534 		if (intr) {
535 			if (timeout != MAX_SCHEDULE_TIMEOUT) {
536 				ret = -cv_timedwait_sig(&common.cv,
537 				    &common.lock, MIN(timeout, /* paranoia */
538 					MAX_SCHEDULE_TIMEOUT));
539 			} else {
540 				ret = -cv_wait_sig(&common.cv, &common.lock);
541 			}
542 		} else {
543 			if (timeout != MAX_SCHEDULE_TIMEOUT) {
544 				ret = -cv_timedwait(&common.cv,
545 				    &common.lock, MIN(timeout, /* paranoia */
546 					MAX_SCHEDULE_TIMEOUT));
547 			} else {
548 				cv_wait(&common.cv, &common.lock);
549 				ret = 0;
550 			}
551 		}
552 		end = ticks;
553 		cpu_ccfence();
554 		if (ret)
555 			break;
556 		timeout -= MIN(timeout, (unsigned)end - (unsigned)start);
557 	}
558 	mutex_unlock(&common.lock);
559 
560 	/*
561 	 * Massage the return code: if we were interrupted, return
562 	 * ERESTARTSYS; if cv_timedwait timed out, return 0; otherwise
563 	 * return the remaining time.
564 	 */
565 	if (ret < 0) {
566 		if (ret == -EINTR || ret == -ERESTART)
567 			ret = -ERESTARTSYS;
568 		if (ret == -EWOULDBLOCK)
569 			ret = 0;
570 	} else {
571 		KKASSERT(ret == 0);
572 		ret = timeout;
573 	}
574 
575 out1:	while (i --> 0)
576 		(void)fence_remove_callback(fences[i], &cb[i].fcb);
577 	cv_destroy(&common.cv);
578 	mutex_destroy(&common.lock);
579 	kfree(cb);
580 out0:	return ret;
581 }
582 
583 /*
584  * fence_wait_timeout(fence, intr, timeout)
585  *
586  *	Wait until fence is signalled; or until interrupt, if intr is
587  *	true; or until timeout, if positive.  Return -ERESTARTSYS if
588  *	interrupted, negative error code on any other error, zero on
589  *	timeout, or positive number of ticks remaining if the fence is
590  *	signalled before the timeout.  Works by calling the fence wait
591  *	callback.
592  *
593  *	The timeout must be nonnegative and less than
594  *	MAX_SCHEDULE_TIMEOUT.
595  */
596 long
597 fence_wait_timeout(struct fence *fence, bool intr, long timeout)
598 {
599 
600 	KKASSERT(timeout >= 0);
601 	KKASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
602 
603 	return (*fence->ops->wait)(fence, intr, timeout);
604 }
605 
606 /*
607  * fence_wait(fence, intr)
608  *
609  *	Wait until fence is signalled; or until interrupt, if intr is
610  *	true.  Return -ERESTARTSYS if interrupted, negative error code
611  *	on any other error, zero on success.  Works by calling the fence
612  *	wait callback with MAX_SCHEDULE_TIMEOUT.
613  */
614 long
615 fence_wait(struct fence *fence, bool intr)
616 {
617 	long ret;
618 
619 	ret = (*fence->ops->wait)(fence, intr, MAX_SCHEDULE_TIMEOUT);
620 	KKASSERT(ret != 0);
621 
622 	return (ret < 0 ? ret : 0);
623 }
624 
625 /*
626  * fence_default_wait(fence, intr, timeout)
627  *
628  *	Default implementation of fence wait callback using a condition
629  *	variable.  If the fence is already signalled, return timeout,
630  *	or 1 if no timeout.  If the enable signalling callback hasn't
631  *	been called, call it, and if it fails, act as if the fence had
632  *	been signalled.  Otherwise, wait on the internal condvar.  If
633  *	timeout is MAX_SCHEDULE_TIMEOUT, treat it as no timeout.
634  */
635 long
636 fence_default_wait(struct fence *fence, bool intr, long timeout)
637 {
638 	int starttime = 0, now = 0, deadline = 0; /* XXXGCC */
639 	struct lock *lock = fence->lock;
640 	long ret = 0;
641 
642 #if 0
643 	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
644 	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);
645 #endif
646 
647 	/* Optimistically try to skip the lock if it's already signalled.  */
648 	if (fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT))
649 		return (timeout < MAX_SCHEDULE_TIMEOUT ? timeout : 1);
650 
651 	/* Acquire the lock.  */
652 	mutex_lock(fence->lock);
653 
654 	/* Ensure signalling is enabled, or fail if we can't.  */
655 	ret = fence_ensure_signal_enabled(fence);
656 	if (ret)
657 		goto out;
658 
659 	/* Find out what our deadline is so we can handle spurious wakeup.  */
660 	if (timeout < MAX_SCHEDULE_TIMEOUT) {
661 		now = ticks;
662 		cpu_ccfence();
663 		starttime = now;
664 		deadline = starttime + timeout;
665 	}
666 
667 	/* Wait until the signalled bit is set.  */
668 	while (!(fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT))) {
669 		/*
670 		 * If there's a timeout and we've passed the deadline,
671 		 * give up.
672 		 */
673 		if (timeout < MAX_SCHEDULE_TIMEOUT) {
674 			now = ticks;
675 			cpu_ccfence();
676 			if (deadline <= now)
677 				break;
678 		}
679 		if (intr) {
680 			if (timeout < MAX_SCHEDULE_TIMEOUT) {
681 				ret = -cv_timedwait_sig(&fence->f_cv, lock,
682 				    deadline - now);
683 			} else {
684 				ret = -cv_wait_sig(&fence->f_cv, lock);
685 			}
686 		} else {
687 			if (timeout < MAX_SCHEDULE_TIMEOUT) {
688 				ret = -cv_timedwait(&fence->f_cv, lock,
689 				    deadline - now);
690 			} else {
691 				cv_wait(&fence->f_cv, lock);
692 				ret = 0;
693 			}
694 		}
695 		/* If the wait failed, give up.  */
696 		if (ret)
697 			break;
698 	}
699 
700 out:
701 	/* All done.  Release the lock.  */
702 	mutex_unlock(fence->lock);
703 
704 	/* If cv_timedwait gave up, return 0 meaning timeout.  */
705 	if (ret == -EWOULDBLOCK) {
706 		/* Only cv_timedwait and cv_timedwait_sig can return this.  */
707 		KKASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
708 		return 0;
709 	}
710 
711 	/* If there was a timeout and the deadline passed, return 0.  */
712 	if (timeout < MAX_SCHEDULE_TIMEOUT) {
713 		if (deadline <= now)
714 			return 0;
715 	}
716 
717 	/* If we were interrupted, return -ERESTARTSYS.  */
718 	if (ret == -EINTR || ret == -ERESTART)
719 		return -ERESTARTSYS;
720 
721 	/* If there was any other kind of error, fail.  */
722 	if (ret)
723 		return ret;
724 
725 	/*
726 	 * Success!  Return the number of ticks left, at least 1, or 1
727 	 * if no timeout.
728 	 */
729 	return (timeout < MAX_SCHEDULE_TIMEOUT ? MAX(deadline - now, 1) : 1);
730 }
731