xref: /dragonfly/sys/dev/drm/linux_fence.c (revision 4837705e)
1 /*	$NetBSD: linux_fence.c,v 1.14 2019/01/05 22:24:24 tnn Exp $	*/
2 
3 /*-
4  * Copyright (c) 2018 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Taylor R. Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 
34 #include <sys/condvar.h>
35 #include <sys/queue.h>
36 
37 #include <linux/compiler.h>
38 
39 #include <linux/atomic.h>
40 #include <linux/errno.h>
41 #include <linux/kref.h>
42 #include <linux/fence.h>
43 #include <linux/sched.h>
44 #include <linux/slab.h>
45 #include <linux/spinlock.h>
46 
47 /*
48  * linux_fence_trace
49  *
50  *	True if we print FENCE_TRACE messages, false if not.  These are
51  *	extremely noisy, too much even for AB_VERBOSE and AB_DEBUG in
52  *	boothowto.
53  */
54 int	linux_fence_trace = 0;
55 
56 /*
57  * fence_init(fence, ops, lock, context, seqno)
58  *
59  *	Initialize fence.  Caller should call fence_destroy when done,
60  *	after all references have been released.
61  */
62 void
63 fence_init(struct fence *fence, const struct fence_ops *ops, struct lock *lock,
64     unsigned context, unsigned seqno)
65 {
66 
67 	kref_init(&fence->refcount);
68 	fence->lock = lock;
69 	fence->flags = 0;
70 	fence->context = context;
71 	fence->seqno = seqno;
72 	fence->ops = ops;
73 	TAILQ_INIT(&fence->f_callbacks);
74 	cv_init(&fence->f_cv, "fence");
75 }
76 
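/*
 * Illustrative sketch of how a driver might set this up.  The mydrv_*
 * names and the ops table below are hypothetical, not part of this
 * file; fence_default_wait and fence_context_alloc are defined later
 * in this file.
 *
 *	static const struct fence_ops mydrv_fence_ops = {
 *		.get_driver_name	= mydrv_fence_get_driver_name,
 *		.get_timeline_name	= mydrv_fence_get_timeline_name,
 *		.enable_signaling	= mydrv_fence_enable_signaling,
 *		.wait			= fence_default_wait,
 *		.release		= mydrv_fence_release,
 *	};
 *
 *	unsigned context = fence_context_alloc(1);
 *	fence_init(&job->fence, &mydrv_fence_ops, &mydrv->fence_lock,
 *	    context, ++mydrv->next_seqno);
 */
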
77 /*
78  * fence_destroy(fence)
79  *
80  *	Clean up memory initialized with fence_init.  This is meant to
81  *	be used after a fence release callback.
82  */
83 void
84 fence_destroy(struct fence *fence)
85 {
86 
87 #if 0
88 	KASSERT(!fence_referenced_p(fence));
89 
90 	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
91 #endif
92 	cv_destroy(&fence->f_cv);
93 }
94 
95 #if 0
96 static void
97 fence_free_cb(struct rcu_head *rcu)
98 {
99 	struct fence *fence = container_of(rcu, struct fence, f_rcu);
100 
101 	fence_destroy(fence);
102 	kfree(fence);
103 }
104 #endif
105 
106 /*
107  * fence_free(fence)
108  *
109  *	Schedule fence to be destroyed and then freed with kfree after
110  *	any pending RCU read sections on all CPUs have completed.
111  *	Caller must guarantee all references have been released.  This
112  *	is meant to be used after a fence release callback.
113  *
114  *	NOTE: Callers assume kfree will be used to release the memory.
115  *	We never allocate it ourselves -- the caller is expected to have
116  *	allocated it with kmalloc before initializing it with fence_init.
117  */
118 void
119 fence_free(struct fence *fence)
120 {
121 #ifdef __NetBSD__
122 	/* The RCU callback fence_free_cb does the fence_destroy and kfree.  */
123 	call_rcu(&fence->f_rcu, &fence_free_cb);
124 #else
125 	kfree(fence);
126 #endif
127 }
128 
129 static inline uint32_t
130 atomic_add_int_nv(volatile uint32_t *target, int32_t delta)
131 {
132 	return (atomic_fetchadd_32(target, delta) + delta);
133 }
134 
135 /*
136  * fence_context_alloc(n)
137  *
138  *	Return the first of a contiguous sequence of n unique context
139  *	identifiers, unique at least until the counter wraps around.
140  */
141 unsigned
142 fence_context_alloc(unsigned n)
143 {
144 	static volatile unsigned next_context = 0;
145 
146 	return atomic_add_int_nv(&next_context, n) - n;
147 }
148 
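/*
 * For example, a driver with several timelines might reserve one
 * context per ring up front (sketch; MYDRV_NRINGS and the ring array
 * are hypothetical):
 *
 *	unsigned base = fence_context_alloc(MYDRV_NRINGS);
 *	for (i = 0; i < MYDRV_NRINGS; i++)
 *		mydrv->ring[i].fence_context = base + i;
 */
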
149 #if 0
150 /*
151  * fence_is_later(a, b)
152  *
153  *	True if the sequence number of fence a is later than the
154  *	sequence number of fence b.  Since sequence numbers wrap
155  *	around, we define this to mean that the sequence number of
156  *	fence a is no more than INT_MAX past the sequence number of
157  *	fence b.
158  *
159  *	The two fences must have the same context.
160  */
161 bool
162 fence_is_later(struct fence *a, struct fence *b)
163 {
164 
165 	KASSERTMSG(a->context == b->context, "incommensurate fences"
166 	    ": %u @ %p =/= %u @ %p", a->context, a, b->context, b);
167 
168 	return a->seqno - b->seqno < INT_MAX;
169 }
170 #endif
171 
172 /*
173  * fence_get(fence)
174  *
175  *	Acquire a reference to fence, which must not already be in the
176  *	process of being destroyed.  Return the fence.
177  */
178 struct fence *
179 fence_get(struct fence *fence)
180 {
181 
182 	if (fence)
183 		kref_get(&fence->refcount);
184 	return fence;
185 }
186 
187 /*
188  * fence_get_rcu(fence)
189  *
190  *	Attempt to acquire a reference to a fence that may be about to
191  *	be destroyed, during a read section.  Return the fence on
192  *	success, or NULL on failure.
193  */
194 struct fence *
195 fence_get_rcu(struct fence *fence)
196 {
197 
198 	if (!kref_get_unless_zero(&fence->refcount))
199 		return NULL;
200 	return fence;
201 }
202 
203 static void
204 fence_release(struct kref *refcount)
205 {
206 	struct fence *fence = container_of(refcount, struct fence, refcount);
207 
208 	if (fence->ops->release)
209 		(*fence->ops->release)(fence);
210 	else
211 		fence_free(fence);
212 }
213 
214 /*
215  * fence_put(fence)
216  *
217  *	Release a reference to fence.  If this was the last one, call
218  *	the fence's release callback.
219  */
220 void
221 fence_put(struct fence *fence)
222 {
223 
224 	if (fence == NULL)
225 		return;
226 	kref_put(&fence->refcount, &fence_release);
227 }
228 
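/*
 * Reference-counting sketch: a consumer that only inspects a fence
 * takes its own reference for the duration (job is hypothetical):
 *
 *	struct fence *f = fence_get(&job->fence);
 *	...use f...
 *	fence_put(f);
 */
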
229 /*
230  * fence_ensure_signal_enabled(fence)
231  *
232  *	Internal subroutine.  If the fence was already signalled,
233  *	return -ENOENT.  Otherwise, if the enable signalling callback
234  *	has not been called yet, call it.  If it fails, signal the fence
235  *	and return -ENOENT.  If it succeeds, or if it had already been
236  *	called, return zero to indicate success.
237  *
238  *	Caller must hold the fence's lock.
239  */
240 static int
241 fence_ensure_signal_enabled(struct fence *fence)
242 {
243 #if 0
244 	KKASSERT(spin_is_locked(fence->lock));
245 #endif
246 
247 	/* If the fence was already signalled, fail with -ENOENT.  */
248 	if (fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT))
249 		return -ENOENT;
250 
251 	/*
252 	 * Set the bit recording that the enable-signaling callback has
253 	 * been called; if it was already set, there is nothing to do.
254 	 */
255 	if (test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags))
256 		return 0;
257 
258 	/* Otherwise, this is the first time: call the callback.  */
259 	if (!(*fence->ops->enable_signaling)(fence)) {
260 		/* If it failed, signal and return -ENOENT.  */
261 		fence_signal_locked(fence);
262 		return -ENOENT;
263 	}
264 
265 	/* Success!  */
266 	return 0;
267 }
268 
269 /*
270  * fence_add_callback(fence, fcb, fn)
271  *
272  *	If fence has been signalled, return -ENOENT.  If the enable
273  *	signalling callback hasn't been called yet, call it; if it
274  *	fails, return -ENOENT.  Otherwise, arrange to call fn(fence,
275  *	fcb) when it is signalled, and return 0.
276  *
277  *	The fence uses the caller-allocated memory in fcb from the time
278  *	of fence_add_callback until either fence_remove_callback is
279  *	called or fn is about to be invoked.
280  */
281 int
282 fence_add_callback(struct fence *fence, struct fence_cb *fcb, fence_func_t fn)
283 {
284 	int ret;
285 
286 
287 	/* Optimistically try to skip the lock if it's already signalled.  */
288 	if (fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT)) {
289 		ret = -ENOENT;
290 		goto out0;
291 	}
292 
293 	/* Acquire the lock.  */
294 	mutex_lock(fence->lock);
295 
296 	/* Ensure signalling is enabled, or fail if we can't.  */
297 	ret = fence_ensure_signal_enabled(fence);
298 	if (ret)
299 		goto out1;
300 
301 	/* Insert the callback.  */
302 	fcb->fcb_func = fn;
303 	TAILQ_INSERT_TAIL(&fence->f_callbacks, fcb, fcb_entry);
304 	fcb->fcb_onqueue = true;
305 
306 	/* Release the lock and we're done.  */
307 out1:	mutex_unlock(fence->lock);
308 out0:	return ret;
309 }
310 
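/*
 * Callback registration sketch; mydrv_done_cb and struct mydrv_waiter
 * are hypothetical:
 *
 *	static void
 *	mydrv_done_cb(struct fence *fence, struct fence_cb *fcb)
 *	{
 *		struct mydrv_waiter *w = container_of(fcb,
 *		    struct mydrv_waiter, fcb);
 *
 *		...notify the waiter...
 *	}
 *
 *	ret = fence_add_callback(fence, &w->fcb, mydrv_done_cb);
 *	if (ret == -ENOENT)
 *		...already signalled; mydrv_done_cb will never run...
 *
 * A waiter that gives up calls fence_remove_callback below; a false
 * return there means the callback already ran.
 */
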
311 /*
312  * fence_remove_callback(fence, fcb)
313  *
314  *	Remove the callback fcb from fence.  Return true if it was
315  *	removed from the list, or false if it had already run and so
316  *	was no longer queued anyway.  Caller must have already called
317  *	fence_add_callback(fence, fcb).
318  */
319 bool
320 fence_remove_callback(struct fence *fence, struct fence_cb *fcb)
321 {
322 	bool onqueue;
323 
324 
325 	mutex_lock(fence->lock);
326 	onqueue = fcb->fcb_onqueue;
327 	if (onqueue) {
328 		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
329 		fcb->fcb_onqueue = false;
330 	}
331 	mutex_unlock(fence->lock);
332 
333 	return onqueue;
334 }
335 
336 /*
337  * fence_enable_sw_signaling(fence)
338  *
339  *	If it hasn't been called yet and the fence hasn't been
340  *	signalled yet, call the fence's enable_signaling callback.
341  *	If that callback indicates failure by returning false,
342  *	signal the fence.
343  */
344 void
345 fence_enable_sw_signaling(struct fence *fence)
346 {
347 	mutex_lock(fence->lock);
348 	(void)fence_ensure_signal_enabled(fence);
349 	mutex_unlock(fence->lock);
350 }
351 
352 /*
353  * fence_is_signaled(fence)
354  *
355  *	Test whether the fence has been signalled.  If it has been
356  *	signalled by fence_signal(_locked), return true.  If the
357  *	signalled callback returns true indicating that some implicit
358  *	external condition has changed, call the callbacks as if with
359  *	fence_signal.
360  */
361 bool
362 fence_is_signaled(struct fence *fence)
363 {
364 	bool signaled;
365 
366 	mutex_lock(fence->lock);
367 	signaled = fence_is_signaled_locked(fence);
368 	mutex_unlock(fence->lock);
369 
370 	return signaled;
371 }
372 
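/*
 * Polling sketch for contexts that cannot block (names hypothetical):
 *
 *	if (fence_is_signaled(&job->fence))
 *		mydrv_retire_job(job);
 */
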
373 /*
374  * fence_is_signaled_locked(fence)
375  *
376  *	Test whether the fence has been signalled.  Like
377  *	fence_is_signaled, but the caller already holds the fence's lock.
378  */
379 bool
380 fence_is_signaled_locked(struct fence *fence)
381 {
382 
383 #if 0
384 	KKASSERT(spin_is_locked(fence->lock));
385 #endif
386 
387 	/* Check whether we already set the signalled bit.  */
388 	if (fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT))
389 		return true;
390 
391 	/* If there's a signalled callback, test it.  */
392 	if (fence->ops->signaled) {
393 		if ((*fence->ops->signaled)(fence)) {
394 			/*
395 			 * It's been signalled implicitly by some
396 			 * external phenomenon.  Act as though someone
397 			 * has called fence_signal.
398 			 */
399 			fence_signal_locked(fence);
400 			return true;
401 		}
402 	}
403 
404 	return false;
405 }
406 
407 /*
408  * fence_signal(fence)
409  *
410  *	Signal the fence.  If it has already been signalled, return
411  *	-EINVAL.  If it has not been signalled, call the enable
412  *	signalling callback if it hasn't been called yet, and remove
413  *	each registered callback from the queue and call it; then
414  *	return 0.
415  */
416 int
417 fence_signal(struct fence *fence)
418 {
419 	int ret;
420 
421 	mutex_lock(fence->lock);
422 	ret = fence_signal_locked(fence);
423 	mutex_unlock(fence->lock);
424 
425 	return ret;
426 }
427 
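/*
 * Typical use from a driver's completion or interrupt path (sketch;
 * the mydrv_* names are hypothetical):
 *
 *	while ((job = mydrv_next_finished_job(mydrv)) != NULL) {
 *		fence_signal(&job->fence);
 *		fence_put(&job->fence);
 *	}
 */
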
428 /*
429  * fence_signal_locked(fence)
430  *
431  *	Signal the fence.  Like fence_signal, but caller already holds
432  *	the fence's lock.
433  */
434 int
435 fence_signal_locked(struct fence *fence)
436 {
437 	struct fence_cb *fcb, *next;
438 
439 #if 0
440 	KKASSERT(spin_is_locked(fence->lock));
441 #endif
442 
443 	/* If it's been signalled, fail; otherwise set the signalled bit.  */
444 	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
445 		return -EINVAL;
446 
447 	/* Wake waiters.  */
448 	cv_broadcast(&fence->f_cv);
449 
450 	/* Remove and call the callbacks.  */
451 	TAILQ_FOREACH_MUTABLE(fcb, &fence->f_callbacks, fcb_entry, next) {
452 		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
453 		fcb->fcb_onqueue = false;
454 		(*fcb->fcb_func)(fence, fcb);
455 	}
456 
457 	/* Success! */
458 	return 0;
459 }
460 
461 struct wait_any {
462 	struct fence_cb	fcb;
463 	struct wait_any1 {
464 		struct lock lock;
465 		struct cv cv;
466 		bool		done;
467 	}		*common;
468 };
469 
470 static void
471 wait_any_cb(struct fence *fence, struct fence_cb *fcb)
472 {
473 	struct wait_any *cb = container_of(fcb, struct wait_any, fcb);
474 
475 	mutex_lock(&cb->common->lock);
476 	cb->common->done = true;
477 	cv_broadcast(&cb->common->cv);
478 	mutex_unlock(&cb->common->lock);
479 }
480 
481 /*
482  * fence_wait_any_timeout(fence, nfences, intr, timeout)
483  *
484  *	Wait for any of fences[0], fences[1], fences[2], ...,
485  *	fences[nfences-1] to be signaled.
486  */
487 long
488 fence_wait_any_timeout(struct fence **fences, uint32_t nfences, bool intr,
489     long timeout)
490 {
491 	struct wait_any1 common;
492 	struct wait_any *cb;
493 	uint32_t i, j;
494 	int start, end;
495 	long ret = 0;
496 
497 	/* Allocate an array of callback records.  */
498 	cb = kcalloc(nfences, sizeof(cb[0]), GFP_KERNEL);
499 	if (cb == NULL) {
500 		ret = -ENOMEM;
501 		goto out0;
502 	}
503 
504 	/* Initialize a mutex and condvar for the common wait.  */
505 	lockinit(&common.lock, "drmfcl", 0, LK_CANRECURSE);
506 	cv_init(&common.cv, "fence");
507 	common.done = false;
508 
509 	/* Add a callback to each of the fences, or stop here if we can't.  */
510 	for (i = 0; i < nfences; i++) {
511 		cb[i].common = &common;
512 		ret = fence_add_callback(fences[i], &cb[i].fcb, &wait_any_cb);
513 		if (ret)
514 			goto out1;
515 	}
516 
517 	/*
518 	 * Test whether any of the fences has been signalled.  If they
519 	 * have, stop here.  If they haven't, we are guaranteed to be
520 	 * notified by one of the callbacks when they have.
521 	 */
522 	for (j = 0; j < nfences; j++) {
523 		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fences[j]->flags))
524 			goto out1;
525 	}
526 
527 	/*
528 	 * None of them was ready immediately.  Wait for one of the
529 	 * callbacks to notify us when it is done.
530 	 */
531 	mutex_lock(&common.lock);
532 	while (timeout > 0 && !common.done) {
533 		start = ticks;
534 		cpu_ccfence();
535 		if (intr) {
536 			if (timeout != MAX_SCHEDULE_TIMEOUT) {
537 				ret = -cv_timedwait_sig(&common.cv,
538 				    &common.lock, MIN(timeout, /* paranoia */
539 					MAX_SCHEDULE_TIMEOUT));
540 			} else {
541 				ret = -cv_wait_sig(&common.cv, &common.lock);
542 			}
543 		} else {
544 			if (timeout != MAX_SCHEDULE_TIMEOUT) {
545 				ret = -cv_timedwait(&common.cv,
546 				    &common.lock, MIN(timeout, /* paranoia */
547 					MAX_SCHEDULE_TIMEOUT));
548 			} else {
549 				cv_wait(&common.cv, &common.lock);
550 				ret = 0;
551 			}
552 		}
553 		end = ticks;
554 		cpu_ccfence();
555 		if (ret)
556 			break;
557 		timeout -= MIN(timeout, (unsigned)end - (unsigned)start);
558 	}
559 	mutex_unlock(&common.lock);
560 
561 	/*
562 	 * Massage the return code: if we were interrupted, return
563 	 * -ERESTARTSYS; if cv_timedwait timed out, return 0; otherwise
564 	 * return the remaining time.
565 	 */
566 	if (ret < 0) {
567 		if (ret == -EINTR || ret == -ERESTART)
568 			ret = -ERESTARTSYS;
569 		if (ret == -EWOULDBLOCK)
570 			ret = 0;
571 	} else {
572 		KKASSERT(ret == 0);
573 		ret = timeout;
574 	}
575 
576 out1:	while (i --> 0)
577 		(void)fence_remove_callback(fences[i], &cb[i].fcb);
578 	cv_destroy(&common.cv);
579 	mutex_destroy(&common.lock);
580 	kfree(cb);
581 out0:	return ret;
582 }
583 
584 /*
585  * fence_wait_timeout(fence, intr, timeout)
586  *
587  *	Wait until fence is signalled; or until interrupt, if intr is
588  *	true; or until timeout, if positive.  Return -ERESTARTSYS if
589  *	interrupted, negative error code on any other error, zero on
590  *	timeout, or positive number of ticks remaining if the fence is
591  *	signalled before the timeout.  Works by calling the fence wait
592  *	callback.
593  *
594  *	The timeout must be nonnegative and less than
595  *	MAX_SCHEDULE_TIMEOUT.
596  */
597 long
598 fence_wait_timeout(struct fence *fence, bool intr, long timeout)
599 {
600 
601 	KKASSERT(timeout >= 0);
602 	KKASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
603 
604 	return (*fence->ops->wait)(fence, intr, timeout);
605 }
606 
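/*
 * Example of waiting with a one-second timeout and interpreting the
 * result (sketch; hz is the system tick rate):
 *
 *	long left = fence_wait_timeout(fence, true, hz);
 *	if (left < 0)
 *		...error, e.g. -ERESTARTSYS if interrupted...
 *	else if (left == 0)
 *		...timed out...
 *	else
 *		...signalled, with left ticks of the timeout remaining...
 */
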
607 /*
608  * fence_wait(fence, intr)
609  *
610  *	Wait until fence is signalled; or until interrupt, if intr is
611  *	true.  Return -ERESTARTSYS if interrupted, negative error code
612  *	on any other error, zero on success.  Works by calling the fence
613  *	wait callback with MAX_SCHEDULE_TIMEOUT.
614  */
615 long
616 fence_wait(struct fence *fence, bool intr)
617 {
618 	long ret;
619 
620 	ret = (*fence->ops->wait)(fence, intr, MAX_SCHEDULE_TIMEOUT);
621 	KKASSERT(ret != 0);
622 
623 	return (ret < 0 ? ret : 0);
624 }
625 
626 /*
627  * fence_default_wait(fence, intr, timeout)
628  *
629  *	Default implementation of fence wait callback using a condition
630  *	variable.  If the fence is already signalled, return timeout,
631  *	or 1 if no timeout.  If the enable signalling callback hasn't
632  *	been called, call it, and if it fails, act as if the fence had
633  *	been signalled.  Otherwise, wait on the internal condvar.  If
634  *	timeout is MAX_SCHEDULE_TIMEOUT, treat it as no timeout.
635  */
636 long
637 fence_default_wait(struct fence *fence, bool intr, long timeout)
638 {
639 	int starttime = 0, now = 0, deadline = 0; /* XXXGCC */
640 	struct lock *lock = fence->lock;
641 	long ret = 0;
642 
643 #if 0
644 	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
645 	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);
646 #endif
647 
648 	/* Optimistically try to skip the lock if it's already signalled.  */
649 	if (fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT))
650 		return (timeout < MAX_SCHEDULE_TIMEOUT ? timeout : 1);
651 
652 	/* Acquire the lock.  */
653 	mutex_lock(fence->lock);
654 
655 	/* Ensure signalling is enabled, or fail if we can't.  */
656 	ret = fence_ensure_signal_enabled(fence);
657 	if (ret)
658 		goto out;
659 
660 	/* Find out what our deadline is so we can handle spurious wakeup.  */
661 	if (timeout < MAX_SCHEDULE_TIMEOUT) {
662 		now = ticks;
663 		cpu_ccfence();
664 		starttime = now;
665 		deadline = starttime + timeout;
666 	}
667 
668 	/* Wait until the signalled bit is set.  */
669 	while (!(fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT))) {
670 		/*
671 		 * If there's a timeout and we've passed the deadline,
672 		 * give up.
673 		 */
674 		if (timeout < MAX_SCHEDULE_TIMEOUT) {
675 			now = ticks;
676 			cpu_ccfence();
677 			if (deadline <= now)
678 				break;
679 		}
680 		if (intr) {
681 			if (timeout < MAX_SCHEDULE_TIMEOUT) {
682 				ret = -cv_timedwait_sig(&fence->f_cv, lock,
683 				    deadline - now);
684 			} else {
685 				ret = -cv_wait_sig(&fence->f_cv, lock);
686 			}
687 		} else {
688 			if (timeout < MAX_SCHEDULE_TIMEOUT) {
689 				ret = -cv_timedwait(&fence->f_cv, lock,
690 				    deadline - now);
691 			} else {
692 				cv_wait(&fence->f_cv, lock);
693 				ret = 0;
694 			}
695 		}
696 		/* If the wait failed, give up.  */
697 		if (ret)
698 			break;
699 	}
700 
701 out:
702 	/* All done.  Release the lock.  */
703 	mutex_unlock(fence->lock);
704 
705 	/* If cv_timedwait gave up, return 0 meaning timeout.  */
706 	if (ret == -EWOULDBLOCK) {
707 		/* Only cv_timedwait and cv_timedwait_sig can return this.  */
708 		KKASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
709 		return 0;
710 	}
711 
712 	/* If there was a timeout and the deadline passed, return 0.  */
713 	if (timeout < MAX_SCHEDULE_TIMEOUT) {
714 		if (deadline <= now)
715 			return 0;
716 	}
717 
718 	/* If we were interrupted, return -ERESTARTSYS.  */
719 	if (ret == -EINTR || ret == -ERESTART)
720 		return -ERESTARTSYS;
721 
722 	/* If there was any other kind of error, fail.  */
723 	if (ret)
724 		return ret;
725 
726 	/*
727 	 * Success!  Return the number of ticks left, at least 1, or 1
728 	 * if no timeout.
729 	 */
730 	return (timeout < MAX_SCHEDULE_TIMEOUT ? MAX(deadline - now, 1) : 1);
731 }
732