xref: /dragonfly/sys/dev/drm/include/linux/sched.h (revision b3f5eba6)
1 /*
2  * Copyright (c) 2015-2019 François Tigeot <ftigeot@wolfpond.org>
3  * Copyright (c) 2019 Matthew Dillon <dillon@backplane.com>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice unmodified, this list of conditions, and the following
11  *    disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #ifndef	_LINUX_SCHED_H_
29 #define	_LINUX_SCHED_H_
30 
31 #include <linux/capability.h>
32 #include <linux/threads.h>
33 #include <linux/kernel.h>
34 #include <linux/types.h>
35 #include <linux/jiffies.h>
36 #include <linux/rbtree.h>
37 #include <linux/cpumask.h>
38 #include <linux/errno.h>
39 #include <linux/mm_types.h>
40 #include <linux/preempt.h>
41 
42 #include <asm/page.h>
43 
44 #include <linux/compiler.h>
45 #include <linux/completion.h>
46 #include <linux/pid.h>
47 #include <linux/rcupdate.h>
48 #include <linux/rculist.h>
49 
50 #include <linux/time.h>
51 #include <linux/timer.h>
52 #include <linux/hrtimer.h>
53 #include <linux/gfp.h>
54 
55 #include <asm/processor.h>
56 
57 #include <linux/spinlock.h>
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/proc.h>
62 #include <sys/sched.h>
63 #include <sys/signal2.h>
64 
65 #define	TASK_RUNNING		0
66 #define	TASK_INTERRUPTIBLE	1
67 #define	TASK_UNINTERRUPTIBLE	2
68 
69 #define MAX_SCHEDULE_TIMEOUT    LONG_MAX
70 
/*
 * schedule_timeout: puts the current thread to sleep until timeout
 * if its state allows it to.
 *
 * timeout is expressed in ticks (jiffies).  Returns the number of
 * ticks remaining (0 if the full timeout elapsed), or
 * MAX_SCHEDULE_TIMEOUT when the caller asked for an indefinite wait.
 */
static inline long
schedule_timeout(signed long timeout)
{
	unsigned long time_before, time_after;
	long slept, ret = 0;
	int timo;

	if (timeout < 0) {
		kprintf("schedule_timeout(): timeout cannot be negative\n");
		goto done;
	}

	/*
	 * Indefinite wait if timeout is MAX_SCHEDULE_TIMEOUT, but we are
	 * also translating to an integer.  The first conditional will
	 * cover both but to code defensively test both.
	 *
	 * timo == 0 means "no timeout" to tsleep(), i.e. sleep until
	 * explicitly woken up.
	 */
	if (timeout >= INT_MAX || timeout == MAX_SCHEDULE_TIMEOUT)
		timo = 0;
	else
		timo = timeout;

	switch (current->state) {
	case TASK_INTERRUPTIBLE:
		/*
		 * PCATCH lets a signal abort the sleep early; compute how
		 * many ticks we actually slept so we can report the
		 * remaining time.  The unsigned subtraction is well-defined
		 * even if the ticks counter wraps between the two reads.
		 */
		time_before = ticks;
		tsleep(current, PCATCH, "lstim", timo);
		time_after = ticks;
		slept = time_after - time_before;
		ret = timeout - slept;
		if (ret < 0)
			ret = 0;
		break;
	case TASK_UNINTERRUPTIBLE:
		/* Not interruptible by signals; ret stays 0. */
		tsleep(current, 0, "lstim", timo);
		break;
	default:
		/* We are supposed to return immediately here */
		tsleep(current, 0, "lstim", 1);
		break;
	}

done:
	/* An indefinite wait always reports its full "remaining" time. */
	if (timeout == MAX_SCHEDULE_TIMEOUT)
		ret = MAX_SCHEDULE_TIMEOUT;

	current->state = TASK_RUNNING;
	return ret;
}
123 
124 #define TASK_COMM_LEN	MAXCOMLEN
125 
126 /*
127  * local_clock: fast time source, monotonic on the same cpu
128  */
129 static inline uint64_t
130 local_clock(void)
131 {
132 	struct timespec ts;
133 
134 	getnanouptime(&ts);
135 	return (ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec;
136 }
137 
/*
 * yield: voluntarily give up the CPU so other runnable threads can run.
 * Maps directly onto the LWKT scheduler's cooperative yield.
 */
static inline void
yield(void)
{
	lwkt_yield();
}
143 
/*
 * Set the scheduling state of the current thread.
 *
 * No trailing semicolon in the expansion: callers write their own ';',
 * so the macro behaves as a single statement and remains usable in
 * unbraced if/else bodies (a trailing ';' would expand to two
 * statements and break "if (x) __set_current_state(v); else ...").
 */
#define __set_current_state(state_value)	current->state = (state_value)

/* Same as __set_current_state(), plus a full memory barrier. */
#define set_current_state(state_value)		\
do {						\
	__set_current_state(state_value);	\
	mb();					\
} while (0)
151 
/*
 * wake_up_process: wake a sleeping task.
 *
 * Always returns 0.  NOTE(review): Linux returns 1 when the task was
 * actually woken; no caller in this compat layer appears to rely on
 * that — confirm before depending on the return value.
 */
static inline int
wake_up_process(struct task_struct *tsk)
{
	/* Among other things, this function is supposed to act as
	 * a barrier */
	smp_wmb();
	wakeup(tsk);

	return 0;
}
162 
/*
 * signal_pending: non-zero when the task has a deliverable signal
 * pending.
 *
 * The task_struct is embedded in the DragonFly thread, so recover the
 * owning thread with container_of() and ask CURSIG() about its lwp.
 */
static inline int
signal_pending(struct task_struct *p)
{
	struct thread *t = container_of(p, struct thread, td_linux_task);

	return CURSIG(t->td_lwp);
}
170 
/*
 * signal_pending_state: whether a pending signal should interrupt a
 * sleep in the given scheduling state.
 *
 * Uninterruptible sleeps are never disturbed.  For interruptible
 * sleeps, only a pending SIGKILL counts.
 *
 * NOTE(review): Linux's version reports *any* pending signal for
 * TASK_INTERRUPTIBLE, not just SIGKILL; this shim is deliberately
 * narrower — confirm against the drm call sites before widening it.
 */
static inline int
signal_pending_state(long state, struct task_struct *p)
{
	struct thread *t = container_of(p, struct thread, td_linux_task);
	sigset_t pending_set;

	if (!(state & TASK_INTERRUPTIBLE))
		return 0;

	pending_set = lwp_sigpend(t->td_lwp);

	return SIGISMEMBER(pending_set, SIGKILL);
}
184 
/*
 * cond_resched: explicit rescheduling point in order to reduce latency.
 * Always returns 0 (Linux returns non-zero if a reschedule happened;
 * lwkt_yield() does not report that).
 */
static inline int
cond_resched(void)
{
	lwkt_yield();
	return 0;
}
192 
/*
 * send_sig: deliver signal 'sig' to process 'p'.
 *
 * 'priv' (Linux: send as kernel/privileged) is ignored — ksignal()
 * has no equivalent distinction.  Always returns 0 (success).
 */
static inline int
send_sig(int sig, struct proc *p, int priv)
{
	ksignal(p, sig);
	return 0;
}
199 
/*
 * set_need_resched: request a reschedule of the current thread.
 * Intentionally a no-op in this compat layer.
 */
static inline void
set_need_resched(void)
{
	/* do nothing for now */
	/* used on ttm_bo_reserve failures */
}
206 
207 #endif	/* _LINUX_SCHED_H_ */
208