xref: /dragonfly/sys/dev/drm/include/linux/sched.h (revision 5ca0a96d)
1 /*
2  * Copyright (c) 2015-2020 François Tigeot <ftigeot@wolfpond.org>
3  * Copyright (c) 2019-2020 Matthew Dillon <dillon@backplane.com>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice unmodified, this list of conditions, and the following
11  *    disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #ifndef	_LINUX_SCHED_H_
29 #define	_LINUX_SCHED_H_
30 
31 #include <linux/capability.h>
32 #include <linux/threads.h>
33 #include <linux/kernel.h>
34 #include <linux/types.h>
35 #include <linux/jiffies.h>
36 #include <linux/rbtree.h>
37 #include <linux/thread_info.h>
38 #include <linux/cpumask.h>
39 #include <linux/errno.h>
40 #include <linux/mm_types.h>
41 #include <linux/preempt.h>
42 
43 #include <asm/page.h>
44 
45 #include <linux/smp.h>
46 #include <linux/compiler.h>
47 #include <linux/completion.h>
48 #include <linux/pid.h>
49 #include <linux/rcupdate.h>
50 #include <linux/rculist.h>
51 
52 #include <linux/time.h>
53 #include <linux/timer.h>
54 #include <linux/hrtimer.h>
55 #include <linux/llist.h>
56 #include <linux/gfp.h>
57 
58 #include <asm/processor.h>
59 
60 #include <linux/spinlock.h>
61 
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/proc.h>
65 #include <sys/sched.h>
66 #include <sys/signal2.h>
67 
68 #include <machine/cpu.h>
69 
70 struct seq_file;
71 
72 #define	TASK_RUNNING		0
73 #define	TASK_INTERRUPTIBLE	1
74 #define	TASK_UNINTERRUPTIBLE	2
75 
76 #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
77 
78 #define MAX_SCHEDULE_TIMEOUT    LONG_MAX
79 
80 #define TASK_COMM_LEN	MAXCOMLEN
81 
/*
 * Partial emulation of the Linux task_struct, wrapping a DragonFly
 * kernel thread.  Only the fields used by the drm compatibility code
 * are present.
 */
struct task_struct {
	struct thread *dfly_td;	/* backing DragonFly kernel thread */
	volatile long state;	/* TASK_RUNNING / TASK_(UN)INTERRUPTIBLE */
	struct mm_struct *mm;	/* mirror copy in p->p_linux_mm */
	int prio;		/* priority; no consumer visible in this header */

	/* kthread-specific data */
	unsigned long		kt_flags;	/* kthread flag bits */
	int			(*kt_fn)(void *data);	/* kthread entry point */
	void			*kt_fndata;	/* argument passed to kt_fn */
	int			kt_exitvalue;	/* kthread exit/return value */

	/* executable name without path */
	char			comm[TASK_COMM_LEN];

	atomic_t usage_counter;	/* presumably a refcount; users outside this view */
	pid_t pid;		/* process id */
	struct spinlock		kt_spin;	/* interlocks state between
						 * schedule_timeout() and
						 * wake_up_process() */
};
101 
/*
 * Set current->state without a memory barrier.
 * The expansion deliberately has NO trailing semicolon so the macro
 * behaves like an ordinary statement: an embedded ';' would expand
 * every call into two statements and break constructs such as
 * "if (x) __set_current_state(y); else ...".  All callers already
 * supply their own terminating semicolon.
 */
#define __set_current_state(state_value)	current->state = (state_value)

/*
 * Set current->state and issue a full memory barrier, so the state
 * change is globally visible before the caller tests its sleep
 * condition.
 */
#define set_current_state(state_value)		\
do {						\
	__set_current_state(state_value);	\
	mb();					\
} while (0)
109 
/*
 * schedule_timeout: puts the current thread to sleep until timeout
 * if its state allows it to.
 *
 * timeout is in ticks; MAX_SCHEDULE_TIMEOUT requests an indefinite
 * sleep.  For an interruptible sleep the number of ticks remaining is
 * returned (0 if the full timeout elapsed); uninterruptible sleeps
 * always return 0; indefinite sleeps return MAX_SCHEDULE_TIMEOUT.
 * current->state is unconditionally reset to TASK_RUNNING on return.
 */
static inline long
schedule_timeout(signed long timeout)
{
	unsigned long time_before, time_after;
	long slept, ret = 0;
	int timo;

	if (timeout < 0) {
		kprintf("schedule_timeout(): timeout cannot be negative\n");
		goto done;
	}

	/*
	 * Indefinite wait if timeout is MAX_SCHEDULE_TIMEOUT, but we are
	 * also translating to an integer.  The first conditional will
	 * cover both but to code defensively test both.
	 * (a timo of 0 requests an unbounded ssleep())
	 */
	if (timeout >= INT_MAX || timeout == MAX_SCHEDULE_TIMEOUT)
		timo = 0;
	else
		timo = timeout;

	/*
	 * kt_spin interlocks the state test below against the
	 * state/wakeup update in wake_up_process().
	 */
	spin_lock(&current->kt_spin);

	switch (current->state) {
	case TASK_INTERRUPTIBLE:
		/* signal-interruptible sleep (PCATCH); track elapsed ticks */
		time_before = ticks;
		ssleep(current, &current->kt_spin, PCATCH, "lstim", timo);
		time_after = ticks;
		slept = time_after - time_before;
		ret = timeout - slept;
		if (ret < 0)
			ret = 0;
		break;
	case TASK_UNINTERRUPTIBLE:
		ssleep(current, &current->kt_spin, 0, "lstim", timo);
		break;
	default:
		/*
		 * Task has been flagged running before we could
		 * enter the sleep.
		 *
		 * XXX should be able to remove this ssleep(), have it
		 * here to protect against live-locks in case we mess
		 * up the task->state.
		 */
		ssleep(current, &current->kt_spin, 0, "lst1", 1);
		break;
	}

	spin_unlock(&current->kt_spin);

done:
	if (timeout == MAX_SCHEDULE_TIMEOUT)
		ret = MAX_SCHEDULE_TIMEOUT;

	current->state = TASK_RUNNING;
	return ret;
}
173 
174 static inline void
175 schedule(void)
176 {
177 	(void)schedule_timeout(MAX_SCHEDULE_TIMEOUT);
178 }
179 
180 static inline signed long
181 schedule_timeout_uninterruptible(signed long timeout)
182 {
183 	__set_current_state(TASK_UNINTERRUPTIBLE);
184 	return schedule_timeout(timeout);
185 }
186 
/*
 * I/O-wait flavour of schedule_timeout().  No separate I/O accounting
 * is done here, so this is a straight pass-through.
 */
static inline long
io_schedule_timeout(signed long timeout)
{
	long remaining;

	remaining = schedule_timeout(timeout);
	return (remaining);
}
192 
193 /*
194  * local_clock: fast time source, monotonic on the same cpu
195  */
196 static inline uint64_t
197 local_clock(void)
198 {
199 	struct timespec ts;
200 
201 	getnanouptime(&ts);
202 	return (ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec;
203 }
204 
/*
 * Voluntarily yield the cpu; maps directly onto DragonFly's
 * lwkt_yield().
 */
static inline void
yield(void)
{
	lwkt_yield();
}
210 
211 static inline int
212 wake_up_process(struct task_struct *tsk)
213 {
214 	long ostate;
215 
216 	/*
217 	 * Among other things, this function is supposed to act as
218 	 * a barrier
219 	 */
220 	smp_wmb();
221 	spin_lock(&tsk->kt_spin);
222 	ostate = tsk->state;
223 	tsk->state = TASK_RUNNING;
224 	spin_unlock(&tsk->kt_spin);
225 	if (ostate != TASK_RUNNING)
226 		wakeup(tsk);
227 
228 	return 1;	/* Always indicate the process was woken up */
229 }
230 
231 static inline int
232 signal_pending(struct task_struct *p)
233 {
234 	struct thread *t = p->dfly_td;
235 
236 	/* Some kernel threads do not have lwp, t->td_lwp can be NULL */
237 	if (t->td_lwp == NULL)
238 		return 0;
239 
240 	return CURSIG(t->td_lwp);
241 }
242 
243 static inline int
244 fatal_signal_pending(struct task_struct *p)
245 {
246 	struct thread *t = p->dfly_td;
247 	sigset_t pending_set;
248 
249 	/* Some kernel threads do not have lwp, t->td_lwp can be NULL */
250 	if (t->td_lwp == NULL)
251 		return 0;
252 
253 	pending_set = lwp_sigpend(t->td_lwp);
254 	return SIGISMEMBER(pending_set, SIGKILL);
255 }
256 
257 static inline int
258 signal_pending_state(long state, struct task_struct *p)
259 {
260 	if (state & TASK_INTERRUPTIBLE)
261 		return (signal_pending(p));
262 	else
263 		return (fatal_signal_pending(p));
264 }
265 
/*
 * Explicit rescheduling in order to reduce latency.
 * NOTE(review): Linux's cond_resched() returns nonzero when a context
 * switch actually occurred; this shim yields unconditionally and
 * always reports 0.
 */
static inline int
cond_resched(void)
{
	lwkt_yield();
	return 0;
}
273 
/*
 * Post signal 'sig' to process 'p' via ksignal().
 * The 'priv' argument (a privilege flag in Linux) is ignored and the
 * call always reports success.
 * NOTE(review): Linux send_sig() takes a task_struct; this shim takes
 * a DragonFly struct proc -- verify callers pass the right object.
 */
static inline int
send_sig(int sig, struct proc *p, int priv)
{
	ksignal(p, sig);
	return 0;
}
280 
/*
 * Request that the current task be rescheduled soon.
 * Intentionally a stub on DragonFly.
 */
static inline void
set_need_resched(void)
{
	/* do nothing for now */
	/* used on ttm_bo_reserve failures */
}
287 
/*
 * True when a reschedule request is pending for the current cpu;
 * delegates to DragonFly's any_resched_wanted().
 */
static inline bool
need_resched(void)
{
	return any_resched_wanted();
}
293 
/*
 * Linux API to change a task's scheduling policy without permission
 * checks.  A no-op that always reports success, since thread
 * scheduling policies are not differentiated here.
 */
static inline int
sched_setscheduler_nocheck(struct task_struct *ts,
			   int policy, const struct sched_param *param)
{
	/* We do not allow different thread scheduling policies */
	return 0;
}
301 
/*
 * Nonzero while the current thread has page faults disabled
 * (TDF_NOFAULT set on curthread; presumably set by a
 * pagefault_disable()-style helper -- confirm against callers).
 */
static inline int
pagefault_disabled(void)
{
	return (curthread->td_flags & TDF_NOFAULT);
}
307 
/*
 * Grab a reference on an mm_struct (increments mm->mm_count).
 */
static inline void
mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}
313 
314 #endif	/* _LINUX_SCHED_H_ */
315