/*
 * Copyright (c) 2015-2020 François Tigeot <ftigeot@wolfpond.org>
 * Copyright (c) 2019 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_LINUX_SCHED_H_
#define	_LINUX_SCHED_H_

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/page.h>

#include <linux/smp.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>

#include <linux/time.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/gfp.h>

#include <asm/processor.h>

#include <linux/spinlock.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/signal2.h>

#include <machine/cpu.h>

struct seq_file;

#define	TASK_RUNNING		0
#define	TASK_INTERRUPTIBLE	1
#define	TASK_UNINTERRUPTIBLE	2

#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

#define MAX_SCHEDULE_TIMEOUT	LONG_MAX

struct task_struct {
	struct thread *dfly_td;
	volatile long state;
	struct mm_struct *mm;	/* mirror copy in p->p_linux_mm */
	int prio;

	/* kthread-specific data */
	unsigned long		kt_flags;
	struct completion	kt_exited;
	int			(*kt_fn)(void *data);
	void			*kt_fndata;
	int			kt_exitvalue;
};

#define __set_current_state(state_value)	current->state = (state_value)

#define set_current_state(state_value)		\
do {						\
	__set_current_state(state_value);	\
	mb();					\
} while (0)

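/*
 * Minimal sketch of why set_current_state() includes a barrier
 * (illustrative only; "cond" is a hypothetical flag set by a waker
 * thread that then calls wake_up_process()):
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (!cond)
 *		schedule();
 *	__set_current_state(TASK_RUNNING);
 *
 * Without the mb(), the state store could be reordered around the read
 * of "cond" and a concurrent wakeup could be missed.
 */
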
/*
 * schedule_timeout: put the current thread to sleep until the timeout
 * expires, if its state allows it.
 */
static inline long
schedule_timeout(signed long timeout)
{
	unsigned long time_before, time_after;
	long slept, ret = 0;
	int timo;

	if (timeout < 0) {
		kprintf("schedule_timeout(): timeout cannot be negative\n");
		goto done;
	}

	/*
	 * Wait indefinitely (timo == 0) if timeout is
	 * MAX_SCHEDULE_TIMEOUT.  We also have to narrow the timeout to
	 * an int for tsleep(); the first condition already covers the
	 * MAX_SCHEDULE_TIMEOUT case, but test both to be defensive.
	 */
	if (timeout >= INT_MAX || timeout == MAX_SCHEDULE_TIMEOUT)
		timo = 0;
	else
		timo = timeout;

	switch (current->state) {
	case TASK_INTERRUPTIBLE:
		time_before = ticks;
		tsleep(current, PCATCH, "lstim", timo);
		time_after = ticks;
		slept = time_after - time_before;
		ret = timeout - slept;
		if (ret < 0)
			ret = 0;
		break;
	case TASK_UNINTERRUPTIBLE:
		tsleep(current, 0, "lstim", timo);
		break;
	default:
		/* We are supposed to return immediately here */
		tsleep(current, 0, "lstim", 1);
		break;
	}

done:
	if (timeout == MAX_SCHEDULE_TIMEOUT)
		ret = MAX_SCHEDULE_TIMEOUT;

	current->state = TASK_RUNNING;
	return ret;
}

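/*
 * Usage sketch (illustrative only): the usual Linux pattern sets the
 * task state before each check of the wait condition, then sleeps for
 * the remaining time.  "wait_cond" is a hypothetical flag, and this
 * assumes msecs_to_jiffies() from <linux/jiffies.h>:
 *
 *	long remaining = msecs_to_jiffies(100);
 *
 *	while (!wait_cond && remaining > 0) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		remaining = schedule_timeout(remaining);
 *	}
 *	__set_current_state(TASK_RUNNING);
 */
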
static inline void
schedule(void)
{
	(void)schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}

static inline signed long
schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}

static inline long
io_schedule_timeout(signed long timeout)
{
	return schedule_timeout(timeout);
}

#define TASK_COMM_LEN	MAXCOMLEN

/*
 * local_clock: fast time source, monotonic on the same cpu
 */
static inline uint64_t
local_clock(void)
{
	struct timespec ts;

	getnanouptime(&ts);
	return (ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec;
}

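/*
 * Illustrative only: measuring an interval with local_clock().  The
 * difference is only meaningful if both samples are taken on the same
 * cpu; "do_work" is a hypothetical function.
 *
 *	uint64_t t0 = local_clock();
 *	do_work();
 *	uint64_t elapsed_ns = local_clock() - t0;
 */
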
static inline void
yield(void)
{
	lwkt_yield();
}

static inline int
wake_up_process(struct task_struct *tsk)
{
	/*
	 * Among other things, this function is supposed to act as
	 * a barrier.
	 */
	smp_wmb();
	wakeup(tsk);

	return 1;	/* Always indicate the process was woken up */
}

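/*
 * Illustrative pairing (not compiled): a waker publishes its data
 * before waking the sleeper, and the smp_wmb() in wake_up_process()
 * orders the store ahead of the wakeup.  "shared->ready" and
 * "sleeper_task" are hypothetical.
 *
 *	shared->ready = true;
 *	wake_up_process(sleeper_task);
 */
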
static inline int
signal_pending(struct task_struct *p)
{
	struct thread *t = p->dfly_td;

	/* Some kernel threads do not have an lwp; t->td_lwp can be NULL */
	if (t->td_lwp == NULL)
		return 0;

	return CURSIG(t->td_lwp);
}

static inline int
fatal_signal_pending(struct task_struct *p)
{
	struct thread *t = p->dfly_td;
	sigset_t pending_set;

	/* Some kernel threads do not have an lwp; t->td_lwp can be NULL */
	if (t->td_lwp == NULL)
		return 0;

	pending_set = lwp_sigpend(t->td_lwp);
	return SIGISMEMBER(pending_set, SIGKILL);
}

static inline int
signal_pending_state(long state, struct task_struct *p)
{
	if (state & TASK_INTERRUPTIBLE)
		return (signal_pending(p));
	else
		return (fatal_signal_pending(p));
}

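/*
 * Illustrative only: an interruptible wait that bails out when a
 * signal arrives, the usual consumer of signal_pending().  "done" is
 * a hypothetical completion flag; ERESTARTSYS comes from
 * <linux/errno.h>.
 *
 *	while (!done) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (signal_pending(current)) {
 *			__set_current_state(TASK_RUNNING);
 *			return -ERESTARTSYS;
 *		}
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */
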
/* Explicit rescheduling in order to reduce latency */
static inline int
cond_resched(void)
{
	lwkt_yield();
	return 0;
}

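/*
 * Illustrative only: a long-running loop yields the cpu periodically
 * so other threads can run; "nitems" and "process_item" are
 * hypothetical.
 *
 *	for (i = 0; i < nitems; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */
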
static inline int
send_sig(int sig, struct proc *p, int priv)
{
	ksignal(p, sig);
	return 0;
}

static inline void
set_need_resched(void)
{
	/* do nothing for now */
	/* used on ttm_bo_reserve failures */
}

static inline bool
need_resched(void)
{
	return any_resched_wanted();
}

static inline int
sched_setscheduler_nocheck(struct task_struct *ts,
			   int policy, const struct sched_param *param)
{
	/* We do not allow different thread scheduling policies */
	return 0;
}

static inline int
pagefault_disabled(void)
{
	return (curthread->td_flags & TDF_NOFAULT);
}

#endif	/* _LINUX_SCHED_H_ */