/*
 * Copyright (c) 2015-2019 François Tigeot <ftigeot@wolfpond.org>
 * Copyright (c) 2019 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */ 27 28 #ifndef _LINUX_SCHED_H_ 29 #define _LINUX_SCHED_H_ 30 31 #include <linux/capability.h> 32 #include <linux/threads.h> 33 #include <linux/kernel.h> 34 #include <linux/types.h> 35 #include <linux/jiffies.h> 36 #include <linux/rbtree.h> 37 #include <linux/cpumask.h> 38 #include <linux/errno.h> 39 #include <linux/mm_types.h> 40 #include <linux/preempt.h> 41 42 #include <asm/page.h> 43 44 #include <linux/smp.h> 45 #include <linux/compiler.h> 46 #include <linux/completion.h> 47 #include <linux/pid.h> 48 #include <linux/rcupdate.h> 49 #include <linux/rculist.h> 50 51 #include <linux/time.h> 52 #include <linux/timer.h> 53 #include <linux/hrtimer.h> 54 #include <linux/gfp.h> 55 56 #include <asm/processor.h> 57 58 #include <linux/spinlock.h> 59 60 #include <sys/param.h> 61 #include <sys/systm.h> 62 #include <sys/proc.h> 63 #include <sys/sched.h> 64 #include <sys/signal2.h> 65 66 #include <machine/cpu.h> 67 68 struct seq_file; 69 70 #define TASK_RUNNING 0 71 #define TASK_INTERRUPTIBLE 1 72 #define TASK_UNINTERRUPTIBLE 2 73 74 #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) 75 76 #define MAX_SCHEDULE_TIMEOUT LONG_MAX 77 78 struct task_struct { 79 struct thread *dfly_td; 80 volatile long state; 81 struct mm_struct *mm; /* mirror copy in p->p_linux_mm */ 82 int prio; 83 84 /* kthread-specific data */ 85 unsigned long kt_flags; 86 struct completion kt_exited; 87 int (*kt_fn)(void *data); 88 void *kt_fndata; 89 int kt_exitvalue; 90 }; 91 92 #define __set_current_state(state_value) current->state = (state_value); 93 94 #define set_current_state(state_value) \ 95 do { \ 96 __set_current_state(state_value); \ 97 mb(); \ 98 } while (0) 99 100 /* 101 * schedule_timeout: puts the current thread to sleep until timeout 102 * if its state allows it to. 
 */
static inline long
schedule_timeout(signed long timeout)
{
	unsigned long time_before, time_after;
	long slept, ret = 0;
	int timo;

	if (timeout < 0) {
		kprintf("schedule_timeout(): timeout cannot be negative\n");
		goto done;
	}

	/*
	 * Indefinite wait if timeout is MAX_SCHEDULE_TIMEOUT, but we are
	 * also translating to an integer. The first conditional will
	 * cover both but to code defensively test both.
	 */
	if (timeout >= INT_MAX || timeout == MAX_SCHEDULE_TIMEOUT)
		timo = 0;	/* tsleep() timo 0 == sleep without timeout */
	else
		timo = timeout;

	switch (current->state) {
	case TASK_INTERRUPTIBLE:
		/*
		 * PCATCH: a signal aborts the sleep.  Return how much of
		 * the timeout is left (in ticks), clamped at 0.
		 */
		time_before = ticks;
		tsleep(current, PCATCH, "lstim", timo);
		time_after = ticks;
		slept = time_after - time_before;
		ret = timeout - slept;
		if (ret < 0)
			ret = 0;
		break;
	case TASK_UNINTERRUPTIBLE:
		/* Not interruptible by signals; ret stays 0 */
		tsleep(current, 0, "lstim", timo);
		break;
	default:
		/* We are supposed to return immediately here */
		tsleep(current, 0, "lstim", 1);
		break;
	}

done:
	/* An indefinite wait always reports MAX_SCHEDULE_TIMEOUT back */
	if (timeout == MAX_SCHEDULE_TIMEOUT)
		ret = MAX_SCHEDULE_TIMEOUT;

	/* The thread is runnable again regardless of how the sleep ended */
	current->state = TASK_RUNNING;
	return ret;
}

/* Sleep indefinitely if current->state allows it (see schedule_timeout) */
static inline void
schedule(void)
{
	(void)schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}

/* Uninterruptible sleep for (at most) the given number of ticks */
static inline signed long
schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}

/* No I/O accounting on DragonFly; identical to schedule_timeout() */
static inline long
io_schedule_timeout(signed long timeout)
{
	return schedule_timeout(timeout);
}

#define TASK_COMM_LEN MAXCOMLEN

/*
 * local_clock: fast time source, monotonic on the same cpu
 */
static inline uint64_t
local_clock(void)
{
	struct timespec ts;

	getnanouptime(&ts);
	/* NOTE(review): assumes tv_sec * NSEC_PER_SEC fits the result
	 * type without overflow (64-bit time_t) -- confirm on all arches */
	return (ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec;
}

/* Voluntarily give up the cpu to other runnable threads */
static inline void
yield(void)
{
	lwkt_yield();
}

static inline int
wake_up_process(struct task_struct *tsk)
{
	/* Among other things, this function is supposed to act as
	 * a barrier */
	smp_wmb();
	wakeup(tsk);	/* tsk is also the tsleep() wait channel */

	return 1;	/* Always indicate the process was woken up */
}

/* Return non-zero if the task has any deliverable signal pending */
static inline int
signal_pending(struct task_struct *p)
{
	struct thread *t = p->dfly_td;

	/* Some kernel threads do not have lwp, t->td_lwp can be NULL */
	if (t->td_lwp == NULL)
		return 0;

	return CURSIG(t->td_lwp);
}

/* Return non-zero only if SIGKILL is pending for the task */
static inline int
fatal_signal_pending(struct task_struct *p)
{
	struct thread *t = p->dfly_td;
	sigset_t pending_set;

	/* Some kernel threads do not have lwp, t->td_lwp can be NULL */
	if (t->td_lwp == NULL)
		return 0;

	pending_set = lwp_sigpend(t->td_lwp);
	return SIGISMEMBER(pending_set, SIGKILL);
}

/*
 * Interruptible waits care about any signal; uninterruptible waits
 * can only be broken by SIGKILL.
 */
static inline int
signal_pending_state(long state, struct task_struct *p)
{
	if (state & TASK_INTERRUPTIBLE)
		return (signal_pending(p));
	else
		return (fatal_signal_pending(p));
}

/* Explicit rescheduling in order to reduce latency */
static inline int
cond_resched(void)
{
	lwkt_yield();
	return 0;	/* Linux returns non-zero only if it rescheduled */
}

/* Deliver signal 'sig' to process p; 'priv' is ignored here */
static inline int
send_sig(int sig, struct proc *p, int priv)
{
	ksignal(p, sig);
	return 0;
}

static inline void
set_need_resched(void)
{
	/* do nothing for now */
	/* used on ttm_bo_reserve failures */
}

/* True if a reschedule has been requested for this cpu */
static inline bool
need_resched(void)
{
	return any_resched_wanted();
}

static inline int
sched_setscheduler_nocheck(struct task_struct *ts,
			   int policy, const struct sched_param *param)
{
	/* We do not allow different thread scheduling policies */
	return 0;
}

#endif	/* _LINUX_SCHED_H_ */