/*-
 * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
26
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/proc.h>
30 #include <sys/signalvar.h>
31 #include <sys/sleepqueue.h>
32
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/list.h>
37 #include <linux/sched.h>
38 #include <linux/spinlock.h>
39 #include <linux/wait.h>
40
/*
 * Put the current thread to sleep on "wchan".
 *
 * The caller must hold the sleepqueue chain lock for "wchan" (all
 * callers in this file take it via sleepq_lock() first); it is
 * released while sleeping.  "timeout" is in ticks, 0 meaning sleep
 * without a time limit.  "state" may only contain TASK_PARKED and
 * TASK_NORMAL bits; TASK_INTERRUPTIBLE makes the sleep abortable by
 * signals.
 *
 * Returns 0 on a normal wakeup, -EWOULDBLOCK if the timeout expired,
 * or -ERESTARTSYS if the sleep was interrupted, in which case the
 * underlying error is stashed in "task" via
 * linux_schedule_save_interrupt_value().
 */
static int
linux_add_to_sleepqueue(void *wchan, struct task_struct *task,
    const char *wmesg, int timeout, int state)
{
	int flags, ret;

	MPASS((state & ~(TASK_PARKED | TASK_NORMAL)) == 0);

	flags = SLEEPQ_SLEEP | ((state & TASK_INTERRUPTIBLE) != 0 ?
	    SLEEPQ_INTERRUPTIBLE : 0);

	sleepq_add(wchan, NULL, wmesg, flags, 0);
	if (timeout != 0)
		sleepq_set_timeout(wchan, timeout);

	/* Giant must not be held across a voluntary sleep. */
	DROP_GIANT();
	if ((state & TASK_INTERRUPTIBLE) != 0) {
		if (timeout == 0)
			ret = -sleepq_wait_sig(wchan, 0);
		else
			ret = -sleepq_timedwait_sig(wchan, 0);
	} else {
		if (timeout == 0) {
			sleepq_wait(wchan, 0);
			ret = 0;
		} else
			ret = -sleepq_timedwait(wchan, 0);
	}
	PICKUP_GIANT();

	/* filter return value */
	if (ret != 0 && ret != -EWOULDBLOCK) {
		linux_schedule_save_interrupt_value(task, ret);
		ret = -ERESTARTSYS;
	}
	return (ret);
}
78
79 unsigned int
linux_msleep_interruptible(unsigned int ms)80 linux_msleep_interruptible(unsigned int ms)
81 {
82 int ret;
83
84 /* guard against invalid values */
85 if (ms == 0)
86 ms = 1;
87 ret = -pause_sbt("lnxsleep", mstosbt(ms), 0, C_HARDCLOCK | C_CATCH);
88
89 switch (ret) {
90 case -EWOULDBLOCK:
91 return (0);
92 default:
93 linux_schedule_save_interrupt_value(current, ret);
94 return (ms);
95 }
96 }
97
/*
 * Wake "task" if its current state matches any bit in "state".
 *
 * The task structure itself is used as the wait channel.  On a match
 * the task is first moved to TASK_WAKING under the sleepqueue lock so
 * that sleepers re-checking their state see the transition.  Returns
 * 1 if the task was signalled, 0 if its state did not match.
 */
static int
wake_up_task(struct task_struct *task, unsigned int state)
{
	int ret, wakeup_swapper;

	ret = wakeup_swapper = 0;
	sleepq_lock(task);
	if ((atomic_read(&task->state) & state) != 0) {
		set_task_state(task, TASK_WAKING);
		wakeup_swapper = sleepq_signal(task, SLEEPQ_SLEEP, 0, 0);
		ret = 1;
	}
	sleepq_release(task);
	/* sleepq_signal() asks us to wake the swapper outside the lock. */
	if (wakeup_swapper)
		kick_proc0();
	return (ret);
}
115
116 bool
linux_signal_pending(struct task_struct * task)117 linux_signal_pending(struct task_struct *task)
118 {
119 struct thread *td;
120 sigset_t pending;
121
122 td = task->task_thread;
123 PROC_LOCK(td->td_proc);
124 pending = td->td_siglist;
125 SIGSETOR(pending, td->td_proc->p_siglist);
126 SIGSETNAND(pending, td->td_sigmask);
127 PROC_UNLOCK(td->td_proc);
128 return (!SIGISEMPTY(pending));
129 }
130
131 bool
linux_fatal_signal_pending(struct task_struct * task)132 linux_fatal_signal_pending(struct task_struct *task)
133 {
134 struct thread *td;
135 bool ret;
136
137 td = task->task_thread;
138 PROC_LOCK(td->td_proc);
139 ret = SIGISMEMBER(td->td_siglist, SIGKILL) ||
140 SIGISMEMBER(td->td_proc->p_siglist, SIGKILL);
141 PROC_UNLOCK(td->td_proc);
142 return (ret);
143 }
144
145 bool
linux_signal_pending_state(long state,struct task_struct * task)146 linux_signal_pending_state(long state, struct task_struct *task)
147 {
148
149 MPASS((state & ~TASK_NORMAL) == 0);
150
151 if ((state & TASK_INTERRUPTIBLE) == 0)
152 return (false);
153 return (linux_signal_pending(task));
154 }
155
156 void
linux_send_sig(int signo,struct task_struct * task)157 linux_send_sig(int signo, struct task_struct *task)
158 {
159 struct thread *td;
160
161 td = task->task_thread;
162 PROC_LOCK(td->td_proc);
163 tdsignal(td, signo);
164 PROC_UNLOCK(td->td_proc);
165 }
166
167 int
autoremove_wake_function(wait_queue_t * wq,unsigned int state,int flags,void * key __unused)168 autoremove_wake_function(wait_queue_t *wq, unsigned int state, int flags,
169 void *key __unused)
170 {
171 struct task_struct *task;
172 int ret;
173
174 task = wq->private;
175 if ((ret = wake_up_task(task, state)) != 0)
176 list_del_init(&wq->task_list);
177 return (ret);
178 }
179
180 int
default_wake_function(wait_queue_t * wq,unsigned int state,int flags,void * key __unused)181 default_wake_function(wait_queue_t *wq, unsigned int state, int flags,
182 void *key __unused)
183 {
184 return (wake_up_task(wq->private, state));
185 }
186
187 void
linux_init_wait_entry(wait_queue_t * wq,int flags)188 linux_init_wait_entry(wait_queue_t *wq, int flags)
189 {
190
191 memset(wq, 0, sizeof(*wq));
192 wq->flags = flags;
193 wq->private = current;
194 wq->func = autoremove_wake_function;
195 INIT_LIST_HEAD(&wq->task_list);
196 }
197
198 void
linux_wake_up(wait_queue_head_t * wqh,unsigned int state,int nr,bool locked)199 linux_wake_up(wait_queue_head_t *wqh, unsigned int state, int nr, bool locked)
200 {
201 wait_queue_t *pos, *next;
202
203 if (!locked)
204 spin_lock(&wqh->lock);
205 list_for_each_entry_safe(pos, next, &wqh->task_list, task_list) {
206 if (pos->func == NULL) {
207 if (wake_up_task(pos->private, state) != 0 && --nr == 0)
208 break;
209 } else {
210 if (pos->func(pos, state, 0, NULL) != 0 && --nr == 0)
211 break;
212 }
213 }
214 if (!locked)
215 spin_unlock(&wqh->lock);
216 }
217
/*
 * First half of the Linux prepare_to_wait()/finish_wait() protocol:
 * enqueue "wq" on the wait queue head (unless it is already queued)
 * and move the current task into "state" before the caller re-checks
 * its wakeup condition and sleeps.
 */
void
linux_prepare_to_wait(wait_queue_head_t *wqh, wait_queue_t *wq, int state)
{

	spin_lock(&wqh->lock);
	/* Only enqueue once; a prior wakeup may have unlinked us. */
	if (list_empty(&wq->task_list))
		__add_wait_queue(wqh, wq);
	set_task_state(current, state);
	spin_unlock(&wqh->lock);
}
228
/*
 * Second half of the wait protocol: mark the current task runnable
 * again and unlink "wq" from the wait queue head if a wakeup did not
 * already remove it.
 */
void
linux_finish_wait(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	set_task_state(current, TASK_RUNNING);
	if (!list_empty(&wq->task_list)) {
		__remove_wait_queue(wqh, wq);
		INIT_LIST_HEAD(&wq->task_list);
	}
	spin_unlock(&wqh->lock);
}
241
242 bool
linux_waitqueue_active(wait_queue_head_t * wqh)243 linux_waitqueue_active(wait_queue_head_t *wqh)
244 {
245 bool ret;
246
247 spin_lock(&wqh->lock);
248 ret = !list_empty(&wqh->task_list);
249 spin_unlock(&wqh->lock);
250 return (ret);
251 }
252
/*
 * Sleep until woken (see wake_up_task()) or until "timeout" ticks
 * elapse.  "lock", if non-NULL, is dropped across the sleep and
 * reacquired before returning, per the Linux wait_event_lock_irq()
 * contract.  Returns 0 on wakeup, -EWOULDBLOCK on timeout or
 * -ERESTARTSYS if interrupted (see linux_add_to_sleepqueue()).
 */
int
linux_wait_event_common(wait_queue_head_t *wqh, wait_queue_t *wq, int timeout,
    unsigned int state, spinlock_t *lock)
{
	struct task_struct *task;
	int ret;

	if (lock != NULL)
		spin_unlock_irq(lock);

	/* range check timeout; 0 means sleep without a timeout */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;

	task = current;

	/*
	 * Our wait queue entry is on the stack - make sure it doesn't
	 * get swapped out while we sleep.
	 */
	PHOLD(task->task_thread->td_proc);
	sleepq_lock(task);
	if (atomic_read(&task->state) != TASK_WAKING) {
		ret = linux_add_to_sleepqueue(task, task, "wevent", timeout,
		    state);
	} else {
		/* A wakeup raced ahead of us; do not sleep. */
		sleepq_release(task);
		ret = 0;
	}
	PRELE(task->task_thread->td_proc);

	if (lock != NULL)
		spin_lock_irq(lock);
	return (ret);
}
290
/*
 * Linux schedule_timeout(): sleep for at most "timeout" ticks, or
 * until awakened.  Passing MAX_SCHEDULE_TIMEOUT sleeps with no time
 * limit and returns MAX_SCHEDULE_TIMEOUT.  Otherwise returns the
 * number of ticks remaining, clamped to [0, timeout]; if the sleep
 * was interrupted, at least 1 is returned so callers can restart.
 */
int
linux_schedule_timeout(int timeout)
{
	struct task_struct *task;
	int ret;
	int state;
	int remainder;

	task = current;

	/* range check timeout; 0 means sleep without a timeout */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;

	/* tick value at which the timeout would expire */
	remainder = ticks + timeout;

	sleepq_lock(task);
	state = atomic_read(&task->state);
	if (state != TASK_WAKING) {
		ret = linux_add_to_sleepqueue(task, task, "sched", timeout,
		    state);
	} else {
		/* A wakeup raced ahead of us; do not sleep. */
		sleepq_release(task);
		ret = 0;
	}
	set_task_state(task, TASK_RUNNING);

	if (timeout == 0)
		return (MAX_SCHEDULE_TIMEOUT);

	/* compute the number of ticks left */
	remainder -= ticks;

	/* range check return value */
	if (ret == -ERESTARTSYS && remainder < 1)
		remainder = 1;
	else if (remainder < 0)
		remainder = 0;
	else if (remainder > timeout)
		remainder = timeout;
	return (remainder);
}
335
336 static void
wake_up_sleepers(void * wchan)337 wake_up_sleepers(void *wchan)
338 {
339 int wakeup_swapper;
340
341 sleepq_lock(wchan);
342 wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
343 sleepq_release(wchan);
344 if (wakeup_swapper)
345 kick_proc0();
346 }
347
/*
 * Map a (word, bit) pair to a distinct wait channel address: the word
 * pointer is shifted left so the low 6 bits can encode the bit index.
 * NOTE(review): assumes 0 <= bit < 64 — confirm against callers.
 */
#define bit_to_wchan(word, bit) ((void *)(((uintptr_t)(word) << 6) | (bit)))
349
/*
 * Wake all threads waiting on bit "bit" of "word" (see
 * linux_wait_on_bit_timeout()).
 */
void
linux_wake_up_bit(void *word, int bit)
{
	void *wchan;

	wchan = bit_to_wchan(word, bit);
	wake_up_sleepers(wchan);
}
356
357 int
linux_wait_on_bit_timeout(unsigned long * word,int bit,unsigned int state,int timeout)358 linux_wait_on_bit_timeout(unsigned long *word, int bit, unsigned int state,
359 int timeout)
360 {
361 struct task_struct *task;
362 void *wchan;
363 int ret;
364
365 /* range check timeout */
366 if (timeout < 1)
367 timeout = 1;
368 else if (timeout == MAX_SCHEDULE_TIMEOUT)
369 timeout = 0;
370
371 task = current;
372 wchan = bit_to_wchan(word, bit);
373 for (;;) {
374 sleepq_lock(wchan);
375 if ((*word & (1 << bit)) == 0) {
376 sleepq_release(wchan);
377 ret = 0;
378 break;
379 }
380 set_task_state(task, state);
381 ret = linux_add_to_sleepqueue(wchan, task, "wbit", timeout,
382 state);
383 if (ret != 0)
384 break;
385 }
386 set_task_state(task, TASK_RUNNING);
387
388 return (ret);
389 }
390
391 void
linux_wake_up_atomic_t(atomic_t * a)392 linux_wake_up_atomic_t(atomic_t *a)
393 {
394
395 wake_up_sleepers(a);
396 }
397
/*
 * Sleep until "*a" reaches zero.  The atomic_t itself serves as the
 * wait channel; linux_wake_up_atomic_t() performs the wakeup.
 * Returns 0 once the value is zero or -ERESTARTSYS if the sleep was
 * interrupted (see linux_add_to_sleepqueue()); no timeout is used.
 */
int
linux_wait_on_atomic_t(atomic_t *a, unsigned int state)
{
	struct task_struct *task;
	void *wchan;
	int ret;

	task = current;
	wchan = a;
	for (;;) {
		sleepq_lock(wchan);
		/* Re-check the value under the sleepqueue lock. */
		if (atomic_read(a) == 0) {
			sleepq_release(wchan);
			ret = 0;
			break;
		}
		set_task_state(task, state);
		ret = linux_add_to_sleepqueue(wchan, task, "watomic", 0, state);
		if (ret != 0)
			break;
	}
	set_task_state(task, TASK_RUNNING);

	return (ret);
}
423
424 bool
linux_wake_up_state(struct task_struct * task,unsigned int state)425 linux_wake_up_state(struct task_struct *task, unsigned int state)
426 {
427
428 return (wake_up_task(task, state) != 0);
429 }
430