1 /*-
2  * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/proc.h>
33 #include <sys/signalvar.h>
34 #include <sys/sleepqueue.h>
35 
36 #include <linux/errno.h>
37 #include <linux/kernel.h>
38 #include <linux/list.h>
39 #include <linux/sched.h>
40 #include <linux/spinlock.h>
41 #include <linux/wait.h>
42 
/*
 * Put the current thread to sleep on "wchan" until it is awoken, the
 * timeout ("timeout" ticks, 0 for no timeout) expires, or - when
 * TASK_INTERRUPTIBLE is set in "state" - a signal arrives.  The
 * sleepqueue chain lock for "wchan" must be held on entry; it is
 * released by the sleepq_*wait*() call that blocks.
 *
 * Returns 0 on a normal wakeup, -EWOULDBLOCK if the timeout expired,
 * or -ERESTARTSYS for any other (signal-related) error.
 */
static int
linux_add_to_sleepqueue(void *wchan, const char *wmesg, int timeout, int state)
{
	int flags, ret;

	/* Only the Linux "normal" sleep states are supported. */
	MPASS((state & ~TASK_NORMAL) == 0);

	flags = SLEEPQ_SLEEP | ((state & TASK_INTERRUPTIBLE) != 0 ?
	    SLEEPQ_INTERRUPTIBLE : 0);

	sleepq_add(wchan, NULL, wmesg, flags, 0);
	if (timeout != 0)
		sleepq_set_timeout(wchan, timeout);
	/*
	 * Pick the wait primitive matching the interruptible/timeout
	 * combination; the sleepq_*wait*() functions return a positive
	 * errno, which we negate into Linux convention.
	 */
	if ((state & TASK_INTERRUPTIBLE) != 0) {
		if (timeout == 0)
			ret = -sleepq_wait_sig(wchan, 0);
		else
			ret = -sleepq_timedwait_sig(wchan, 0);
	} else {
		if (timeout == 0) {
			sleepq_wait(wchan, 0);
			ret = 0;
		} else
			ret = -sleepq_timedwait(wchan, 0);
	}
	/* filter return value */
	if (ret != 0 && ret != -EWOULDBLOCK)
		ret = -ERESTARTSYS;
	return (ret);
}
73 
/*
 * Wake "task" if it is currently in one of the sleep states in "state".
 * The task's state is moved to TASK_WAKING under the sleepqueue lock so
 * a sleeper that has not yet blocked (see linux_wait_event_common /
 * linux_schedule_timeout) observes the wakeup and skips sleeping.
 *
 * Returns 1 if the task was awoken, 0 if it was not in a matching state.
 */
static int
wake_up_task(struct task_struct *task, unsigned int state)
{
	int ret, wakeup_swapper;

	ret = wakeup_swapper = 0;
	sleepq_lock(task);
	if ((atomic_load_acq_int(&task->state) & state) != 0) {
		set_task_state(task, TASK_WAKING);
		wakeup_swapper = sleepq_signal(task, SLEEPQ_SLEEP, 0, 0);
		ret = 1;
	}
	sleepq_release(task);
	/* sleepq_signal() asks for the swapper when a swapped-out thread woke. */
	if (wakeup_swapper)
		kick_proc0();
	return (ret);
}
91 
92 bool
93 linux_signal_pending(struct task_struct *task)
94 {
95 	struct thread *td;
96 	sigset_t pending;
97 
98 	td = task->task_thread;
99 	PROC_LOCK(td->td_proc);
100 	pending = td->td_siglist;
101 	SIGSETOR(pending, td->td_proc->p_siglist);
102 	SIGSETNAND(pending, td->td_sigmask);
103 	PROC_UNLOCK(td->td_proc);
104 	return (!SIGISEMPTY(pending));
105 }
106 
107 bool
108 linux_fatal_signal_pending(struct task_struct *task)
109 {
110 	struct thread *td;
111 	bool ret;
112 
113 	td = task->task_thread;
114 	PROC_LOCK(td->td_proc);
115 	ret = SIGISMEMBER(td->td_siglist, SIGKILL) ||
116 	    SIGISMEMBER(td->td_proc->p_siglist, SIGKILL);
117 	PROC_UNLOCK(td->td_proc);
118 	return (ret);
119 }
120 
121 bool
122 linux_signal_pending_state(long state, struct task_struct *task)
123 {
124 
125 	MPASS((state & ~TASK_NORMAL) == 0);
126 
127 	if ((state & TASK_INTERRUPTIBLE) == 0)
128 		return (false);
129 	return (linux_signal_pending(task));
130 }
131 
132 void
133 linux_send_sig(int signo, struct task_struct *task)
134 {
135 	struct thread *td;
136 
137 	td = task->task_thread;
138 	PROC_LOCK(td->td_proc);
139 	tdsignal(td, signo);
140 	PROC_UNLOCK(td->td_proc);
141 }
142 
143 int
144 autoremove_wake_function(wait_queue_t *wq, unsigned int state, int flags,
145     void *key __unused)
146 {
147 	struct task_struct *task;
148 	int ret;
149 
150 	task = wq->private;
151 	if ((ret = wake_up_task(task, state)) != 0)
152 		list_del_init(&wq->task_list);
153 	return (ret);
154 }
155 
156 void
157 linux_wake_up(wait_queue_head_t *wqh, unsigned int state, int nr, bool locked)
158 {
159 	wait_queue_t *pos, *next;
160 
161 	if (!locked)
162 		spin_lock(&wqh->lock);
163 	list_for_each_entry_safe(pos, next, &wqh->task_list, task_list) {
164 		if (pos->func == NULL) {
165 			if (wake_up_task(pos->private, state) != 0 && --nr == 0)
166 				break;
167 		} else {
168 			if (pos->func(pos, state, 0, NULL) != 0 && --nr == 0)
169 				break;
170 		}
171 	}
172 	if (!locked)
173 		spin_unlock(&wqh->lock);
174 }
175 
/*
 * Enqueue "wq" on "wqh" (if not already queued) and move the current
 * task into the given sleep state, both under the queue lock so a
 * concurrent wakeup cannot slip between the two steps.
 */
void
linux_prepare_to_wait(wait_queue_head_t *wqh, wait_queue_t *wq, int state)
{

	spin_lock(&wqh->lock);
	if (list_empty(&wq->task_list))
		__add_wait_queue(wqh, wq);
	set_task_state(current, state);
	spin_unlock(&wqh->lock);
}
186 
/*
 * Undo linux_prepare_to_wait(): mark the current task runnable again
 * and remove "wq" from "wqh" if a wakeup did not already unlink it
 * (e.g. via autoremove_wake_function()).
 */
void
linux_finish_wait(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	set_task_state(current, TASK_RUNNING);
	if (!list_empty(&wq->task_list)) {
		__remove_wait_queue(wqh, wq);
		INIT_LIST_HEAD(&wq->task_list);
	}
	spin_unlock(&wqh->lock);
}
199 
200 bool
201 linux_waitqueue_active(wait_queue_head_t *wqh)
202 {
203 	bool ret;
204 
205 	spin_lock(&wqh->lock);
206 	ret = !list_empty(&wqh->task_list);
207 	spin_unlock(&wqh->lock);
208 	return (ret);
209 }
210 
211 int
212 linux_wait_event_common(wait_queue_head_t *wqh, wait_queue_t *wq, int timeout,
213     unsigned int state, spinlock_t *lock)
214 {
215 	struct task_struct *task;
216 	long ret;
217 
218 	if (lock != NULL)
219 		spin_unlock_irq(lock);
220 
221 	DROP_GIANT();
222 
223 	task = current;
224 
225 	/*
226 	 * Our wait queue entry is on the stack - make sure it doesn't
227 	 * get swapped out while we sleep.
228 	 */
229 #ifndef NO_SWAPPING
230 	PHOLD(task->task_thread->td_proc);
231 #endif
232 	sleepq_lock(task);
233 	if (atomic_load_acq_int(&task->state) != TASK_WAKING) {
234 		ret = linux_add_to_sleepqueue(task, "wevent", timeout, state);
235 	} else {
236 		sleepq_release(task);
237 		ret = linux_signal_pending_state(state, task) ? -ERESTARTSYS : 0;
238 	}
239 #ifndef NO_SWAPPING
240 	PRELE(task->task_thread->td_proc);
241 #endif
242 
243 	PICKUP_GIANT();
244 
245 	if (lock != NULL)
246 		spin_lock_irq(lock);
247 	return (ret);
248 }
249 
/*
 * Sleep in the task's current state for up to "timeout" ticks
 * (MAX_SCHEDULE_TIMEOUT sleeps with no timeout).
 *
 * Returns MAX_SCHEDULE_TIMEOUT for an untimed sleep, otherwise the
 * number of ticks left of the timeout, clamped to [0, timeout].
 */
int
linux_schedule_timeout(int timeout)
{
	struct task_struct *task;
	int state;
	int remainder;

	task = current;

	/* range check timeout */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;	/* 0 means "sleep without a timeout" below */

	/* Absolute expiry tick; "ticks" may wrap, the subtraction below copes. */
	remainder = ticks + timeout;

	DROP_GIANT();

	sleepq_lock(task);
	state = atomic_load_acq_int(&task->state);
	if (state != TASK_WAKING)
		(void)linux_add_to_sleepqueue(task, "sched", timeout, state);
	else
		sleepq_release(task);	/* wakeup already arrived; skip sleeping */
	set_task_state(task, TASK_RUNNING);

	PICKUP_GIANT();

	if (timeout == 0)
		return (MAX_SCHEDULE_TIMEOUT);

	/* range check return value */
	remainder -= ticks;
	if (remainder < 0)
		remainder = 0;
	else if (remainder > timeout)
		remainder = timeout;
	return (remainder);
}
290 
291 static void
292 wake_up_sleepers(void *wchan)
293 {
294 	int wakeup_swapper;
295 
296 	sleepq_lock(wchan);
297 	wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
298 	sleepq_release(wchan);
299 	if (wakeup_swapper)
300 		kick_proc0();
301 }
302 
/*
 * Encode a (word address, bit index) pair into a wait channel pointer;
 * shifting the address left by 6 leaves room for bit indices 0-63.
 */
#define	bit_to_wchan(word, bit)	((void *)(((uintptr_t)(word) << 6) | (bit)))
304 
/*
 * Wake all threads waiting (via linux_wait_on_bit_timeout()) on the
 * given bit of "word".
 */
void
linux_wake_up_bit(void *word, int bit)
{

	wake_up_sleepers(bit_to_wchan(word, bit));
}
311 
312 int
313 linux_wait_on_bit_timeout(unsigned long *word, int bit, unsigned int state,
314     int timeout)
315 {
316 	struct task_struct *task;
317 	void *wchan;
318 	int ret;
319 
320 	DROP_GIANT();
321 
322 	/* range check timeout */
323 	if (timeout < 1)
324 		timeout = 1;
325 	else if (timeout == MAX_SCHEDULE_TIMEOUT)
326 		timeout = 0;
327 
328 	task = current;
329 	wchan = bit_to_wchan(word, bit);
330 	for (;;) {
331 		sleepq_lock(wchan);
332 		if ((*word & (1 << bit)) == 0) {
333 			sleepq_release(wchan);
334 			ret = 0;
335 			break;
336 		}
337 		set_task_state(task, state);
338 		ret = linux_add_to_sleepqueue(wchan, "wbit", timeout, state);
339 		if (ret != 0)
340 			break;
341 	}
342 	set_task_state(task, TASK_RUNNING);
343 
344 	PICKUP_GIANT();
345 
346 	return (ret);
347 }
348 
/*
 * Wake all threads waiting (via linux_wait_on_atomic_t()) on "a".
 */
void
linux_wake_up_atomic_t(atomic_t *a)
{

	wake_up_sleepers(a);
}
355 
/*
 * Sleep until "*a" reads zero, or - with TASK_INTERRUPTIBLE in "state" -
 * a signal arrives.  The atomic itself serves as the wait channel.
 *
 * Returns 0 once the value is observed zero, or the negative errno from
 * linux_add_to_sleepqueue() (-ERESTARTSYS on signal).
 */
int
linux_wait_on_atomic_t(atomic_t *a, unsigned int state)
{
	struct task_struct *task;
	void *wchan;
	int ret;

	DROP_GIANT();

	task = current;
	wchan = a;
	for (;;) {
		/* Re-check the value under the sleepqueue lock each pass. */
		sleepq_lock(wchan);
		if (atomic_read(a) == 0) {
			sleepq_release(wchan);
			ret = 0;
			break;
		}
		set_task_state(task, state);
		ret = linux_add_to_sleepqueue(wchan, "watomic", 0, state);
		if (ret != 0)
			break;
	}
	set_task_state(task, TASK_RUNNING);

	PICKUP_GIANT();

	return (ret);
}
385 
386 bool
387 linux_wake_up_state(struct task_struct *task, unsigned int state)
388 {
389 
390 	return (wake_up_task(task, state) != 0);
391 }
392