xref: /dragonfly/sys/kern/kern_synch.c (revision 99dd49c5)
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 * $DragonFly: src/sys/kern/kern_synch.c,v 1.91 2008/09/09 04:06:13 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/signal2.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <sys/xwait.h>
#include <sys/ktr.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/serialize.h>

#include <machine/cpu.h>
#include <machine/smp.h>

TAILQ_HEAD(tslpque, thread);

static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
int	lbolt_syncer;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */
int	ncpus;
int	ncpus2, ncpus2_shift, ncpus2_mask;
int	ncpus_fit, ncpus_fit_mask;
int	safepri;
int	tsleep_now_works;

static struct callout loadav_callout;
static struct callout schedcpu_callout;
MALLOC_DEFINE(M_TSLEEP, "tslpque", "tsleep queues");

#if !defined(KTR_TSLEEP)
#define KTR_TSLEEP	KTR_ALL
#endif
KTR_INFO_MASTER(tsleep);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_beg, 0, "tsleep enter %p", sizeof(void *));
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_end, 1, "tsleep exit", 0);
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_beg, 2, "wakeup enter %p", sizeof(void *));
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_end, 3, "wakeup exit", 0);

#define logtsleep1(name)	KTR_LOG(tsleep_ ## name)
#define logtsleep2(name, val)	KTR_LOG(tsleep_ ## name, val)

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
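
/*
 * Added note: each constant is exp(-interval/period) for the 5 second
 * sampling interval, i.e. exp(-5/60), exp(-5/300) and exp(-5/900) for
 * the 1, 5 and 15 minute averages respectively; those reduce to the
 * exp(-1/12), exp(-1/60) and exp(-1/180) values above.
 */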

static void	endtsleep (void *);
static void	unsleep_and_wakeup_thread(struct thread *td);
static void	loadav (void *arg);
static void	schedcpu (void *arg);

/*
 * Adjust the scheduler quantum.  The quantum is specified in microseconds.
 * Note that 'tick' is in microseconds per tick.
 */
static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");
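
/*
 * Usage note (added, illustrative): the quantum is exposed as
 * kern.quantum and is specified in microseconds, so e.g.
 *
 *	sysctl kern.quantum=40000
 *
 * would request a 40ms quantum, which the handler above rounds down
 * to a whole number of ticks (and rejects if below one tick).
 */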

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *     1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 *
 * decay 95% of `lwp_pctcpu' in 60 seconds; see CCPU_SHIFT before changing
 */
#define CCPU_SHIFT	11

static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
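
/*
 * Added worked check: schedcpu() applies ccpu once per second to a
 * long-sleeping lwp's lwp_pctcpu, so after 60 seconds the value has
 * been scaled by exp(-1/20)^60 = exp(-3) ~= 0.0498, i.e. roughly 95%
 * of it has decayed, matching the comment above.
 */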

/*
 * kernel uses `FSCALE', userland (SHOULD) use kern.fscale
 */
int     fscale __unused = FSCALE;	/* exported to systat */
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * Recompute process priorities, once a second.
 *
 * Since the userland schedulers are typically event oriented, if the
 * estcpu calculation at wakeup() time is not sufficient to make a
 * process runnable relative to other processes in the system we have
 * a 1-second recalc to help out.
 *
 * This code also allows us to store sysclock_t data in the process structure
 * without fear of an overrun, since sysclock_t values are guaranteed to hold
 * several seconds' worth of count.
 *
 * WARNING!  callouts can preempt normal threads.  However, they will not
 * preempt a thread holding a spinlock so we *can* safely use spinlocks.
 */
static int schedcpu_stats(struct proc *p, void *data __unused);
static int schedcpu_resource(struct proc *p, void *data __unused);

static void
schedcpu(void *arg)
{
	allproc_scan(schedcpu_stats, NULL);
	allproc_scan(schedcpu_resource, NULL);
	wakeup((caddr_t)&lbolt);
	wakeup((caddr_t)&lbolt_syncer);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * General process statistics once a second
 */
static int
schedcpu_stats(struct proc *p, void *data __unused)
{
	struct lwp *lp;

	crit_enter();
	p->p_swtime++;
	FOREACH_LWP_IN_PROC(lp, p) {
		if (lp->lwp_stat == LSSLEEP)
			lp->lwp_slptime++;

		/*
		 * Only recalculate processes that are active or have slept
		 * less than 2 seconds.  The schedulers understand this.
		 */
		if (lp->lwp_slptime <= 1) {
			p->p_usched->recalculate(lp);
		} else {
			lp->lwp_pctcpu = (lp->lwp_pctcpu * ccpu) >> FSHIFT;
		}
	}
	crit_exit();
	return(0);
}

/*
 * Resource checks.  XXX break out since ksignal/killproc can block,
 * limiting us to one process killed per second.  There is probably
 * a better way.
 */
static int
schedcpu_resource(struct proc *p, void *data __unused)
{
	u_int64_t ttime;
	struct lwp *lp;

	crit_enter();
	if (p->p_stat == SIDL ||
	    p->p_stat == SZOMB ||
	    p->p_limit == NULL
	) {
		crit_exit();
		return(0);
	}

	ttime = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		/*
		 * We may have caught an lp in the middle of being
		 * created, lwp_thread can be NULL.
		 */
		if (lp->lwp_thread) {
			ttime += lp->lwp_thread->td_sticks;
			ttime += lp->lwp_thread->td_uticks;
		}
	}

	switch(plimit_testcpulimit(p->p_limit, ttime)) {
	case PLIMIT_TESTCPU_KILL:
		killproc(p, "exceeded maximum CPU limit");
		break;
	case PLIMIT_TESTCPU_XCPU:
		if ((p->p_flag & P_XCPU) == 0) {
			p->p_flag |= P_XCPU;
			ksignal(p, SIGXCPU);
		}
		break;
	default:
		break;
	}
	crit_exit();
	return(0);
}

/*
 * This is only used by ps.  Generate a cpu percentage use over
 * a period of one second.
 *
 * MPSAFE
 */
void
updatepcpu(struct lwp *lp, int cpticks, int ttlticks)
{
	fixpt_t acc;
	int remticks;

	acc = (cpticks << FSHIFT) / ttlticks;
	if (ttlticks >= ESTCPUFREQ) {
		lp->lwp_pctcpu = acc;
	} else {
		remticks = ESTCPUFREQ - ttlticks;
		lp->lwp_pctcpu = (acc * ttlticks + lp->lwp_pctcpu * remticks) /
				ESTCPUFREQ;
	}
}
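
/*
 * Added note: when fewer than ESTCPUFREQ ticks have elapsed, the blend
 * above weights the fresh sample by ttlticks and the previous value by
 * the remaining remticks = ESTCPUFREQ - ttlticks, i.e.
 *
 *	pctcpu' = (acc * ttlticks + pctcpu * remticks) / ESTCPUFREQ
 *
 * so a short measurement window only partially displaces the old value.
 */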

/*
 * tsleep/wakeup hash table parameters.  Try to find the sweet spot for
 * like addresses being slept on.
 */
#define TABLESIZE	1024
#define LOOKUP(x)	(((intptr_t)(x) >> 6) & (TABLESIZE - 1))
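
/*
 * Added note: the >> 6 discards the low 6 address bits, so idents
 * within the same 64-byte region hash to the same bucket, presumably
 * the "sweet spot" referred to above; e.g. LOOKUP(0x1040) and
 * LOOKUP(0x1078) both yield bucket 0x41.
 */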

static cpumask_t slpque_cpumasks[TABLESIZE];

/*
 * General scheduler initialization.  We force a reschedule 25 times
 * a second by default.  Note that cpu0 is initialized in early boot and
 * cannot make any high level calls.
 *
 * Each cpu has its own sleep queue.
 */
void
sleep_gdinit(globaldata_t gd)
{
	static struct tslpque slpque_cpu0[TABLESIZE];
	int i;

	if (gd->gd_cpuid == 0) {
		sched_quantum = (hz + 24) / 25;
		hogticks = 2 * sched_quantum;

		gd->gd_tsleep_hash = slpque_cpu0;
	} else {
		gd->gd_tsleep_hash = kmalloc(sizeof(slpque_cpu0),
					    M_TSLEEP, M_WAITOK | M_ZERO);
	}
	for (i = 0; i < TABLESIZE; ++i)
		TAILQ_INIT(&gd->gd_tsleep_hash[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If flags includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 *
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 */
int
tsleep(void *ident, int flags, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;		/* may be NULL */
	globaldata_t gd;
	int sig;
	int catch;
	int id;
	int error;
	int oldpri;
	struct callout thandle;

	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable.  Just scrap it for now.
	 */
	if (tsleep_now_works == 0 || panicstr) {
		/*
		 * After a panic, or before we actually have an operational
		 * softclock, just give interrupts a chance, then just return;
		 *
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splz();
		oldpri = td->td_pri & TDPRI_MASK;
		lwkt_setpri_self(safepri);
		lwkt_switch();
		lwkt_setpri_self(oldpri);
		return (0);
	}
	logtsleep2(tsleep_beg, ident);
	gd = td->td_gd;
	KKASSERT(td != &gd->gd_idlethread);	/* you must be kidding! */

	/*
	 * NOTE: all of this occurs on the current cpu, including any
	 * callout-based wakeups, so a critical section is a sufficient
	 * interlock.
	 *
	 * The entire sequence through to where we actually sleep must
	 * run without breaking the critical section.
	 */
	id = LOOKUP(ident);
	catch = flags & PCATCH;
	error = 0;
	sig = 0;

	crit_enter_quick(td);

	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(lp == NULL ||
		lp->lwp_stat == LSRUN ||	/* Obvious */
		lp->lwp_stat == LSSTOP,		/* Set in tstop */
		("tsleep %p %s %d",
			ident, wmesg, lp->lwp_stat));

	/*
	 * Setup for the current process (if this is a process).
	 */
	if (lp) {
		if (catch) {
			/*
			 * Early termination if PCATCH was set and a
			 * signal is pending, interlocked with the
			 * critical section.
			 *
			 * Early termination only occurs when tsleep() is
			 * entered while in a normal LSRUN state.
			 */
			if ((sig = CURSIG(lp)) != 0)
				goto resume;

			/*
			 * Early termination if PCATCH was set and a
			 * mailbox signal was possibly delivered prior to
			 * the system call even being made, in order to
			 * allow the user to interlock without having to
			 * make additional system calls.
			 */
			if (p->p_flag & P_MAILBOX)
				goto resume;

			/*
			 * Causes ksignal to wake us up if a signal is
			 * delivered while we sleep.
			 */
			lp->lwp_flag |= LWP_SINTR;
		}

		/*
		 * Make sure the current process has been untangled from
		 * the userland scheduler and initialize slptime to start
		 * counting.
		 */
		p->p_usched->release_curproc(lp);
		lp->lwp_slptime = 0;
	}

	/*
	 * Move our thread to the correct queue and setup our wchan, etc.
	 */
	lwkt_deschedule_self(td);
	td->td_flags |= TDF_TSLEEPQ;
	TAILQ_INSERT_TAIL(&gd->gd_tsleep_hash[id], td, td_threadq);
	atomic_set_int(&slpque_cpumasks[id], gd->gd_cpumask);

	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	td->td_wdomain = flags & PDOMAIN_MASK;

	/*
	 * Setup the timeout, if any
	 */
	if (timo) {
		callout_init(&thandle);
		callout_reset(&thandle, timo, endtsleep, td);
	}

	/*
	 * Beddy bye bye.
	 */
	if (lp) {
		/*
		 * Ok, we are sleeping.  Place us in the SSLEEP state.
		 */
		KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0);
		/*
		 * tstop() sets LSSTOP, so don't fiddle with that.
		 */
		if (lp->lwp_stat != LSSTOP)
			lp->lwp_stat = LSSLEEP;
		lp->lwp_ru.ru_nvcsw++;
		lwkt_switch();

		/*
		 * And when we are woken up, put us back in LSRUN.  If we
		 * slept for over a second, recalculate our estcpu.
		 */
		lp->lwp_stat = LSRUN;
		if (lp->lwp_slptime)
			p->p_usched->recalculate(lp);
		lp->lwp_slptime = 0;
	} else {
		lwkt_switch();
	}

	/*
	 * Make sure we haven't switched cpus while we were asleep.  It's
	 * not supposed to happen.  Cleanup our temporary flags.
	 */
	KKASSERT(gd == td->td_gd);

	/*
	 * Cleanup the timeout.
	 */
	if (timo) {
		if (td->td_flags & TDF_TIMEOUT) {
			td->td_flags &= ~TDF_TIMEOUT;
			error = EWOULDBLOCK;
		} else {
			callout_stop(&thandle);
		}
	}

	/*
	 * Since td_threadq is used both for our run queue AND for the
	 * tsleep hash queue, we can't still be on it at this point because
	 * we've gotten cpu back.
	 */
	KASSERT((td->td_flags & TDF_TSLEEPQ) == 0,
		("tsleep: impossible thread flags %08x", td->td_flags));
	td->td_wchan = NULL;
	td->td_wmesg = NULL;
	td->td_wdomain = 0;

	/*
	 * Figure out the correct error return.  If interrupted by a
	 * signal we want to return EINTR or ERESTART.
	 *
	 * If P_MAILBOX is set no automatic system call restart occurs
	 * and we return EINTR.  P_MAILBOX is meant to be used as an
	 * interlock, the user must poll it prior to any system call
	 * that it wishes to interlock a mailbox signal against since
	 * the flag is cleared on *any* system call that sleeps.
	 */
resume:
	if (p) {
		if (catch && error == 0) {
			if ((p->p_flag & P_MAILBOX) && sig == 0) {
				error = EINTR;
			} else if (sig != 0 || (sig = CURSIG(lp))) {
				if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
					error = EINTR;
				else
					error = ERESTART;
			}
		}
		lp->lwp_flag &= ~(LWP_BREAKTSLEEP | LWP_SINTR);
		p->p_flag &= ~P_MAILBOX;
	}
	logtsleep1(tsleep_end);
	crit_exit_quick(td);
	return (error);
}
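
/*
 * Illustrative sketch (added, with hypothetical names): the canonical
 * pattern is to sleep on the address of the datum being waited on and
 * have the producer wakeup() the same address.  Assuming both sides run
 * under a common interlock such as the MP lock:
 *
 *	while (sc->sc_ready == 0) {
 *		error = tsleep(&sc->sc_ready, PCATCH, "scwait", hz);
 *		if (error && error != EWOULDBLOCK)
 *			break;
 *	}
 *
 * and in the producer:
 *
 *	sc->sc_ready = 1;
 *	wakeup(&sc->sc_ready);
 */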

/*
 * This is a dandy function that allows us to interlock tsleep/wakeup
 * operations with unspecified upper level locks, such as lockmgr locks,
 * simply by holding a critical section.  The sequence is:
 *
 *	(enter critical section)
 *	(acquire upper level lock)
 *	tsleep_interlock(blah)
 *	(release upper level lock)
 *	tsleep(blah, ...)
 *	(exit critical section)
 *
 * Basically this function sets our cpumask for the ident which informs
 * other cpus that our cpu 'might' be waiting (or about to wait on) the
 * hash index related to the ident.  The critical section prevents another
 * cpu's wakeup() from being processed on our cpu until we are actually
 * able to enter the tsleep().  Thus, no race occurs between our attempt
 * to release a resource and sleep, and another cpu's attempt to acquire
 * a resource and call wakeup.
 *
 * There isn't much of a point to this function unless you call it while
 * holding a critical section.
 */
static __inline void
_tsleep_interlock(globaldata_t gd, void *ident)
{
	int id = LOOKUP(ident);

	atomic_set_int(&slpque_cpumasks[id], gd->gd_cpumask);
}

void
tsleep_interlock(void *ident)
{
	_tsleep_interlock(mycpu, ident);
}
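
/*
 * Illustrative sketch (added; the lock and field names are hypothetical):
 * the sequence from the comment above, made concrete with a lockmgr lock:
 *
 *	crit_enter();
 *	lockmgr(&sc->sc_lock, LK_EXCLUSIVE);
 *	tsleep_interlock(&sc->sc_event);
 *	lockmgr(&sc->sc_lock, LK_RELEASE);
 *	tsleep(&sc->sc_event, 0, "scevt", 0);
 *	crit_exit();
 *
 * A wakeup(&sc->sc_event) issued by another cpu between the lock release
 * and the tsleep() is not lost: the critical section keeps it from being
 * processed on our cpu until we are actually on the sleep queue.
 */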

/*
 * Interlocked spinlock sleep.  An exclusively held spinlock must
 * be passed to msleep().  The function will atomically release the
 * spinlock and tsleep on the ident, then reacquire the spinlock and
 * return.
 *
 * This routine is fairly important along the critical path, so optimize it
 * heavily.
 */
int
msleep(void *ident, struct spinlock *spin, int flags,
       const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int error;

	crit_enter_gd(gd);
	_tsleep_interlock(gd, ident);
	spin_unlock_wr_quick(gd, spin);
	error = tsleep(ident, flags, wmesg, timo);
	spin_lock_wr_quick(gd, spin);
	crit_exit_gd(gd);

	return (error);
}
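
/*
 * Illustrative sketch (added, hypothetical names): a caller checks its
 * predicate under the spinlock and lets msleep() drop and reacquire the
 * lock around the sleep:
 *
 *	spin_lock_wr(&sc->sc_spin);
 *	while (sc->sc_count == 0)
 *		msleep(&sc->sc_count, &sc->sc_spin, 0, "sccnt", 0);
 *	--sc->sc_count;
 *	spin_unlock_wr(&sc->sc_spin);
 *
 * Since the spinlock is held both when the predicate is tested and when
 * it is modified by the waker, no wakeup can be lost.
 */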

/*
 * Interlocked serializer sleep.  An exclusively held serializer must
 * be passed to serialize_sleep().  The function will atomically release
 * the serializer and tsleep on the ident, then reacquire the serializer
 * and return.
 */
int
serialize_sleep(void *ident, struct lwkt_serialize *slz, int flags,
		const char *wmesg, int timo)
{
	int ret;

	ASSERT_SERIALIZED(slz);

	crit_enter();
	tsleep_interlock(ident);
	lwkt_serialize_exit(slz);
	ret = tsleep(ident, flags, wmesg, timo);
	lwkt_serialize_enter(slz);
	crit_exit();

	return ret;
}

/*
 * Directly block on the LWKT thread by descheduling it.  This
 * is much faster than tsleep(), but the only legal way to wake
 * us up is to directly schedule the thread.
 *
 * Setting TDF_SINTR will cause new signals to directly schedule us.
 *
 * This routine is typically called while in a critical section.
 */
int
lwkt_sleep(const char *wmesg, int flags)
{
	thread_t td = curthread;
	int sig;

	if ((flags & PCATCH) == 0 || td->td_lwp == NULL) {
		td->td_flags |= TDF_BLOCKED;
		td->td_wmesg = wmesg;
		lwkt_deschedule_self(td);
		lwkt_switch();
		td->td_wmesg = NULL;
		td->td_flags &= ~TDF_BLOCKED;
		return(0);
	}
	if ((sig = CURSIG(td->td_lwp)) != 0) {
		if (SIGISMEMBER(td->td_proc->p_sigacts->ps_sigintr, sig))
			return(EINTR);
		else
			return(ERESTART);
	}
	td->td_flags |= TDF_BLOCKED | TDF_SINTR;
	td->td_wmesg = wmesg;
	lwkt_deschedule_self(td);
	lwkt_switch();
	td->td_flags &= ~(TDF_BLOCKED | TDF_SINTR);
	td->td_wmesg = NULL;
	return(0);
}
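
/*
 * Added note: since no ident or sleep-queue hash is involved here, the
 * only way to resume a thread blocked in lwkt_sleep() is to schedule it
 * directly, e.g. lwkt_schedule(td) from the waking context (illustrative;
 * the waker must already hold a pointer to the blocked thread).
 */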

/*
 * Implement the timeout for tsleep.
 *
 * We set LWP_BREAKTSLEEP to indicate that an event has occurred, but
 * we only call setrunnable if the process is not stopped.
 *
 * This type of callout timeout is scheduled on the same cpu the process
 * is sleeping on.  Also, at the moment, the MP lock is held.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct lwp *lp;

	ASSERT_MP_LOCK_HELD(curthread);
	crit_enter();

	/*
	 * cpu interlock.  Thread flags are only manipulated on
	 * the cpu owning the thread.  proc flags are only manipulated
	 * by the holder of the MP lock.  We have both.
	 */
	if (td->td_flags & TDF_TSLEEPQ) {
		td->td_flags |= TDF_TIMEOUT;

		if ((lp = td->td_lwp) != NULL) {
			lp->lwp_flag |= LWP_BREAKTSLEEP;
			if (lp->lwp_proc->p_stat != SSTOP)
				setrunnable(lp);
		} else {
			unsleep_and_wakeup_thread(td);
		}
	}
	crit_exit();
}

/*
 * Unsleep and wakeup a thread.  This function runs without the MP lock
 * which means that it can only manipulate thread state on the owning cpu,
 * and cannot touch the process state at all.
 */
static
void
unsleep_and_wakeup_thread(struct thread *td)
{
	globaldata_t gd = mycpu;
	int id;

#ifdef SMP
	if (td->td_gd != gd) {
		lwkt_send_ipiq(td->td_gd, (ipifunc1_t)unsleep_and_wakeup_thread, td);
		return;
	}
#endif
	crit_enter();
	if (td->td_flags & TDF_TSLEEPQ) {
		td->td_flags &= ~TDF_TSLEEPQ;
		id = LOOKUP(td->td_wchan);
		TAILQ_REMOVE(&gd->gd_tsleep_hash[id], td, td_threadq);
		if (TAILQ_FIRST(&gd->gd_tsleep_hash[id]) == NULL)
			atomic_clear_int(&slpque_cpumasks[id], gd->gd_cpumask);
		lwkt_schedule(td);
	}
	crit_exit();
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 * count may be zero or one only.
 *
 * The domain encodes the sleep/wakeup domain AND the first cpu to check
 * (which is always the current cpu); as we iterate across cpus, the
 * remaining candidates are notified asynchronously via IPI.
 *
 * This call may run without the MP lock held.  We can only manipulate thread
 * state on the cpu owning the thread.  We CANNOT manipulate process state
 * at all.
 */
static void
_wakeup(void *ident, int domain)
{
	struct tslpque *qp;
	struct thread *td;
	struct thread *ntd;
	globaldata_t gd;
#ifdef SMP
	cpumask_t mask;
#endif
	int id;

	crit_enter();
	logtsleep2(wakeup_beg, ident);
	gd = mycpu;
	id = LOOKUP(ident);
	qp = &gd->gd_tsleep_hash[id];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_threadq);
		if (td->td_wchan == ident &&
		    td->td_wdomain == (domain & PDOMAIN_MASK)
		) {
			KKASSERT(td->td_flags & TDF_TSLEEPQ);
			td->td_flags &= ~TDF_TSLEEPQ;
			TAILQ_REMOVE(qp, td, td_threadq);
			if (TAILQ_FIRST(qp) == NULL) {
				atomic_clear_int(&slpque_cpumasks[id],
						 gd->gd_cpumask);
			}
			lwkt_schedule(td);
			if (domain & PWAKEUP_ONE)
				goto done;
			goto restart;
		}
	}

#ifdef SMP
	/*
	 * We finished checking the current cpu but there still may be
	 * more work to do.  Either wakeup_one was requested and no matching
	 * thread was found, or a normal wakeup was requested and we have
	 * to continue checking cpus.
	 *
	 * It should be noted that this scheme is actually less expensive than
	 * the old scheme when waking up multiple threads, since we send
	 * only one IPI message per target candidate which may then schedule
	 * multiple threads.  Before we could have wound up sending an IPI
	 * message for each thread on the target cpu (!= current cpu) that
	 * needed to be woken up.
	 *
	 * NOTE: Wakeups occurring on remote cpus are asynchronous.  This
	 * should be ok since we are passing idents in the IPI rather than
	 * thread pointers.
	 */
	if ((domain & PWAKEUP_MYCPU) == 0 &&
	    (mask = slpque_cpumasks[id] & gd->gd_other_cpus) != 0) {
		lwkt_send_ipiq2_mask(mask, _wakeup, ident,
				     domain | PWAKEUP_MYCPU);
	}
#endif
done:
	logtsleep1(wakeup_end);
	crit_exit();
}

/*
 * Wakeup all threads tsleep()ing on the specified ident, on all cpus
 */
void
wakeup(void *ident)
{
    _wakeup(ident, PWAKEUP_ENCODE(0, mycpu->gd_cpuid));
}

/*
 * Wakeup one thread tsleep()ing on the specified ident, on any cpu.
 */
void
wakeup_one(void *ident)
{
    /* XXX potentially round-robin the first responding cpu */
    _wakeup(ident, PWAKEUP_ENCODE(0, mycpu->gd_cpuid) | PWAKEUP_ONE);
}

/*
 * Wakeup threads tsleep()ing on the specified ident on the current cpu
 * only.
 */
void
wakeup_mycpu(void *ident)
{
    _wakeup(ident, PWAKEUP_MYCPU);
}

/*
 * Wakeup one thread tsleep()ing on the specified ident on the current cpu
 * only.
 */
void
wakeup_mycpu_one(void *ident)
{
    /* XXX potentially round-robin the first responding cpu */
    _wakeup(ident, PWAKEUP_MYCPU|PWAKEUP_ONE);
}

/*
 * Wakeup all threads tsleep()ing on the specified ident on the specified cpu
 * only.
 */
void
wakeup_oncpu(globaldata_t gd, void *ident)
{
#ifdef SMP
    if (gd == mycpu) {
	_wakeup(ident, PWAKEUP_MYCPU);
    } else {
	lwkt_send_ipiq2(gd, _wakeup, ident, PWAKEUP_MYCPU);
    }
#else
    _wakeup(ident, PWAKEUP_MYCPU);
#endif
}

/*
 * Wakeup one thread tsleep()ing on the specified ident on the specified cpu
 * only.
 */
void
wakeup_oncpu_one(globaldata_t gd, void *ident)
{
#ifdef SMP
    if (gd == mycpu) {
	_wakeup(ident, PWAKEUP_MYCPU | PWAKEUP_ONE);
    } else {
	lwkt_send_ipiq2(gd, _wakeup, ident, PWAKEUP_MYCPU | PWAKEUP_ONE);
    }
#else
    _wakeup(ident, PWAKEUP_MYCPU | PWAKEUP_ONE);
#endif
}

/*
 * Wakeup all threads waiting on the specified ident that slept using
 * the specified domain, on all cpus.
 */
void
wakeup_domain(void *ident, int domain)
{
    _wakeup(ident, PWAKEUP_ENCODE(domain, mycpu->gd_cpuid));
}

/*
 * Wakeup one thread waiting on the specified ident that slept using
 * the specified domain, on any cpu.
 */
void
wakeup_domain_one(void *ident, int domain)
{
    /* XXX potentially round-robin the first responding cpu */
    _wakeup(ident, PWAKEUP_ENCODE(domain, mycpu->gd_cpuid) | PWAKEUP_ONE);
}

/*
 * setrunnable()
 *
 * Make a process runnable.  The MP lock must be held on call.  This only
 * has an effect if we are in SSLEEP.  We only break out of the
 * tsleep if LWP_BREAKTSLEEP is set, otherwise we just fix-up the state.
 *
 * NOTE: With the MP lock held we can only safely manipulate the process
 * structure.  We cannot safely manipulate the thread structure.
 */
void
setrunnable(struct lwp *lp)
{
	crit_enter();
	ASSERT_MP_LOCK_HELD(curthread);
	if (lp->lwp_stat == LSSTOP)
		lp->lwp_stat = LSSLEEP;
	if (lp->lwp_stat == LSSLEEP && (lp->lwp_flag & LWP_BREAKTSLEEP))
		unsleep_and_wakeup_thread(lp->lwp_thread);
	crit_exit();
}

/*
 * The process is stopped due to some condition, usually because p_stat is
 * set to SSTOP, but also possibly due to being traced.
 *
 * NOTE!  If the caller sets SSTOP, the caller must also clear P_WAITED
 * because the parent may check the child's status before the child actually
 * gets to this routine.
 *
 * This routine is called with the current lwp only, typically just
 * before returning to userland.
 *
 * Setting LWP_BREAKTSLEEP before entering the tsleep will cause a passive
 * SIGCONT to break out of the tsleep.
 */
void
tstop(void)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;

	crit_enter();
	/*
	 * If LWP_WSTOP is set, we were sleeping
	 * while our process was stopped.  At this point
	 * we were already counted as stopped.
	 */
	if ((lp->lwp_flag & LWP_WSTOP) == 0) {
		/*
		 * If we're the last thread to stop, signal
		 * our parent.
		 */
		p->p_nstopped++;
		lp->lwp_flag |= LWP_WSTOP;
		wakeup(&p->p_nstopped);
		if (p->p_nstopped == p->p_nthreads) {
			p->p_flag &= ~P_WAITED;
			wakeup(p->p_pptr);
			if ((p->p_pptr->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0)
				ksignal(p->p_pptr, SIGCHLD);
		}
	}
	while (p->p_stat == SSTOP) {
		lp->lwp_flag |= LWP_BREAKTSLEEP;
		lp->lwp_stat = LSSTOP;
		tsleep(p, 0, "stop", 0);
	}
	p->p_nstopped--;
	lp->lwp_flag &= ~LWP_WSTOP;
	crit_exit();
}

/*
 * Yield / synchronous reschedule.  This is a bit tricky because the trap
 * code might have set a lazy release on the switch function.  Setting
 * P_PASSIVE_ACQ will ensure that the lazy release executes when we call
 * switch, and that we are given a greater chance of affinity with our
 * current cpu.
 *
 * We call lwkt_setpri_self() to rotate our thread to the end of the lwkt
 * run queue.  lwkt_switch() will also execute any assigned passive release
 * (which usually calls release_curproc()), allowing a same/higher priority
 * process to be designated as the current process.
 *
 * While it is possible for a lower priority process to be designated,
 * its call to lwkt_maybe_switch() in acquire_curproc() will likely
 * round-robin back to us and we will be able to re-acquire the current
 * process designation.
 */
void
uio_yield(void)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	lwkt_setpri_self(td->td_pri & TDPRI_MASK);
	if (p) {
		p->p_flag |= P_PASSIVE_ACQ;
		lwkt_switch();
		p->p_flag &= ~P_PASSIVE_ACQ;
	} else {
		lwkt_switch();
	}
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
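/*
 * Added note: each update in loadav() below applies the standard
 * exponential-decay recurrence, in fixed point:
 *
 *	avg' = avg * exp(-t/T) + nrun * (1 - exp(-t/T))
 *
 * with t ~= 5 seconds and T the averaging period, where exp(-t/T) is
 * taken from cexp[] above.
 */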
static int loadav_count_runnable(struct lwp *lp, void *data);

static void
loadav(void *arg)
{
	struct loadavg *avg;
	int i, nrun;

	nrun = 0;
	alllwp_scan(loadav_count_runnable, &nrun);
	avg = &averunnable;
	for (i = 0; i < 3; i++) {
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
	}

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(krandom() % (hz * 2 + 1)),
		      loadav, NULL);
}

static int
loadav_count_runnable(struct lwp *lp, void *data)
{
	int *nrunp = data;
	thread_t td;

	switch (lp->lwp_stat) {
	case LSRUN:
		if ((td = lp->lwp_thread) == NULL)
			break;
		if (td->td_flags & TDF_BLOCKED)
			break;
		++*nrunp;
		break;
	default:
		break;
	}
	return(0);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	callout_init(&loadav_callout);
	callout_init(&schedcpu_callout);

	/* Kick off timeout driven events by calling first time. */
	schedcpu(NULL);
	loadav(NULL);
}