xref: /dragonfly/sys/kern/kern_synch.c (revision 0de090e1)
1 /*-
2  * Copyright (c) 1982, 1986, 1990, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
35  * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
36  */
37 
38 #include "opt_ktrace.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/proc.h>
43 #include <sys/kernel.h>
44 #include <sys/signalvar.h>
45 #include <sys/resourcevar.h>
46 #include <sys/vmmeter.h>
47 #include <sys/sysctl.h>
48 #include <sys/lock.h>
49 #include <sys/uio.h>
50 #ifdef KTRACE
51 #include <sys/ktrace.h>
52 #endif
53 #include <sys/ktr.h>
54 #include <sys/serialize.h>
55 
56 #include <sys/signal2.h>
57 #include <sys/thread2.h>
58 #include <sys/spinlock2.h>
59 #include <sys/mutex2.h>
60 
61 #include <machine/cpu.h>
62 #include <machine/smp.h>
63 
64 TAILQ_HEAD(tslpque, thread);
65 
66 static void sched_setup (void *dummy);
67 SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL);
68 
69 int	lbolt;
70 void	*lbolt_syncer;
71 int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */
72 int	ncpus;
73 int	ncpus2, ncpus2_shift, ncpus2_mask;	/* note: mask not cpumask_t */
74 int	ncpus_fit, ncpus_fit_mask;		/* note: mask not cpumask_t */
75 int	safepri;
76 int	tsleep_now_works;
77 int	tsleep_crypto_dump = 0;
78 
79 static struct callout loadav_callout;
80 static struct callout schedcpu_callout;
81 MALLOC_DEFINE(M_TSLEEP, "tslpque", "tsleep queues");
82 
83 #define __DEALL(ident)	__DEQUALIFY(void *, ident)
84 
85 #if !defined(KTR_TSLEEP)
86 #define KTR_TSLEEP	KTR_ALL
87 #endif
88 KTR_INFO_MASTER(tsleep);
89 KTR_INFO(KTR_TSLEEP, tsleep, tsleep_beg, 0, "tsleep enter %p", const volatile void *ident);
90 KTR_INFO(KTR_TSLEEP, tsleep, tsleep_end, 1, "tsleep exit");
91 KTR_INFO(KTR_TSLEEP, tsleep, wakeup_beg, 2, "wakeup enter %p", const volatile void *ident);
92 KTR_INFO(KTR_TSLEEP, tsleep, wakeup_end, 3, "wakeup exit");
93 KTR_INFO(KTR_TSLEEP, tsleep, ilockfail,  4, "interlock failed %p", const volatile void *ident);
94 
95 #define logtsleep1(name)	KTR_LOG(tsleep_ ## name)
96 #define logtsleep2(name, val)	KTR_LOG(tsleep_ ## name, val)
97 
98 struct loadavg averunnable =
99 	{ {0, 0, 0}, FSCALE };	/* load average of runnable procs */
100 /*
101  * Constants for averages over 1, 5, and 15 minutes
102  * when sampling at 5 second intervals.
103  */
104 static fixpt_t cexp[3] = {
105 	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
106 	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
107 	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
108 };
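/*
 * For reference (sketch of the arithmetic, mirrors loadav() below): these
 * are the decay factors e^(-t/T) for the 5-second sampling interval t and
 * averaging windows T of 1, 5 and 15 minutes (12, 60 and 180 samples),
 * scaled by FSCALE for fixed-point use.  loadav() applies the standard
 * exponential moving average
 *
 *	load[i] = load[i] * cexp[i] + nrun * (1 - cexp[i])
 *
 * which in FSHIFT fixed point is computed as
 *
 *	ldavg[i] = (cexp[i] * ldavg[i] +
 *		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
 */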
109 
110 static void	endtsleep (void *);
111 static void	loadav (void *arg);
112 static void	schedcpu (void *arg);
113 
114 /*
115  * Adjust the scheduler quantum.  The quantum is specified in microseconds.
116  * Note that 'ustick' is the length of one tick in microseconds.
117  */
118 static int
119 sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
120 {
121 	int error, new_val;
122 
123 	new_val = sched_quantum * ustick;
124 	error = sysctl_handle_int(oidp, &new_val, 0, req);
125 	if (error != 0 || req->newptr == NULL)
126 		return (error);
127 	if (new_val < ustick)
128 		return (EINVAL);
129 	sched_quantum = new_val / ustick;
130 	return (0);
131 }
132 
133 SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
134 	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");
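/*
 * Example (hypothetical hz value, for illustration only): with hz = 100,
 * ustick is 10000 microseconds, so the default sched_quantum of
 * (hz + 24) / 25 = 4 ticks set in sleep_gdinit() reads back through this
 * sysctl as 40000.  A request such as
 *
 *	sysctl kern.quantum=20000
 *
 * stores 20000 / ustick = 2 ticks; requests for less than one tick
 * (ustick) are rejected with EINVAL.
 */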
135 
136 static int pctcpu_decay = 10;
137 SYSCTL_INT(_kern, OID_AUTO, pctcpu_decay, CTLFLAG_RW, &pctcpu_decay, 0, "");
138 
139 /*
140  * The kernel uses `FSCALE'; userland should use kern.fscale
141  */
142 int     fscale __unused = FSCALE;	/* exported to systat */
143 SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");
144 
145 /*
146  * Recompute process priorities once a second.
147  *
148  * Since the userland schedulers are typically event-oriented, if the
149  * estcpu calculation at wakeup() time is not sufficient to make a
150  * process runnable relative to other processes in the system, we have
151  * a 1-second recalc to help out.
152  *
153  * This code also allows us to store sysclock_t data in the process structure
154  * without fear of an overrun, since a sysclock_t is guaranteed to hold
155  * several seconds' worth of count.
156  *
157  * WARNING!  callouts can preempt normal threads.  However, they will not
158  * preempt a thread holding a spinlock so we *can* safely use spinlocks.
159  */
160 static int schedcpu_stats(struct proc *p, void *data __unused);
161 static int schedcpu_resource(struct proc *p, void *data __unused);
162 
163 static void
164 schedcpu(void *arg)
165 {
166 	allproc_scan(schedcpu_stats, NULL);
167 	allproc_scan(schedcpu_resource, NULL);
168 	wakeup((caddr_t)&lbolt);
169 	wakeup(lbolt_syncer);
170 	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
171 }
172 
173 /*
174  * General process statistics once a second
175  */
176 static int
177 schedcpu_stats(struct proc *p, void *data __unused)
178 {
179 	struct lwp *lp;
180 
181 	/*
182 	 * Threads may not be completely set up if the process is in the SIDL state.
183 	 */
184 	if (p->p_stat == SIDL)
185 		return(0);
186 
187 	PHOLD(p);
188 	if (lwkt_trytoken(&p->p_token) == FALSE) {
189 		PRELE(p);
190 		return(0);
191 	}
192 
193 	p->p_swtime++;
194 	FOREACH_LWP_IN_PROC(lp, p) {
195 		if (lp->lwp_stat == LSSLEEP) {
196 			++lp->lwp_slptime;
197 			if (lp->lwp_slptime == 1)
198 				p->p_usched->uload_update(lp);
199 		}
200 
201 		/*
202 		 * Only recalculate processes that are active or have slept
203 		 * less than 2 seconds.  The schedulers understand this.
204 		 * Otherwise decay lwp_pctcpu using the kern.pctcpu_decay factor.
205 		 */
206 		if (lp->lwp_slptime <= 1) {
207 			p->p_usched->recalculate(lp);
208 		} else {
209 			int decay;
210 
211 			decay = pctcpu_decay;
212 			cpu_ccfence();
213 			if (decay <= 1)
214 				decay = 1;
215 			if (decay > 100)
216 				decay = 100;
217 			lp->lwp_pctcpu = (lp->lwp_pctcpu * (decay - 1)) / decay;
218 		}
219 	}
220 	lwkt_reltoken(&p->p_token);
221 	lwkt_yield();
222 	PRELE(p);
223 	return(0);
224 }
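/*
 * Worked example of the decay above (default kern.pctcpu_decay = 10):
 * an lwp that has been asleep for more than a second keeps
 * lwp_pctcpu * 9/10 on each one-second pass, i.e. roughly a
 * 10%-per-second decay, instead of being recalculated by the userland
 * scheduler.
 */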
225 
226 /*
227  * Resource checks.  XXX break out since ksignal/killproc can block,
228  * limiting us to one process killed per second.  There is probably
229  * a better way.
230  */
231 static int
232 schedcpu_resource(struct proc *p, void *data __unused)
233 {
234 	u_int64_t ttime;
235 	struct lwp *lp;
236 
237 	if (p->p_stat == SIDL)
238 		return(0);
239 
240 	PHOLD(p);
241 	if (lwkt_trytoken(&p->p_token) == FALSE) {
242 		PRELE(p);
243 		return(0);
244 	}
245 
246 	if (p->p_stat == SZOMB || p->p_limit == NULL) {
247 		lwkt_reltoken(&p->p_token);
248 		PRELE(p);
249 		return(0);
250 	}
251 
252 	ttime = 0;
253 	FOREACH_LWP_IN_PROC(lp, p) {
254 		/*
255 		 * We may have caught an lp in the middle of being
256 		 * created, so lwp_thread can be NULL.
257 		 */
258 		if (lp->lwp_thread) {
259 			ttime += lp->lwp_thread->td_sticks;
260 			ttime += lp->lwp_thread->td_uticks;
261 		}
262 	}
263 
264 	switch(plimit_testcpulimit(p->p_limit, ttime)) {
265 	case PLIMIT_TESTCPU_KILL:
266 		killproc(p, "exceeded maximum CPU limit");
267 		break;
268 	case PLIMIT_TESTCPU_XCPU:
269 		if ((p->p_flags & P_XCPU) == 0) {
270 			p->p_flags |= P_XCPU;
271 			ksignal(p, SIGXCPU);
272 		}
273 		break;
274 	default:
275 		break;
276 	}
277 	lwkt_reltoken(&p->p_token);
278 	lwkt_yield();
279 	PRELE(p);
280 	return(0);
281 }
282 
283 /*
284  * This is only used by ps.  Generate a cpu usage percentage over
285  * a period of one second.
286  */
287 void
288 updatepcpu(struct lwp *lp, int cpticks, int ttlticks)
289 {
290 	fixpt_t acc;
291 	int remticks;
292 
293 	acc = (cpticks << FSHIFT) / ttlticks;
294 	if (ttlticks >= ESTCPUFREQ) {
295 		lp->lwp_pctcpu = acc;
296 	} else {
297 		remticks = ESTCPUFREQ - ttlticks;
298 		lp->lwp_pctcpu = (acc * ttlticks + lp->lwp_pctcpu * remticks) /
299 				ESTCPUFREQ;
300 	}
301 }
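/*
 * Sketch of the blend above (mirrors the code, not compiled): the sample
 *
 *	acc = (cpticks << FSHIFT) / ttlticks
 *
 * is the fraction of cpu used over the measured window in FSHIFT fixed
 * point.  When the window is shorter than ESTCPUFREQ ticks it is blended
 * with the previous value, each weighted by its share of the nominal
 * window:
 *
 *	pctcpu = (acc * ttlticks +
 *		  old_pctcpu * (ESTCPUFREQ - ttlticks)) / ESTCPUFREQ
 */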
302 
303 /*
304  * tsleep/wakeup hash table parameters.  Try to find the sweet spot for
305  * like addresses being slept on.
306  */
307 #define TABLESIZE	4001
308 #define LOOKUP(x)	(((u_int)(uintptr_t)(x)) % TABLESIZE)
309 
310 static cpumask_t slpque_cpumasks[TABLESIZE];
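/*
 * Sketch of the lookup path (mirrors _tsleep_interlock() and _wakeup()
 * below): a wait channel hashes to one of TABLESIZE buckets,
 *
 *	id = LOOKUP(ident);
 *	qp = &gd->gd_tsleep_hash[id];
 *
 * Each cpu has its own queue per bucket, and slpque_cpumasks[id] records
 * which cpus currently have sleepers hashed to that bucket, so wakeup()
 * only sends IPIs to cpus that might hold a matching thread.
 */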
311 
312 /*
313  * General scheduler initialization.  We force a reschedule 25 times
314  * a second by default.  Note that cpu0 is initialized in early boot and
315  * cannot make any high level calls.
316  *
317  * Each cpu has its own sleep queue.
318  */
319 void
320 sleep_gdinit(globaldata_t gd)
321 {
322 	static struct tslpque slpque_cpu0[TABLESIZE];
323 	int i;
324 
325 	if (gd->gd_cpuid == 0) {
326 		sched_quantum = (hz + 24) / 25;
327 		gd->gd_tsleep_hash = slpque_cpu0;
328 	} else {
329 		gd->gd_tsleep_hash = kmalloc(sizeof(slpque_cpu0),
330 					    M_TSLEEP, M_WAITOK | M_ZERO);
331 	}
332 	for (i = 0; i < TABLESIZE; ++i)
333 		TAILQ_INIT(&gd->gd_tsleep_hash[i]);
334 }
335 
336 /*
337  * This is a dandy function that allows us to interlock tsleep/wakeup
338  * operations with unspecified upper level locks, such as lockmgr locks,
339  * simply by holding a critical section.  The sequence is:
340  *
341  *	(acquire upper level lock)
342  *	tsleep_interlock(blah)
343  *	(release upper level lock)
344  *	tsleep(blah, ...)
345  *
346  * Basically this function queues us on the tsleep queue without actually
347  * descheduling us.  When tsleep() is later called with PINTERLOCKED it
348  * assumes the thread was already queued; otherwise tsleep() queues it itself.
349  *
350  * Thus it is possible to receive the wakeup prior to going to sleep and
351  * the race conditions are covered.
352  */
353 static __inline void
354 _tsleep_interlock(globaldata_t gd, const volatile void *ident, int flags)
355 {
356 	thread_t td = gd->gd_curthread;
357 	int id;
358 
359 	crit_enter_quick(td);
360 	if (td->td_flags & TDF_TSLEEPQ) {
361 		id = LOOKUP(td->td_wchan);
362 		TAILQ_REMOVE(&gd->gd_tsleep_hash[id], td, td_sleepq);
363 		if (TAILQ_FIRST(&gd->gd_tsleep_hash[id]) == NULL) {
364 			ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[id],
365 					       gd->gd_cpuid);
366 		}
367 	} else {
368 		td->td_flags |= TDF_TSLEEPQ;
369 	}
370 	id = LOOKUP(ident);
371 	TAILQ_INSERT_TAIL(&gd->gd_tsleep_hash[id], td, td_sleepq);
372 	ATOMIC_CPUMASK_ORBIT(slpque_cpumasks[id], gd->gd_cpuid);
373 	td->td_wchan = ident;
374 	td->td_wdomain = flags & PDOMAIN_MASK;
375 	crit_exit_quick(td);
376 }
377 
378 void
379 tsleep_interlock(const volatile void *ident, int flags)
380 {
381 	_tsleep_interlock(mycpu, ident, flags);
382 }
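/*
 * Minimal usage sketch of the interlock pattern (hypothetical caller and
 * structure, not compiled); this is the same sequence that ssleep(),
 * lksleep(), mtxsleep() and zsleep() below package for their lock types:
 *
 *	spin_lock(&sc->spin);
 *	while (sc->busy) {
 *		tsleep_interlock(&sc->busy, 0);
 *		spin_unlock(&sc->spin);
 *		tsleep(&sc->busy, PINTERLOCKED, "busywt", 0);
 *		spin_lock(&sc->spin);
 *	}
 *	sc->busy = 1;
 *	spin_unlock(&sc->spin);
 */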
383 
384 /*
385  * Remove thread from sleepq.  Must be called with a critical section held.
386  * The thread must not be migrating.
387  */
388 static __inline void
389 _tsleep_remove(thread_t td)
390 {
391 	globaldata_t gd = mycpu;
392 	int id;
393 
394 	KKASSERT(td->td_gd == gd && IN_CRITICAL_SECT(td));
395 	KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
396 	if (td->td_flags & TDF_TSLEEPQ) {
397 		td->td_flags &= ~TDF_TSLEEPQ;
398 		id = LOOKUP(td->td_wchan);
399 		TAILQ_REMOVE(&gd->gd_tsleep_hash[id], td, td_sleepq);
400 		if (TAILQ_FIRST(&gd->gd_tsleep_hash[id]) == NULL) {
401 			ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[id],
402 					       gd->gd_cpuid);
403 		}
404 		td->td_wchan = NULL;
405 		td->td_wdomain = 0;
406 	}
407 }
408 
409 void
410 tsleep_remove(thread_t td)
411 {
412 	_tsleep_remove(td);
413 }
414 
415 /*
416  * General sleep call.  Suspends the current process until a wakeup is
417  * performed on the specified identifier.  The process will then be made
418  * runnable with the specified priority.  Sleeps at most timo/hz seconds
419  * (0 means no timeout).  If flags includes the PCATCH flag, signals are checked
420  * before and after sleeping, else signals are not checked.  Returns 0 if
421  * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
422  * signal needs to be delivered, ERESTART is returned if the current system
423  * call should be restarted if possible, and EINTR is returned if the system
424  * call should be interrupted by the signal.
425  *
426  * Note that if we are a process, we release_curproc() before messing with
427  * the LWKT scheduler.
428  *
429  * During autoconfiguration or after a panic, a sleep will simply
430  * lower the priority briefly to allow interrupts, then return.
431  *
432  * WARNING!  This code can't block (short of switching away), or bad things
433  *           will happen.  No getting tokens, no blocking locks, etc.
434  */
435 int
436 tsleep(const volatile void *ident, int flags, const char *wmesg, int timo)
437 {
438 	struct thread *td = curthread;
439 	struct lwp *lp = td->td_lwp;
440 	struct proc *p = td->td_proc;		/* may be NULL */
441 	globaldata_t gd;
442 	int sig;
443 	int catch;
444 	int error;
445 	int oldpri;
446 	struct callout thandle;
447 
448 	/*
449 	 * Currently a severe hack.  Make sure any delayed wakeups
450 	 * are flushed before we sleep or we might deadlock on whatever
451 	 * event we are sleeping on.
452 	 */
453 	if (td->td_flags & TDF_DELAYED_WAKEUP)
454 		wakeup_end_delayed();
455 
456 	/*
457 	 * NOTE: removed KTRPOINT, it could cause races due to blocking
458 	 * even in stable.  Just scrap it for now.
459 	 */
460 	if (!tsleep_crypto_dump && (tsleep_now_works == 0 || panicstr)) {
461 		/*
462 		 * After a panic, or before we actually have an operational
463 		 * softclock, just give interrupts a chance, then just return;
464 		 *
465 		 * don't run any other procs or panic below,
466 		 * in case this is the idle process and already asleep.
467 		 */
468 		splz();
469 		oldpri = td->td_pri;
470 		lwkt_setpri_self(safepri);
471 		lwkt_switch();
472 		lwkt_setpri_self(oldpri);
473 		return (0);
474 	}
475 	logtsleep2(tsleep_beg, ident);
476 	gd = td->td_gd;
477 	KKASSERT(td != &gd->gd_idlethread);	/* you must be kidding! */
478 	td->td_wakefromcpu = -1;		/* overwritten by _wakeup */
479 
480 	/*
481 	 * NOTE: all of this occurs on the current cpu, including any
482 	 * callout-based wakeups, so a critical section is a sufficient
483 	 * interlock.
484 	 *
485 	 * The entire sequence through to where we actually sleep must
486 	 * run without breaking the critical section.
487 	 */
488 	catch = flags & PCATCH;
489 	error = 0;
490 	sig = 0;
491 
492 	crit_enter_quick(td);
493 
494 	KASSERT(ident != NULL, ("tsleep: no ident"));
495 	KASSERT(lp == NULL ||
496 		lp->lwp_stat == LSRUN ||	/* Obvious */
497 		lp->lwp_stat == LSSTOP,		/* Set in tstop */
498 		("tsleep %p %s %d",
499 			ident, wmesg, lp->lwp_stat));
500 
501 	/*
502 	 * We interlock the sleep queue if the caller has not already done
503 	 * it for us.  This must be done before we potentially acquire any
504 	 * tokens or we can lose the wakeup.
505 	 */
506 	if ((flags & PINTERLOCKED) == 0) {
507 		_tsleep_interlock(gd, ident, flags);
508 	}
509 
510 	/*
511 	 * Setup for the current process (if this is a process).  We must
512 	 * interlock with lwp_token to avoid remote wakeup races via
513 	 * setrunnable()
514 	 */
515 	if (lp) {
516 		lwkt_gettoken(&lp->lwp_token);
517 
518 		/*
519 		 * If the umbrella process is in the SCORE state then
520 		 * make sure that the thread is flagged going into a
521 		 * normal sleep to allow the core dump to proceed, otherwise
522 		 * the coredump can end up waiting forever.  If the normal
523 		 * sleep is woken up, the thread will enter a stopped state
524 		 * upon return to userland.
525 		 *
526 		 * We do not want to interrupt or cause a thread exit at
527 		 * this juncture because that will mess up the state the
528 		 * coredump is trying to save.
529 		 */
530 		if (p->p_stat == SCORE &&
531 		    (lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
532 			atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
533 			++p->p_nstopped;
534 		}
535 
536 		/*
537 		 * PCATCH requested.
538 		 */
539 		if (catch) {
540 			/*
541 			 * Early termination if PCATCH was set and a
542 			 * signal is pending, interlocked with the
543 			 * critical section.
544 			 *
545 			 * Early termination only occurs when tsleep() is
546 			 * entered while in a normal LSRUN state.
547 			 */
548 			if ((sig = CURSIG(lp)) != 0)
549 				goto resume;
550 
551 			/*
552 			 * Causes ksignal to wake us up if a signal is
553 			 * received (interlocked with p->p_token).
554 			 */
555 			lp->lwp_flags |= LWP_SINTR;
556 		}
557 	} else {
558 		KKASSERT(p == NULL);
559 	}
560 
561 	/*
562 	 * Make sure the current process has been untangled from
563 	 * the userland scheduler and initialize slptime to start
564 	 * counting.
565 	 *
566 	 * NOTE: td->td_wakefromcpu is pre-set by the release function
567 	 *	 for the dfly scheduler, and then adjusted by _wakeup()
568 	 */
569 	if (lp) {
570 		p->p_usched->release_curproc(lp);
571 		lp->lwp_slptime = 0;
572 	}
573 
574 	/*
575 	 * If the interlocked flag is set but our cpu bit in the slpqueue
576 	 * is no longer set, then a wakeup was processed in between the
577 	 * tsleep_interlock() (ours or the caller's) and here.  This can
578 	 * occur under numerous circumstances including when we release the
579 	 * current process.
580 	 *
581 	 * Extreme loads can cause the sending of an IPI (e.g. wakeup()'s)
582 	 * to process incoming IPIs, thus draining incoming wakeups.
583 	 */
584 	if ((td->td_flags & TDF_TSLEEPQ) == 0) {
585 		logtsleep2(ilockfail, ident);
586 		goto resume;
587 	}
588 
589 	/*
590 	 * Scheduling is blocked while in a critical section.  Coincide
591 	 * the descheduled-by-tsleep flag with the descheduling of the
592 	 * lwkt.
593 	 *
594 	 * The timer callout is localized on our cpu and interlocked by
595 	 * our critical section.
596 	 */
597 	lwkt_deschedule_self(td);
598 	td->td_flags |= TDF_TSLEEP_DESCHEDULED;
599 	td->td_wmesg = wmesg;
600 
601 	/*
602 	 * Setup the timeout, if any.  The timeout is only operable while
603 	 * the thread is flagged descheduled.
604 	 */
605 	KKASSERT((td->td_flags & TDF_TIMEOUT) == 0);
606 	if (timo) {
607 		callout_init_mp(&thandle);
608 		callout_reset(&thandle, timo, endtsleep, td);
609 	}
610 
611 	/*
612 	 * Beddy bye bye.
613 	 */
614 	if (lp) {
615 		/*
616 		 * Ok, we are sleeping.  Place us in the LSSLEEP state.
617 		 */
618 		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
619 
620 		/*
621 		 * tstop() sets LSSTOP, so don't fiddle with that.
622 		 */
623 		if (lp->lwp_stat != LSSTOP)
624 			lp->lwp_stat = LSSLEEP;
625 		lp->lwp_ru.ru_nvcsw++;
626 		p->p_usched->uload_update(lp);
627 		lwkt_switch();
628 
629 		/*
630 		 * And when we are woken up, put us back in LSRUN.  If we
631 		 * slept for over a second, recalculate our estcpu.
632 		 */
633 		lp->lwp_stat = LSRUN;
634 		if (lp->lwp_slptime) {
635 			p->p_usched->uload_update(lp);
636 			p->p_usched->recalculate(lp);
637 		}
638 		lp->lwp_slptime = 0;
639 	} else {
640 		lwkt_switch();
641 	}
642 
643 	/*
644 	 * Make sure we haven't switched cpus while we were asleep.  It's
645 	 * not supposed to happen.  Clean up our temporary flags.
646 	 */
647 	KKASSERT(gd == td->td_gd);
648 
649 	/*
650 	 * Clean up the timeout.  If the timeout has already occurred, thandle
651 	 * has already been stopped, otherwise stop thandle.  If the timeout
652 	 * is running (the callout thread must be blocked trying to get
653 	 * lwp_token) then wait for us to get scheduled.
654 	 */
655 	if (timo) {
656 		while (td->td_flags & TDF_TIMEOUT_RUNNING) {
657 			/* else we won't get rescheduled! */
658 			if (lp->lwp_stat != LSSTOP)
659 				lp->lwp_stat = LSSLEEP;
660 			lwkt_deschedule_self(td);
661 			td->td_wmesg = "tsrace";
662 			lwkt_switch();
663 			kprintf("td %p %s: timeout race\n", td, td->td_comm);
664 		}
665 		if (td->td_flags & TDF_TIMEOUT) {
666 			td->td_flags &= ~TDF_TIMEOUT;
667 			error = EWOULDBLOCK;
668 		} else {
669 			/* does not block when on same cpu */
670 			callout_stop(&thandle);
671 		}
672 	}
673 	td->td_flags &= ~TDF_TSLEEP_DESCHEDULED;
674 
675 	/*
676 	 * Make sure we have been removed from the sleepq.  In most
677 	 * cases this will have been done for us already but it is
678 	 * possible for a scheduling IPI to be in-flight from a
679 	 * previous tsleep/tsleep_interlock() or due to a straight-out
680 	 * call to lwkt_schedule() (in the case of an interrupt thread),
681 	 * causing a spurious wakeup.
682 	 */
683 	_tsleep_remove(td);
684 	td->td_wmesg = NULL;
685 
686 	/*
687 	 * Figure out the correct error return.  If interrupted by a
688 	 * signal we want to return EINTR or ERESTART.
689 	 */
690 resume:
691 	if (lp) {
692 		if (catch && error == 0) {
693 			if (sig != 0 || (sig = CURSIG(lp))) {
694 				if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
695 					error = EINTR;
696 				else
697 					error = ERESTART;
698 			}
699 		}
700 		lp->lwp_flags &= ~LWP_SINTR;
701 		lwkt_reltoken(&lp->lwp_token);
702 	}
703 	logtsleep1(tsleep_end);
704 	crit_exit_quick(td);
705 	return (error);
706 }
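/*
 * Minimal usage sketch (hypothetical wait channel, not compiled): a
 * catchable wait bounded to one second.
 *
 *	error = tsleep(&sc->event, PCATCH, "evtwait", hz);
 *
 * Per the description above, error is 0 if wakeup(&sc->event) ran,
 * EWOULDBLOCK if the hz-tick timeout expired, and EINTR or ERESTART if
 * PCATCH caught a signal.
 */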
707 
708 /*
709  * Interlocked spinlock sleep.  An exclusively held spinlock must
710  * be passed to ssleep().  The function will atomically release the
711  * spinlock and tsleep on the ident, then reacquire the spinlock and
712  * return.
713  *
714  * This routine is fairly important along the critical path, so optimize it
715  * heavily.
716  */
717 int
718 ssleep(const volatile void *ident, struct spinlock *spin, int flags,
719        const char *wmesg, int timo)
720 {
721 	globaldata_t gd = mycpu;
722 	int error;
723 
724 	_tsleep_interlock(gd, ident, flags);
725 	spin_unlock_quick(gd, spin);
726 	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
727 	_spin_lock_quick(gd, spin, wmesg);
728 
729 	return (error);
730 }
731 
732 int
733 lksleep(const volatile void *ident, struct lock *lock, int flags,
734 	const char *wmesg, int timo)
735 {
736 	globaldata_t gd = mycpu;
737 	int error;
738 
739 	_tsleep_interlock(gd, ident, flags);
740 	lockmgr(lock, LK_RELEASE);
741 	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
742 	lockmgr(lock, LK_EXCLUSIVE);
743 
744 	return (error);
745 }
746 
747 /*
748  * Interlocked mutex sleep.  An exclusively held mutex must be passed
749  * to mtxsleep().  The function will atomically release the mutex
750  * and tsleep on the ident, then reacquire the mutex and return.
751  */
752 int
753 mtxsleep(const volatile void *ident, struct mtx *mtx, int flags,
754 	 const char *wmesg, int timo)
755 {
756 	globaldata_t gd = mycpu;
757 	int error;
758 
759 	_tsleep_interlock(gd, ident, flags);
760 	mtx_unlock(mtx);
761 	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
762 	mtx_lock_ex_quick(mtx);
763 
764 	return (error);
765 }
766 
767 /*
768  * Interlocked serializer sleep.  An exclusively held serializer must
769  * be passed to zsleep().  The function will atomically release
770  * the serializer and tsleep on the ident, then reacquire the serializer
771  * and return.
772  */
773 int
774 zsleep(const volatile void *ident, struct lwkt_serialize *slz, int flags,
775        const char *wmesg, int timo)
776 {
777 	globaldata_t gd = mycpu;
778 	int ret;
779 
780 	ASSERT_SERIALIZED(slz);
781 
782 	_tsleep_interlock(gd, ident, flags);
783 	lwkt_serialize_exit(slz);
784 	ret = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
785 	lwkt_serialize_enter(slz);
786 
787 	return ret;
788 }
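/*
 * Usage sketch (hypothetical caller, not compiled): lksleep(), mtxsleep()
 * and zsleep() all follow the same release/sleep/reacquire shape, e.g.
 * waiting for a condition protected by a lockmgr lock:
 *
 *	lockmgr(&sc->lk, LK_EXCLUSIVE);
 *	while (sc->state == 0)
 *		lksleep(&sc->state, &sc->lk, 0, "stwait", 0);
 *	lockmgr(&sc->lk, LK_RELEASE);
 */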
789 
790 /*
791  * Directly block on the LWKT thread by descheduling it.  This
792  * is much faster than tsleep(), but the only legal way to wake
793  * us up is to directly schedule the thread.
794  *
795  * Setting TDF_SINTR will cause new signals to directly schedule us.
796  *
797  * This routine must be called while in a critical section.
798  */
799 int
800 lwkt_sleep(const char *wmesg, int flags)
801 {
802 	thread_t td = curthread;
803 	int sig;
804 
805 	if ((flags & PCATCH) == 0 || td->td_lwp == NULL) {
806 		td->td_flags |= TDF_BLOCKED;
807 		td->td_wmesg = wmesg;
808 		lwkt_deschedule_self(td);
809 		lwkt_switch();
810 		td->td_wmesg = NULL;
811 		td->td_flags &= ~TDF_BLOCKED;
812 		return(0);
813 	}
814 	if ((sig = CURSIG(td->td_lwp)) != 0) {
815 		if (SIGISMEMBER(td->td_proc->p_sigacts->ps_sigintr, sig))
816 			return(EINTR);
817 		else
818 			return(ERESTART);
819 
820 	}
821 	td->td_flags |= TDF_BLOCKED | TDF_SINTR;
822 	td->td_wmesg = wmesg;
823 	lwkt_deschedule_self(td);
824 	lwkt_switch();
825 	td->td_flags &= ~(TDF_BLOCKED | TDF_SINTR);
826 	td->td_wmesg = NULL;
827 	return(0);
828 }
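/*
 * Usage sketch (hypothetical, not compiled; callers must provide their
 * own interlock against a wakeup racing the deschedule): the sleeper
 * publishes its thread pointer and parks, and the waker schedules it
 * directly, since no ident hash is involved:
 *
 *	crit_enter();
 *	sc->parked_td = curthread;
 *	error = lwkt_sleep("parked", PCATCH);
 *	crit_exit();
 *
 *	lwkt_schedule(sc->parked_td);
 */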
829 
830 /*
831  * Implement the timeout for tsleep.
832  *
833  * This type of callout timeout is scheduled on the same cpu the process
834  * is sleeping on.  The callout runs MPSAFE (set up via callout_init_mp()).
835  */
836 static void
837 endtsleep(void *arg)
838 {
839 	thread_t td = arg;
840 	struct lwp *lp;
841 
842 	/*
843 	 * We are going to have to get the lwp_token, which means we might
844 	 * block.  This can race a tsleep getting woken up by other means
845 	 * so set TDF_TIMEOUT_RUNNING to force the tsleep to wait for our
846 	 * processing to complete (sorry tsleep!).
847 	 *
848 	 * We can safely set td_flags because td MUST be on the same cpu
849 	 * as we are.
850 	 */
851 	KKASSERT(td->td_gd == mycpu);
852 	crit_enter();
853 	td->td_flags |= TDF_TIMEOUT_RUNNING | TDF_TIMEOUT;
854 
855 	/*
856 	 * This can block but TDF_TIMEOUT_RUNNING will prevent the thread
857 	 * from exiting the tsleep on us.  The flag is interlocked by virtue
858 	 * of lp being on the same cpu as we are.
859 	 */
860 	if ((lp = td->td_lwp) != NULL)
861 		lwkt_gettoken(&lp->lwp_token);
862 
863 	KKASSERT(td->td_flags & TDF_TSLEEP_DESCHEDULED);
864 
865 	if (lp) {
866 		/*
867 		 * callout timer should never be set in tstop() because
868 		 * it passes a timeout of 0.
869 		 */
870 		KKASSERT(lp->lwp_stat != LSSTOP);
871 		setrunnable(lp);
872 		lwkt_reltoken(&lp->lwp_token);
873 	} else {
874 		_tsleep_remove(td);
875 		lwkt_schedule(td);
876 	}
877 	KKASSERT(td->td_gd == mycpu);
878 	td->td_flags &= ~TDF_TIMEOUT_RUNNING;
879 	crit_exit();
880 }
881 
882 /*
883  * Make all threads sleeping on the specified identifier runnable, or
884  * just one of them if PWAKEUP_ONE is encoded in the domain.
885  *
886  * The domain encodes the sleep/wakeup domain, flags, plus the originating
887  * cpu.
888  *
889  * This call may run without the MP lock held.  We can only manipulate thread
890  * state on the cpu owning the thread.  We CANNOT manipulate process state
891  * at all.
892  *
893  * _wakeup() can be passed to an IPI so we can't use (const volatile
894  * void *ident).
895  */
896 static void
897 _wakeup(void *ident, int domain)
898 {
899 	struct tslpque *qp;
900 	struct thread *td;
901 	struct thread *ntd;
902 	globaldata_t gd;
903 	cpumask_t mask;
904 	int id;
905 
906 	crit_enter();
907 	logtsleep2(wakeup_beg, ident);
908 	gd = mycpu;
909 	id = LOOKUP(ident);
910 	qp = &gd->gd_tsleep_hash[id];
911 restart:
912 	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
913 		ntd = TAILQ_NEXT(td, td_sleepq);
914 		if (td->td_wchan == ident &&
915 		    td->td_wdomain == (domain & PDOMAIN_MASK)
916 		) {
917 			KKASSERT(td->td_gd == gd);
918 			_tsleep_remove(td);
919 			td->td_wakefromcpu = PWAKEUP_DECODE(domain);
920 			if (td->td_flags & TDF_TSLEEP_DESCHEDULED) {
921 				lwkt_schedule(td);
922 				if (domain & PWAKEUP_ONE)
923 					goto done;
924 			}
925 			goto restart;
926 		}
927 	}
928 
929 	/*
930 	 * We finished checking the current cpu but there still may be
931 	 * more work to do.  Either wakeup_one was requested and no matching
932 	 * thread was found, or a normal wakeup was requested and we have
933 	 * to continue checking cpus.
934 	 *
935 	 * It should be noted that this scheme is actually less expensive than
936 	 * the old scheme when waking up multiple threads, since we send
937 	 * only one IPI message per target candidate which may then schedule
938 	 * multiple threads.  Before we could have wound up sending an IPI
939 	 * message for each thread on the target cpu (!= current cpu) that
940 	 * needed to be woken up.
941 	 *
942 	 * NOTE: Wakeups occurring on remote cpus are asynchronous.  This
943 	 * should be ok since we are passing idents in the IPI rather than
944 	 * thread pointers.
945 	 */
946 	if ((domain & PWAKEUP_MYCPU) == 0) {
947 		mask = slpque_cpumasks[id];
948 		CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
949 		if (CPUMASK_TESTNZERO(mask)) {
950 			lwkt_send_ipiq2_mask(mask, _wakeup, ident,
951 					     domain | PWAKEUP_MYCPU);
952 		}
953 	}
954 done:
955 	logtsleep1(wakeup_end);
956 	crit_exit();
957 }
958 
959 /*
960  * Wakeup all threads tsleep()ing on the specified ident, on all cpus
961  */
962 void
963 wakeup(const volatile void *ident)
964 {
965     globaldata_t gd = mycpu;
966     thread_t td = gd->gd_curthread;
967 
968     if (td && (td->td_flags & TDF_DELAYED_WAKEUP)) {
969 	/*
970 	 * If we are in a delayed wakeup section, record up to two wakeups in
971 	 * a per-CPU queue and issue them when we block or exit the delayed
972 	 * wakeup section.
973 	 */
974 	if (atomic_cmpset_ptr(&gd->gd_delayed_wakeup[0], NULL, ident))
975 		return;
976 	if (atomic_cmpset_ptr(&gd->gd_delayed_wakeup[1], NULL, ident))
977 		return;
978 
979 	ident = atomic_swap_ptr(__DEQUALIFY(volatile void **, &gd->gd_delayed_wakeup[1]),
980 				__DEALL(ident));
981 	ident = atomic_swap_ptr(__DEQUALIFY(volatile void **, &gd->gd_delayed_wakeup[0]),
982 				__DEALL(ident));
983     }
984 
985     _wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, gd->gd_cpuid));
986 }
987 
988 /*
989  * Wakeup one thread tsleep()ing on the specified ident, on any cpu.
990  */
991 void
992 wakeup_one(const volatile void *ident)
993 {
994     /* XXX potentially round-robin the first responding cpu */
995     _wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
996 			    PWAKEUP_ONE);
997 }
998 
999 /*
1000  * Wakeup threads tsleep()ing on the specified ident on the current cpu
1001  * only.
1002  */
1003 void
1004 wakeup_mycpu(const volatile void *ident)
1005 {
1006     _wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
1007 			    PWAKEUP_MYCPU);
1008 }
1009 
1010 /*
1011  * Wakeup one thread tsleep()ing on the specified ident on the current cpu
1012  * only.
1013  */
1014 void
1015 wakeup_mycpu_one(const volatile void *ident)
1016 {
1017     /* XXX potentially round-robin the first responding cpu */
1018     _wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
1019 			    PWAKEUP_MYCPU | PWAKEUP_ONE);
1020 }
1021 
1022 /*
1023  * Wakeup all threads tsleep()ing on the specified ident on the specified cpu
1024  * only.
1025  */
1026 void
1027 wakeup_oncpu(globaldata_t gd, const volatile void *ident)
1028 {
1029     globaldata_t mygd = mycpu;
1030     if (gd == mygd) {
1031 	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
1032 				PWAKEUP_MYCPU);
1033     } else {
1034 	lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
1035 			PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
1036 			PWAKEUP_MYCPU);
1037     }
1038 }
1039 
1040 /*
1041  * Wakeup one thread tsleep()ing on the specified ident on the specified cpu
1042  * only.
1043  */
1044 void
1045 wakeup_oncpu_one(globaldata_t gd, const volatile void *ident)
1046 {
1047     globaldata_t mygd = mycpu;
1048     if (gd == mygd) {
1049 	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
1050 				PWAKEUP_MYCPU | PWAKEUP_ONE);
1051     } else {
1052 	lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
1053 			PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
1054 			PWAKEUP_MYCPU | PWAKEUP_ONE);
1055     }
1056 }
1057 
1058 /*
1059  * Wakeup all threads waiting on the specified ident that slept using
1060  * the specified domain, on all cpus.
1061  */
1062 void
1063 wakeup_domain(const volatile void *ident, int domain)
1064 {
1065     _wakeup(__DEALL(ident), PWAKEUP_ENCODE(domain, mycpu->gd_cpuid));
1066 }
1067 
1068 /*
1069  * Wakeup one thread waiting on the specified ident that slept using
1070  * the specified domain, on any cpu.
1071  */
1072 void
1073 wakeup_domain_one(const volatile void *ident, int domain)
1074 {
1075     /* XXX potentially round-robin the first responding cpu */
1076     _wakeup(__DEALL(ident),
1077 	    PWAKEUP_ENCODE(domain, mycpu->gd_cpuid) | PWAKEUP_ONE);
1078 }
1079 
1080 void
1081 wakeup_start_delayed(void)
1082 {
1083     globaldata_t gd = mycpu;
1084 
1085     crit_enter();
1086     gd->gd_curthread->td_flags |= TDF_DELAYED_WAKEUP;
1087     crit_exit();
1088 }
1089 
1090 void
1091 wakeup_end_delayed(void)
1092 {
1093     globaldata_t gd = mycpu;
1094 
1095     if (gd->gd_curthread->td_flags & TDF_DELAYED_WAKEUP) {
1096 	crit_enter();
1097 	gd->gd_curthread->td_flags &= ~TDF_DELAYED_WAKEUP;
1098 	if (gd->gd_delayed_wakeup[0] || gd->gd_delayed_wakeup[1]) {
1099 	    if (gd->gd_delayed_wakeup[0]) {
1100 		    wakeup(gd->gd_delayed_wakeup[0]);
1101 		    gd->gd_delayed_wakeup[0] = NULL;
1102 	    }
1103 	    if (gd->gd_delayed_wakeup[1]) {
1104 		    wakeup(gd->gd_delayed_wakeup[1]);
1105 		    gd->gd_delayed_wakeup[1] = NULL;
1106 	    }
1107 	}
1108 	crit_exit();
1109     }
1110 }
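/*
 * Usage sketch (hypothetical caller, not compiled): defer wakeup IPIs
 * from a hot loop.  Up to two pending idents are held per cpu; further
 * wakeups push the oldest out and issue it immediately, and blocking in
 * tsleep() also flushes the queue (see the hack note in tsleep()):
 *
 *	wakeup_start_delayed();
 *	for (i = 0; i < count; ++i) {
 *		process_item(&items[i]);
 *		wakeup(&items[i]);
 *	}
 *	wakeup_end_delayed();
 */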
1111 
1112 /*
1113  * setrunnable()
1114  *
1115  * Make a process runnable.  lp->lwp_token must be held on call and this
1116  * function must be called from the cpu owning lp.
1117  *
1118  * This only has an effect if we are in LSSTOP or LSSLEEP.
1119  */
1120 void
1121 setrunnable(struct lwp *lp)
1122 {
1123 	thread_t td = lp->lwp_thread;
1124 
1125 	ASSERT_LWKT_TOKEN_HELD(&lp->lwp_token);
1126 	KKASSERT(td->td_gd == mycpu);
1127 	crit_enter();
1128 	if (lp->lwp_stat == LSSTOP)
1129 		lp->lwp_stat = LSSLEEP;
1130 	if (lp->lwp_stat == LSSLEEP) {
1131 		_tsleep_remove(td);
1132 		lwkt_schedule(td);
1133 	} else if (td->td_flags & TDF_SINTR) {
1134 		lwkt_schedule(td);
1135 	}
1136 	crit_exit();
1137 }
1138 
1139 /*
1140  * The process is stopped due to some condition, usually because p_stat is
1141  * set to SSTOP, but also possibly due to being traced.
1142  *
1143  * Caller must hold p->p_token
1144  *
1145  * NOTE!  If the caller sets SSTOP, the caller must also clear P_WAITED
1146  * because the parent may check the child's status before the child actually
1147  * gets to this routine.
1148  *
1149  * This routine is called with the current lwp only, typically just
1150  * before returning to userland if the process state is detected as
1151  * possibly being in a stopped state.
1152  */
1153 void
1154 tstop(void)
1155 {
1156 	struct lwp *lp = curthread->td_lwp;
1157 	struct proc *p = lp->lwp_proc;
1158 	struct proc *q;
1159 
1160 	lwkt_gettoken(&lp->lwp_token);
1161 	crit_enter();
1162 
1163 	/*
1164 	 * If LWP_MP_WSTOP is set, we were sleeping
1165 	 * while our process was stopped.  At this point
1166 	 * we were already counted as stopped.
1167 	 */
1168 	if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
1169 		/*
1170 		 * If we're the last thread to stop, signal
1171 		 * our parent.
1172 		 */
1173 		p->p_nstopped++;
1174 		atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
1175 		wakeup(&p->p_nstopped);
1176 		if (p->p_nstopped == p->p_nthreads) {
1177 			/*
1178 			 * Token required to interlock kern_wait()
1179 			 */
1180 			q = p->p_pptr;
1181 			PHOLD(q);
1182 			lwkt_gettoken(&q->p_token);
1183 			p->p_flags &= ~P_WAITED;
1184 			wakeup(p->p_pptr);
1185 			if ((q->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0)
1186 				ksignal(q, SIGCHLD);
1187 			lwkt_reltoken(&q->p_token);
1188 			PRELE(q);
1189 		}
1190 	}
1191 	while (p->p_stat == SSTOP || p->p_stat == SCORE) {
1192 		lp->lwp_stat = LSSTOP;
1193 		tsleep(p, 0, "stop", 0);
1194 	}
1195 	p->p_nstopped--;
1196 	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
1197 	crit_exit();
1198 	lwkt_reltoken(&lp->lwp_token);
1199 }
1200 
1201 /*
1202  * Compute a tenex style load average of a quantity on
1203  * 1, 5 and 15 minute intervals.
1204  */
1205 static int loadav_count_runnable(struct lwp *p, void *data);
1206 
1207 static void
1208 loadav(void *arg)
1209 {
1210 	struct loadavg *avg;
1211 	int i, nrun;
1212 
1213 	nrun = 0;
1214 	alllwp_scan(loadav_count_runnable, &nrun);
1215 	avg = &averunnable;
1216 	for (i = 0; i < 3; i++) {
1217 		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
1218 		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
1219 	}
1220 
1221 	/*
1222 	 * Schedule the next update to occur after 5 seconds, but add a
1223 	 * random variation to avoid synchronisation with processes that
1224 	 * run at regular intervals.
1225 	 */
1226 	callout_reset(&loadav_callout, hz * 4 + (int)(krandom() % (hz * 2 + 1)),
1227 		      loadav, NULL);
1228 }
1229 
1230 static int
1231 loadav_count_runnable(struct lwp *lp, void *data)
1232 {
1233 	int *nrunp = data;
1234 	thread_t td;
1235 
1236 	switch (lp->lwp_stat) {
1237 	case LSRUN:
1238 		if ((td = lp->lwp_thread) == NULL)
1239 			break;
1240 		if (td->td_flags & TDF_BLOCKED)
1241 			break;
1242 		++*nrunp;
1243 		break;
1244 	default:
1245 		break;
1246 	}
1247 	lwkt_yield();
1248 	return(0);
1249 }
1250 
1251 /* ARGSUSED */
1252 static void
1253 sched_setup(void *dummy)
1254 {
1255 	callout_init_mp(&loadav_callout);
1256 	callout_init_mp(&schedcpu_callout);
1257 
1258 	/* Kick off timeout driven events by calling first time. */
1259 	schedcpu(NULL);
1260 	loadav(NULL);
1261 }
1262 
1263