/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/usched_dummy.c,v 1.9 2008/04/21 15:24:46 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/resourcevar.h>
#include <sys/spinlock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#define MAXPRI			128
#define PRIBASE_REALTIME	0
#define PRIBASE_NORMAL		MAXPRI
#define PRIBASE_IDLE		(MAXPRI * 2)
#define PRIBASE_THREAD		(MAXPRI * 3)
#define PRIBASE_NULL		(MAXPRI * 4)
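
/*
 * A user priority is PRIBASE_<class> plus the lwp's rtprio priority
 * (see dummy_resetpriority() below), so each scheduling class occupies
 * its own disjoint 128-slot band; e.g. an RTP_PRIO_NORMAL lwp with
 * rtprio.prio 10 ends up with lwp_priority 128 + 10 = 138.
 */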

#define lwp_priority	lwp_usdata.bsd4.priority
#define lwp_estcpu	lwp_usdata.bsd4.estcpu
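
/*
 * The dummy scheduler keeps no scheduler-private per-lwp state of its
 * own; the macros above simply alias the bsd4 scheduler's usdata fields.
 */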

static void dummy_acquire_curproc(struct lwp *lp);
static void dummy_release_curproc(struct lwp *lp);
static void dummy_select_curproc(globaldata_t gd);
static void dummy_setrunqueue(struct lwp *lp);
static void dummy_schedulerclock(struct lwp *lp, sysclock_t period,
				sysclock_t cpstamp);
static void dummy_recalculate_estcpu(struct lwp *lp);
static void dummy_resetpriority(struct lwp *lp);
static void dummy_forking(struct lwp *plp, struct lwp *lp);
static void dummy_exiting(struct lwp *plp, struct lwp *lp);
static void dummy_yield(struct lwp *lp);

struct usched usched_dummy = {
	{ NULL },
	"dummy", "Dummy DragonFly Scheduler",
	NULL,			/* default registration */
	NULL,			/* default deregistration */
	dummy_acquire_curproc,
	dummy_release_curproc,
	dummy_setrunqueue,
	dummy_schedulerclock,
	dummy_recalculate_estcpu,
	dummy_resetpriority,
	dummy_forking,
	dummy_exiting,
	NULL,			/* setcpumask not supported */
	dummy_yield
};

struct usched_dummy_pcpu {
	int	rrcount;
	struct thread helper_thread;
	struct lwp *uschedcp;
};

typedef struct usched_dummy_pcpu *dummy_pcpu_t;

static struct usched_dummy_pcpu dummy_pcpu[MAXCPU];
static cpumask_t dummy_curprocmask = -1;
static cpumask_t dummy_rdyprocmask;
static struct spinlock dummy_spin;
static TAILQ_HEAD(rq, lwp) dummy_runq;
static int dummy_runqcount;
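
/*
 * NOTE: this scheduler maintains a single global run queue protected by
 * dummy_spin.  dummy_curprocmask tracks which cpus are currently running
 * a designated user thread and dummy_rdyprocmask tracks which per-cpu
 * helper threads are idle and may be scheduled to pick up work.
 */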

static int usched_dummy_rrinterval = (ESTCPUFREQ + 9) / 10;
SYSCTL_INT(_kern, OID_AUTO, usched_dummy_rrinterval, CTLFLAG_RW,
        &usched_dummy_rrinterval, 0, "");
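
/*
 * dummy_schedulerclock() ticks at ESTCPUFREQ on every cpu, so the
 * default interval of (ESTCPUFREQ + 9) / 10 ticks round-robins the
 * current user thread roughly ten times per second (e.g. every 5 ticks
 * at an assumed ESTCPUFREQ of 50).
 */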

/*
 * Initialize the run queue at boot time, and clear cpu 0 in curprocmask
 * to allow dummy scheduling on cpu 0.
 */
static void
dummyinit(void *dummy)
{
	TAILQ_INIT(&dummy_runq);
	spin_init(&dummy_spin);
	atomic_clear_int(&dummy_curprocmask, 1);
}
SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, dummyinit, NULL)

/*
 * DUMMY_ACQUIRE_CURPROC
 *
 * This function is called when the kernel intends to return to userland.
 * It is responsible for making the thread the current designated userland
 * thread for this cpu, blocking if necessary.
 *
 * We are expected to handle userland reschedule requests here too.
 *
 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
 * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
 * occur, this function is called only under very controlled circumstances.
 *
 * MPSAFE
 */
static void
dummy_acquire_curproc(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	dummy_pcpu_t dd = &dummy_pcpu[gd->gd_cpuid];
	thread_t td = lp->lwp_thread;

	/*
	 * Possibly select another thread
	 */
	if (user_resched_wanted())
		dummy_select_curproc(gd);

	/*
	 * If this cpu has no current thread, select ourself
	 */
	if (dd->uschedcp == lp ||
	    (dd->uschedcp == NULL && TAILQ_EMPTY(&dummy_runq))) {
		atomic_set_int(&dummy_curprocmask, gd->gd_cpumask);
		dd->uschedcp = lp;
		return;
	}

	/*
	 * If this cpu's current user process thread is not our thread,
	 * deschedule ourselves and place us on the run queue, then
	 * switch away.
	 *
	 * We loop until we become the current process.  It's a good idea
	 * to run any passive release(s) before we mess with the scheduler
	 * so our thread is in the expected state.
	 */
	KKASSERT(dd->uschedcp != lp);
	if (td->td_release)
		td->td_release(lp->lwp_thread);
	do {
		crit_enter();
		lwkt_deschedule_self(td);
		dummy_setrunqueue(lp);
		if ((td->td_flags & TDF_RUNQ) == 0)
			++lp->lwp_ru.ru_nivcsw;
		lwkt_switch();		/* WE MAY MIGRATE TO ANOTHER CPU */
		crit_exit();
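		/*
		 * Reload the per-cpu pointers; the lwkt_switch() above
		 * may have moved us to a different cpu.
		 */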
		gd = mycpu;
		dd = &dummy_pcpu[gd->gd_cpuid];
		KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0);
	} while (dd->uschedcp != lp);
}

/*
 * DUMMY_RELEASE_CURPROC
 *
 * This routine detaches the current thread from the userland scheduler,
 * usually because the thread needs to run in the kernel (at kernel priority)
 * for a while.
 *
 * This routine is also responsible for selecting a new current user
 * thread for the cpu.
 *
 * WARNING!  The MP lock may be in an unsynchronized state due to the
 * way get_mplock() works and the fact that this function may be called
 * from a passive release during a lwkt_switch().   try_mplock() will deal
 * with this for us but you should be aware that td_mpcount may not be
 * usable.
 *
 * MPSAFE
 */
static void
dummy_release_curproc(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	dummy_pcpu_t dd = &dummy_pcpu[gd->gd_cpuid];

	KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0);
	if (dd->uschedcp == lp) {
		dummy_select_curproc(gd);
	}
}

/*
 * DUMMY_SELECT_CURPROC
 *
 * Select a new current process for this cpu.  This satisfies any user
 * scheduler reschedule request, so we clear it here as well.
 *
 * This routine is also responsible for equal-priority round-robining,
 * typically triggered from dummy_schedulerclock().  In our dummy example
 * all the 'user' threads are LWKT scheduled all at once and we just
 * call lwkt_switch().
 *
 * MPSAFE
 */
static
void
dummy_select_curproc(globaldata_t gd)
{
	dummy_pcpu_t dd = &dummy_pcpu[gd->gd_cpuid];
	struct lwp *lp;

	clear_user_resched();
	spin_lock_wr(&dummy_spin);
	if ((lp = TAILQ_FIRST(&dummy_runq)) == NULL) {
		dd->uschedcp = NULL;
		atomic_clear_int(&dummy_curprocmask, gd->gd_cpumask);
		spin_unlock_wr(&dummy_spin);
	} else {
		--dummy_runqcount;
		TAILQ_REMOVE(&dummy_runq, lp, lwp_procq);
		lp->lwp_flag &= ~LWP_ONRUNQ;
		dd->uschedcp = lp;
		atomic_set_int(&dummy_curprocmask, gd->gd_cpumask);
		spin_unlock_wr(&dummy_spin);
#ifdef SMP
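		/* pull thread ownership to this cpu if it last ran elsewhere */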
		lwkt_acquire(lp->lwp_thread);
#endif
		lwkt_schedule(lp->lwp_thread);
	}
}

/*
 * DUMMY_SETRUNQUEUE
 *
 * This routine is called to schedule a new user process after a fork.
 * The scheduler module itself might also call this routine to place
 * the current process on the userland scheduler's run queue prior
 * to calling dummy_select_curproc().
 *
 * The caller may set P_PASSIVE_ACQ in p_flag to indicate that we should
 * attempt to leave the thread on the current cpu.
 *
 * MPSAFE
 */
static void
dummy_setrunqueue(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	dummy_pcpu_t dd = &dummy_pcpu[gd->gd_cpuid];
	cpumask_t mask;
	int cpuid;

	if (dd->uschedcp == NULL) {
		dd->uschedcp = lp;
		atomic_set_int(&dummy_curprocmask, gd->gd_cpumask);
		lwkt_schedule(lp->lwp_thread);
	} else {
		/*
		 * Add to our global runq
		 */
		KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0);
		spin_lock_wr(&dummy_spin);
		++dummy_runqcount;
		TAILQ_INSERT_TAIL(&dummy_runq, lp, lwp_procq);
		lp->lwp_flag |= LWP_ONRUNQ;
#ifdef SMP
		lwkt_giveaway(lp->lwp_thread);
#endif

		/* lp = TAILQ_FIRST(&dummy_runq); */

		/*
		 * Notify the next available cpu (some cpu-affinity logic
		 * could be added here).
		 *
		 * The rdyprocmask bit records the fact that there is a
		 * process on the runq that needs service.  If the helper
		 * thread cannot find a home for it, it will forward the
		 * request to another available cpu.
		 */
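		/*
		 * Example (hypothetical values): with curprocmask 0x3 and
		 * rdyprocmask 0xc on a 4-cpu system, a thread queued from
		 * cpu 0 yields mask 0xc and bsfl() selects cpu 2, the
		 * lowest-numbered candidate.
		 */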
		mask = ~dummy_curprocmask & dummy_rdyprocmask &
		       gd->gd_other_cpus;
		if (mask) {
			cpuid = bsfl(mask);
			atomic_clear_int(&dummy_rdyprocmask, 1 << cpuid);
			spin_unlock_wr(&dummy_spin);
			lwkt_schedule(&dummy_pcpu[cpuid].helper_thread);
		} else {
			spin_unlock_wr(&dummy_spin);
		}
	}
}

/*
 * This routine is called from a systimer IPI.  Thus it is called with
 * a critical section held.  Any spinlocks we get here that are also
 * obtained in other procedures must be protected by a critical section
 * in those other procedures to avoid a deadlock.
 *
 * The MP lock may or may not be held on entry and cannot be obtained
 * by this routine (because it is called from a systimer IPI).  Additionally,
 * because this is equivalent to a FAST interrupt, spinlocks cannot be used
 * (or at least, you have to check that gd_spin* counts are 0 before you
 * can).
 *
 * This routine is called at ESTCPUFREQ on each cpu independently.
 *
 * This routine typically queues a reschedule request, which will cause
 * the scheduler's BLAH_select_curproc() to be called as soon as possible.
 *
 * MPSAFE
 */
static
void
dummy_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
{
	globaldata_t gd = mycpu;
	dummy_pcpu_t dd = &dummy_pcpu[gd->gd_cpuid];

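	/*
	 * Round-robin once rrcount accumulates rrinterval ticks.
	 * need_user_resched() only sets a per-cpu flag; the actual
	 * reselection happens in dummy_acquire_curproc() via
	 * user_resched_wanted() when a thread next tries to return
	 * to userland.
	 */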
	if (++dd->rrcount >= usched_dummy_rrinterval) {
		dd->rrcount = 0;
		need_user_resched();
	}
}

/*
 * DUMMY_RECALCULATE_ESTCPU
 *
 * Called once a second for any process that is running or has slept
 * for less than 2 seconds.  The dummy scheduler keeps no estcpu state,
 * so there is nothing to do here.
 *
 * MPSAFE
 */
static
void
dummy_recalculate_estcpu(struct lwp *lp)
{
}

static
void
dummy_yield(struct lwp *lp)
{
	need_user_resched();
}

/*
 * DUMMY_RESETPRIORITY
 *
 * This routine is called after the kernel has potentially modified
 * the lwp_rtprio structure.  The target process may be running or sleeping
 * or scheduled but not yet running or owned by another cpu.  Basically,
 * it can be in virtually any state.
 *
 * This routine is called by fork1() for initial setup with the process
 * off the run queue, and also may be called normally with the process on or
 * off the run queue.
 *
 * MPSAFE
 */
static void
dummy_resetpriority(struct lwp *lp)
{
	/* XXX spinlock usually needed */
	/*
	 * Set p_priority for general process comparisons
	 */
	switch(lp->lwp_rtprio.type) {
	case RTP_PRIO_REALTIME:
		lp->lwp_priority = PRIBASE_REALTIME + lp->lwp_rtprio.prio;
		return;
	case RTP_PRIO_NORMAL:
		lp->lwp_priority = PRIBASE_NORMAL + lp->lwp_rtprio.prio;
		break;
	case RTP_PRIO_IDLE:
		lp->lwp_priority = PRIBASE_IDLE + lp->lwp_rtprio.prio;
		return;
	case RTP_PRIO_THREAD:
		lp->lwp_priority = PRIBASE_THREAD + lp->lwp_rtprio.prio;
		return;
	}
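	/*
	 * Only the RTP_PRIO_NORMAL case falls through to here; presumably
	 * a real scheduler would fold its dynamic (estcpu-based) adjustment
	 * into normal-class priorities at this point.
	 */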
	/* XXX spinlock usually needed */
}

/*
 * DUMMY_FORKING
 *
 * Called from fork1() when a new child process is being created.  Allows
 * the scheduler to predispose the child process before it gets scheduled.
 *
 * MPSAFE
 */
static void
dummy_forking(struct lwp *plp, struct lwp *lp)
{
	lp->lwp_estcpu = plp->lwp_estcpu;
#if 0
	++plp->lwp_estcpu;
#endif
}

/*
 * DUMMY_EXITING
 *
 * Called when the parent reaps a child.   Typically used to propagate cpu
 * use by the child back to the parent as part of a batch detection
 * heuristic.
 *
 * NOTE: cpu use is not normally back-propagated to PID 1.
 *
 * MPSAFE
 */
static void
dummy_exiting(struct lwp *plp, struct lwp *lp)
{
}

/*
 * SMP systems may need a scheduler helper thread.  This is how one can be
 * set up.
 *
 * We use a neat LWKT scheduling trick to interlock the helper thread.  It
 * is possible to deschedule an LWKT thread and then do some work before
 * switching away.  The thread can be rescheduled at any time, even before
 * we switch away.
 */
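/*
 * In outline the interlock works like this (a sketch of the loop below,
 * not additional API):
 *
 *	lwkt_deschedule_self(td);	  become not-runnable
 *	atomic_set_int(&rdymask, bit);	  publish our readiness
 *	...scan the run queue...	  a remote lwkt_schedule() may
 *					  already have made us runnable
 *	lwkt_switch();			  returns immediately instead of
 *					  blocking if we were rescheduled
 *
 * so a wakeup arriving between the deschedule and the switch is never
 * lost.
 */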
#ifdef SMP

static void
dummy_sched_thread(void *dummy)
{
    globaldata_t gd;
    dummy_pcpu_t dd;
    struct lwp *lp;
    cpumask_t cpumask;
    cpumask_t tmpmask;
    int cpuid;
    int tmpid;

    gd = mycpu;
    cpuid = gd->gd_cpuid;
    dd = &dummy_pcpu[cpuid];
    cpumask = 1 << cpuid;

    /*
     * Our scheduler helper thread does not need to hold the MP lock
     */
    rel_mplock();

    for (;;) {
	lwkt_deschedule_self(gd->gd_curthread);		/* interlock */
	atomic_set_int(&dummy_rdyprocmask, cpumask);
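	/*
	 * From this point on another cpu may observe our rdy bit and
	 * lwkt_schedule() us; the self-deschedule above guarantees such
	 * a wakeup is not lost while we scan the runq below.
	 */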
	spin_lock_wr(&dummy_spin);
	if (dd->uschedcp) {
		/*
		 * We raced another cpu trying to schedule a thread onto us.
		 * If the runq isn't empty hit another free cpu.
		 */
		tmpmask = ~dummy_curprocmask & dummy_rdyprocmask &
		          gd->gd_other_cpus;
		if (tmpmask && dummy_runqcount) {
			tmpid = bsfl(tmpmask);
			KKASSERT(tmpid != cpuid);
			atomic_clear_int(&dummy_rdyprocmask, 1 << tmpid);
			spin_unlock_wr(&dummy_spin);
			lwkt_schedule(&dummy_pcpu[tmpid].helper_thread);
		} else {
			spin_unlock_wr(&dummy_spin);
		}
	} else if ((lp = TAILQ_FIRST(&dummy_runq)) != NULL) {
		--dummy_runqcount;
		TAILQ_REMOVE(&dummy_runq, lp, lwp_procq);
		lp->lwp_flag &= ~LWP_ONRUNQ;
		dd->uschedcp = lp;
		atomic_set_int(&dummy_curprocmask, cpumask);
		spin_unlock_wr(&dummy_spin);
		lwkt_acquire(lp->lwp_thread);
		lwkt_schedule(lp->lwp_thread);
	} else {
		spin_unlock_wr(&dummy_spin);
	}
	lwkt_switch();
    }
}

/*
 * Set up our scheduler helpers.  Note that curprocmask bit 0 has already
 * been cleared by dummyinit() and we should not mess with it further.
 */
static void
dummy_sched_thread_cpu_init(void)
{
    int i;

    if (bootverbose)
	kprintf("start dummy scheduler helpers on cpus:");

    for (i = 0; i < ncpus; ++i) {
	dummy_pcpu_t dd = &dummy_pcpu[i];
	cpumask_t mask = 1 << i;

	if ((mask & smp_active_mask) == 0)
	    continue;

	if (bootverbose)
	    kprintf(" %d", i);

	lwkt_create(dummy_sched_thread, NULL, NULL, &dd->helper_thread,
		    TDF_STOPREQ, i, "dsched %d", i);

	/*
	 * Allow user scheduling on the target cpu.  cpu #0 has already
	 * been enabled in dummyinit().
	 */
	if (i)
	    atomic_clear_int(&dummy_curprocmask, mask);
	atomic_set_int(&dummy_rdyprocmask, mask);
    }
    if (bootverbose)
	kprintf("\n");
}
SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
	dummy_sched_thread_cpu_init, NULL)

#endif