/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/usched_dummy.c,v 1.9 2008/04/21 15:24:46 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/resourcevar.h>
#include <sys/spinlock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#define MAXPRI			128
#define PRIBASE_REALTIME	0
#define PRIBASE_NORMAL		MAXPRI
#define PRIBASE_IDLE		(MAXPRI * 2)
#define PRIBASE_THREAD		(MAXPRI * 3)
#define PRIBASE_NULL		(MAXPRI * 4)
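
/*
 * Illustrative arithmetic: dummy_resetpriority() below computes
 * lwp_priority as PRIBASE_<class> + lwp_rtprio.prio, so lower values
 * are more important.  For example:
 *
 *	RTP_PRIO_REALTIME, prio 5  ->   0 + 5  =   5
 *	RTP_PRIO_NORMAL,   prio 10 -> 128 + 10 = 138
 *	RTP_PRIO_IDLE,     prio 0  -> 256 + 0  = 256
 */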

#define lwp_priority	lwp_usdata.bsd4.priority
#define lwp_estcpu	lwp_usdata.bsd4.estcpu

static void dummy_acquire_curproc(struct lwp *lp);
static void dummy_release_curproc(struct lwp *lp);
static void dummy_select_curproc(globaldata_t gd);
static void dummy_setrunqueue(struct lwp *lp);
static void dummy_schedulerclock(struct lwp *lp, sysclock_t period,
				sysclock_t cpstamp);
static void dummy_recalculate_estcpu(struct lwp *lp);
static void dummy_resetpriority(struct lwp *lp);
static void dummy_forking(struct lwp *plp, struct lwp *lp);
static void dummy_exiting(struct lwp *plp, struct proc *child);
static void dummy_uload_update(struct lwp *lp);
static void dummy_yield(struct lwp *lp);

struct usched usched_dummy = {
	{ NULL },
	"dummy", "Dummy DragonFly Scheduler",
	NULL,			/* default registration */
	NULL,			/* default deregistration */
	dummy_acquire_curproc,
	dummy_release_curproc,
	dummy_setrunqueue,
	dummy_schedulerclock,
	dummy_recalculate_estcpu,
	dummy_resetpriority,
	dummy_forking,
	dummy_exiting,
	dummy_uload_update,
	NULL,			/* setcpumask not supported */
	dummy_yield
};
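
/*
 * Once registered, a program can normally be placed under this scheduler
 * with the usched(8) utility, e.g. `usched dummy command' (exact usage
 * may vary by release).
 */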

struct usched_dummy_pcpu {
	int	rrcount;
	struct thread helper_thread;
	struct lwp *uschedcp;
};

typedef struct usched_dummy_pcpu *dummy_pcpu_t;

static struct usched_dummy_pcpu dummy_pcpu[MAXCPU];
static cpumask_t dummy_curprocmask = -1;
static cpumask_t dummy_rdyprocmask;
static struct spinlock dummy_spin;
static TAILQ_HEAD(rq, lwp) dummy_runq;
static int dummy_runqcount;

static int usched_dummy_rrinterval = (ESTCPUFREQ + 9) / 10;
SYSCTL_INT(_kern, OID_AUTO, usched_dummy_rrinterval, CTLFLAG_RW,
        &usched_dummy_rrinterval, 0, "");
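
/*
 * dummy_schedulerclock() ticks at ESTCPUFREQ on each cpu, so with the
 * default rrinterval of (ESTCPUFREQ + 9) / 10 ticks a running user
 * thread is asked to round-robin roughly ten times a second.  Assuming
 * ESTCPUFREQ is 50, for example:
 *
 *	rrinterval = (50 + 9) / 10 = 5 ticks  ->  resched every ~100ms
 *
 * The interval can be tuned at runtime:
 *
 *	sysctl kern.usched_dummy_rrinterval=10
 */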

/*
 * Initialize the run queue at boot time and clear cpu 0 in curprocmask
 * to allow dummy scheduling on cpu 0.
 */
static void
dummyinit(void *dummy)
{
	TAILQ_INIT(&dummy_runq);
	spin_init(&dummy_spin);
	atomic_clear_cpumask(&dummy_curprocmask, 1);
}
SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, dummyinit, NULL)
127 
128 /*
129  * DUMMY_ACQUIRE_CURPROC
130  *
131  * This function is called when the kernel intends to return to userland.
132  * It is responsible for making the thread the current designated userland
133  * thread for this cpu, blocking if necessary.
134  *
135  * The kernel will not depress our LWKT priority until after we return,
136  * in case we have to shove over to another cpu.
137  *
138  * We must determine our thread's disposition before we switch away.  This
139  * is very sensitive code.
140  *
141  * We are expected to handle userland reschedule requests here too.
142  *
143  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
144  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
145  * occur, this function is called only under very controlled circumstances.
146  *
147  * MPSAFE
148  */
static void
dummy_acquire_curproc(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	dummy_pcpu_t dd = &dummy_pcpu[gd->gd_cpuid];
	thread_t td = lp->lwp_thread;

	/*
	 * Possibly select another thread
	 */
	if (user_resched_wanted())
		dummy_select_curproc(gd);

	/*
	 * If this cpu has no current thread, select ourself
	 */
	if (dd->uschedcp == lp ||
	    (dd->uschedcp == NULL && TAILQ_EMPTY(&dummy_runq))) {
		atomic_set_cpumask(&dummy_curprocmask, gd->gd_cpumask);
		dd->uschedcp = lp;
		return;
	}

	/*
	 * If this cpu's current user process thread is not our thread,
	 * deschedule ourselves and place us on the run queue, then
	 * switch away.
	 *
	 * We loop until we become the current process.  It's a good idea
	 * to run any passive release(s) before we mess with the scheduler
	 * so our thread is in the expected state.
	 */
	KKASSERT(dd->uschedcp != lp);
	if (td->td_release)
		td->td_release(lp->lwp_thread);
	do {
		crit_enter();
		lwkt_deschedule_self(td);
		dummy_setrunqueue(lp);
		if ((td->td_flags & TDF_RUNQ) == 0)
			++lp->lwp_ru.ru_nivcsw;
		lwkt_switch();		/* WE MAY MIGRATE TO ANOTHER CPU */
		crit_exit();
		gd = mycpu;
		dd = &dummy_pcpu[gd->gd_cpuid];
		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
	} while (dd->uschedcp != lp);
}
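
/*
 * Note on pairing: dummy_acquire_curproc() runs on the way back out to
 * userland, while dummy_release_curproc() below runs when the thread
 * gives up its userland slot to operate in the kernel for a while.  The
 * designated-current slot (dd->uschedcp) is held by at most one lwp per
 * cpu at any time.
 */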

/*
 * DUMMY_RELEASE_CURPROC
 *
 * This routine detaches the current thread from the userland scheduler,
 * usually because the thread needs to run in the kernel (at kernel priority)
 * for a while.
 *
 * This routine is also responsible for selecting a new thread to become
 * the current thread.
 *
 * MPSAFE
 */
static void
dummy_release_curproc(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	dummy_pcpu_t dd = &dummy_pcpu[gd->gd_cpuid];

	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
	if (dd->uschedcp == lp) {
		dummy_select_curproc(gd);
	}
}

/*
 * DUMMY_SELECT_CURPROC
 *
 * Select a new current process for this cpu.  This satisfies a user
 * scheduler reschedule request so clear that too.
 *
 * This routine is also responsible for equal-priority round-robining,
 * typically triggered from dummy_schedulerclock().  In our dummy example
 * all the 'user' threads are LWKT scheduled all at once and we just
 * call lwkt_switch().
 *
 * MPSAFE
 */
static
void
dummy_select_curproc(globaldata_t gd)
{
	dummy_pcpu_t dd = &dummy_pcpu[gd->gd_cpuid];
	struct lwp *lp;

	clear_user_resched();
	spin_lock(&dummy_spin);
	if ((lp = TAILQ_FIRST(&dummy_runq)) == NULL) {
		dd->uschedcp = NULL;
		atomic_clear_cpumask(&dummy_curprocmask, gd->gd_cpumask);
		spin_unlock(&dummy_spin);
	} else {
		--dummy_runqcount;
		TAILQ_REMOVE(&dummy_runq, lp, lwp_procq);
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
		dd->uschedcp = lp;
		atomic_set_cpumask(&dummy_curprocmask, gd->gd_cpumask);
		spin_unlock(&dummy_spin);
#ifdef SMP
		lwkt_acquire(lp->lwp_thread);
#endif
		lwkt_schedule(lp->lwp_thread);
	}
}

/*
 * DUMMY_SETRUNQUEUE
 *
 * This routine is called to schedule a new user process after a fork.
 * The scheduler module itself might also call this routine to place
 * the current process on the userland scheduler's run queue prior
 * to calling dummy_select_curproc().
 *
 * The caller may set LWP_PASSIVE_ACQ in lwp_flags to indicate that we should
 * attempt to leave the thread on the current cpu.
 *
 * MPSAFE
 */
static void
dummy_setrunqueue(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	dummy_pcpu_t dd = &dummy_pcpu[gd->gd_cpuid];
	cpumask_t mask;
	int cpuid;

	if (dd->uschedcp == NULL) {
		dd->uschedcp = lp;
		atomic_set_cpumask(&dummy_curprocmask, gd->gd_cpumask);
		lwkt_schedule(lp->lwp_thread);
	} else {
		/*
		 * Add to our global runq
		 */
		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
		spin_lock(&dummy_spin);
		++dummy_runqcount;
		TAILQ_INSERT_TAIL(&dummy_runq, lp, lwp_procq);
		atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
#ifdef SMP
		lwkt_giveaway(lp->lwp_thread);
#endif

		/* lp = TAILQ_FIRST(&dummy_runq); */

		/*
		 * Notify the next available cpu.  (Some cpu affinity could
		 * be implemented here.)
		 *
		 * The rdyprocmask bit records the fact that there is a
		 * process on the runq that needs service.  If the helper
		 * thread cannot find a home for it, it will forward the
		 * request to another available cpu.
		 */
		mask = ~dummy_curprocmask & dummy_rdyprocmask &
		       gd->gd_other_cpus;
		if (mask) {
			cpuid = BSFCPUMASK(mask);
			atomic_clear_cpumask(&dummy_rdyprocmask, CPUMASK(cpuid));
			spin_unlock(&dummy_spin);
			lwkt_schedule(&dummy_pcpu[cpuid].helper_thread);
		} else {
			spin_unlock(&dummy_spin);
		}
	}
}
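
/*
 * Worked example of the helper wakeup mask above, assuming four cpus
 * with this thread running on cpu 0 (illustrative values only):
 *
 *	dummy_curprocmask = 0011  (cpus 0 and 1 own a user thread)
 *	dummy_rdyprocmask = 1100  (helpers on cpus 2 and 3 are ready)
 *	gd_other_cpus     = 1110  (every cpu but our own)
 *
 *	mask = ~0011 & 1100 & 1110 = 1100
 *
 * BSFCPUMASK(mask) picks the lowest set bit, so the helper on cpu 2 is
 * scheduled to pull the new lwp off the global runq.
 */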

/*
 * This routine is called from a systimer IPI.  It must NEVER block.
 * If a lwp compatible with this scheduler is the currently running
 * thread this function is called with a non-NULL lp, otherwise it
 * will be called with a NULL lp.
 *
 * This routine is called at ESTCPUFREQ on each cpu independently.
 *
 * This routine typically queues a reschedule request, which will cause
 * the scheduler's BLAH_select_curproc() to be called as soon as possible.
 */
static
void
dummy_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
{
	globaldata_t gd = mycpu;
	dummy_pcpu_t dd = &dummy_pcpu[gd->gd_cpuid];

	if (lp == NULL)
		return;

	if (++dd->rrcount >= usched_dummy_rrinterval) {
		dd->rrcount = 0;
		need_user_resched();
	}
}
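
/*
 * need_user_resched() only flags this cpu for a userland reschedule.
 * The actual switch happens the next time the running thread passes
 * through dummy_acquire_curproc() and sees user_resched_wanted(),
 * typically on its way back out to userland.
 */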

/*
 * DUMMY_RECALCULATE_ESTCPU
 *
 * Called once a second for any process that is running or has slept
 * for less than 2 seconds.
 *
 * MPSAFE
 */
static
void
dummy_recalculate_estcpu(struct lwp *lp)
{
}

/*
 * MPSAFE
 */
static
void
dummy_yield(struct lwp *lp)
{
	need_user_resched();
}

/*
 * DUMMY_RESETPRIORITY
 *
 * This routine is called after the kernel has potentially modified
 * the lwp_rtprio structure.  The target process may be running or sleeping
 * or scheduled but not yet running or owned by another cpu.  Basically,
 * it can be in virtually any state.
 *
 * This routine is called by fork1() for initial setup with the process
 * off the run queue, and also may be called normally with the process on or
 * off the run queue.
 *
 * MPSAFE
 */
static void
dummy_resetpriority(struct lwp *lp)
{
	/* XXX spinlock usually needed */
	/*
	 * Set p_priority for general process comparisons
	 */
	switch(lp->lwp_rtprio.type) {
	case RTP_PRIO_REALTIME:
		lp->lwp_priority = PRIBASE_REALTIME + lp->lwp_rtprio.prio;
		return;
	case RTP_PRIO_NORMAL:
		lp->lwp_priority = PRIBASE_NORMAL + lp->lwp_rtprio.prio;
		break;
	case RTP_PRIO_IDLE:
		lp->lwp_priority = PRIBASE_IDLE + lp->lwp_rtprio.prio;
		return;
	case RTP_PRIO_THREAD:
		lp->lwp_priority = PRIBASE_THREAD + lp->lwp_rtprio.prio;
		return;
	}

	/*
	 * td_upri has normal sense (higher numbers are more desirable),
	 * so negate it.
	 */
	lp->lwp_thread->td_upri = -lp->lwp_priority;
	/* XXX spinlock usually needed */
}

/*
 * DUMMY_FORKING
 *
 * Called from fork1() when a new child process is being created.  Allows
 * the scheduler to predispose the child process before it gets scheduled.
 *
 * MPSAFE
 */
static void
dummy_forking(struct lwp *plp, struct lwp *lp)
{
	lp->lwp_estcpu = plp->lwp_estcpu;
#if 0
	++plp->lwp_estcpu;
#endif
}

/*
 * Called when a lwp is being removed from this scheduler, typically
 * during lwp_exit().
 */
static void
dummy_exiting(struct lwp *plp, struct proc *child)
{
}

static void
dummy_uload_update(struct lwp *lp)
{
}

/*
 * SMP systems may need a scheduler helper thread.  This is how one can be
 * set up.
 *
 * We use a neat LWKT scheduling trick to interlock the helper thread.  It
 * is possible to deschedule an LWKT thread and then do some work before
 * switching away.  The thread can be rescheduled at any time, even before
 * we switch away.
 *
 * MPSAFE
 */
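
/*
 * The interlock pattern used below, in outline (a sketch of what the
 * loop does, not additional executable code):
 *
 *	lwkt_deschedule_self(td);	-- mark ourselves not-runnable
 *	(advertise readiness: another cpu may lwkt_schedule() us now)
 *	(scan the runq / do work)
 *	lwkt_switch();			-- only stays switched out if
 *					   nobody rescheduled us
 *
 * A wakeup arriving between the deschedule and the switch simply leaves
 * the thread runnable, so lwkt_switch() comes right back and no wakeup
 * is lost.
 */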
#ifdef SMP

static void
dummy_sched_thread(void *dummy)
{
    globaldata_t gd;
    dummy_pcpu_t dd;
    struct lwp *lp;
    cpumask_t cpumask;
    cpumask_t tmpmask;
    int cpuid;
    int tmpid;

    gd = mycpu;
    cpuid = gd->gd_cpuid;
    dd = &dummy_pcpu[cpuid];
    cpumask = CPUMASK(cpuid);

    for (;;) {
	lwkt_deschedule_self(gd->gd_curthread);		/* interlock */
	atomic_set_cpumask(&dummy_rdyprocmask, cpumask);
	spin_lock(&dummy_spin);
	if (dd->uschedcp) {
		/*
		 * We raced another cpu trying to schedule a thread onto us.
		 * If the runq isn't empty, hit another free cpu.
		 */
		tmpmask = ~dummy_curprocmask & dummy_rdyprocmask &
		          gd->gd_other_cpus;
		if (tmpmask && dummy_runqcount) {
			tmpid = BSFCPUMASK(tmpmask);
			KKASSERT(tmpid != cpuid);
			atomic_clear_cpumask(&dummy_rdyprocmask, CPUMASK(tmpid));
			spin_unlock(&dummy_spin);
			lwkt_schedule(&dummy_pcpu[tmpid].helper_thread);
		} else {
			spin_unlock(&dummy_spin);
		}
	} else if ((lp = TAILQ_FIRST(&dummy_runq)) != NULL) {
		--dummy_runqcount;
		TAILQ_REMOVE(&dummy_runq, lp, lwp_procq);
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
		dd->uschedcp = lp;
		atomic_set_cpumask(&dummy_curprocmask, cpumask);
		spin_unlock(&dummy_spin);
		lwkt_acquire(lp->lwp_thread);
		lwkt_schedule(lp->lwp_thread);
	} else {
		spin_unlock(&dummy_spin);
	}
	lwkt_switch();
    }
}

/*
 * Set up our scheduler helpers.  Note that curprocmask bit 0 has already
 * been cleared by dummyinit() and we should not mess with it further.
 */
static void
dummy_sched_thread_cpu_init(void)
{
    int i;

    if (bootverbose)
	kprintf("start dummy scheduler helpers on cpus:");

    for (i = 0; i < ncpus; ++i) {
	dummy_pcpu_t dd = &dummy_pcpu[i];
	cpumask_t mask = CPUMASK(i);

	if ((mask & smp_active_mask) == 0)
	    continue;

	if (bootverbose)
	    kprintf(" %d", i);

	lwkt_create(dummy_sched_thread, NULL, NULL, &dd->helper_thread,
		    TDF_NOSTART, i, "dsched %d", i);

	/*
	 * Allow user scheduling on the target cpu.  cpu #0 has already
	 * been enabled in dummyinit().
	 */
	if (i)
	    atomic_clear_cpumask(&dummy_curprocmask, mask);
	atomic_set_cpumask(&dummy_rdyprocmask, mask);
    }
    if (bootverbose)
	kprintf("\n");
}
SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
	dummy_sched_thread_cpu_init, NULL)

#endif