xref: /minix/minix/servers/sched/schedule.c (revision e1cdaee1)
/* This file contains the scheduling policy for SCHED
 *
 * The entry points are:
 *   do_noquantum:        Called on behalf of processes that run out of quantum
 *   do_start_scheduling  Request to start scheduling a proc
 *   do_stop_scheduling   Request to stop scheduling a proc
 *   do_nice		  Request to change the nice level on a proc
 *   init_scheduling      Called from main.c to set up/prepare scheduling
 */
#include "sched.h"
#include "schedproc.h"
#include <assert.h>
#include <minix/com.h>
#include <machine/archtypes.h>

static unsigned balance_timeout;

#define BALANCE_TIMEOUT	5 /* how often to balance queues in seconds */

static int schedule_process(struct schedproc * rmp, unsigned flags);

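/* Flags that tell schedule_process() which scheduling parameters to push to
 * the kernel; parameters that are not selected are passed down as -1.
 */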
#define SCHEDULE_CHANGE_PRIO	0x1
#define SCHEDULE_CHANGE_QUANTUM	0x2
#define SCHEDULE_CHANGE_CPU	0x4

#define SCHEDULE_CHANGE_ALL	(	\
		SCHEDULE_CHANGE_PRIO	|	\
		SCHEDULE_CHANGE_QUANTUM	|	\
		SCHEDULE_CHANGE_CPU		\
		)

#define schedule_process_local(p)	\
	schedule_process(p, SCHEDULE_CHANGE_PRIO | SCHEDULE_CHANGE_QUANTUM)
#define schedule_process_migrate(p)	\
	schedule_process(p, SCHEDULE_CHANGE_CPU)

#define CPU_DEAD	-1

#define cpu_is_available(c)	(cpu_proc[c] >= 0)

#define DEFAULT_USER_TIME_SLICE 200

/* processes created by RS are system processes */
#define is_system_proc(p)	((p)->parent == RS_PROC_NR)

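/* Number of processes this scheduler has placed on each CPU; a slot set to
 * CPU_DEAD (-1) marks a CPU that must not be used (see cpu_is_available()).
 */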
static unsigned cpu_proc[CONFIG_MAX_CPUS];

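/* Pick a CPU for the given process. On single-processor machines, and for
 * system processes, this is always the boot CPU (BSP); other processes go to
 * the least loaded available application processor, falling back to the BSP.
 */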
static void pick_cpu(struct schedproc * proc)
{
#ifdef CONFIG_SMP
	unsigned cpu, c;
	unsigned cpu_load = (unsigned) -1;

	if (machine.processors_count == 1) {
		proc->cpu = machine.bsp_id;
		return;
	}

	/* schedule system processes only on the boot cpu */
	if (is_system_proc(proc)) {
		proc->cpu = machine.bsp_id;
		return;
	}

	/* if no other cpu available, try BSP */
	cpu = machine.bsp_id;
	for (c = 0; c < machine.processors_count; c++) {
		/* skip dead cpus */
		if (!cpu_is_available(c))
			continue;
		if (c != machine.bsp_id && cpu_load > cpu_proc[c]) {
			cpu_load = cpu_proc[c];
			cpu = c;
		}
	}
	proc->cpu = cpu;
	cpu_proc[cpu]++;
#else
	proc->cpu = 0;
#endif
}

/*===========================================================================*
 *				do_noquantum				     *
 *===========================================================================*/

int do_noquantum(message *m_ptr)
{
	register struct schedproc *rmp;
	int rv, proc_nr_n;

	if (sched_isokendpt(m_ptr->m_source, &proc_nr_n) != OK) {
		printf("SCHED: WARNING: got an invalid endpoint in OOQ msg %u.\n",
		m_ptr->m_source);
		return EBADEPT;
	}

	rmp = &schedproc[proc_nr_n];
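	/* Demote the process by one scheduling queue (a numerically higher
	 * queue means a lower priority), but never below the lowest user
	 * queue (MIN_USER_Q).
	 */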
	if (rmp->priority < MIN_USER_Q) {
		rmp->priority += 1; /* lower priority */
	}

	if ((rv = schedule_process_local(rmp)) != OK) {
		return rv;
	}
	return OK;
}

/*===========================================================================*
 *				do_stop_scheduling			     *
 *===========================================================================*/
int do_stop_scheduling(message *m_ptr)
{
	register struct schedproc *rmp;
	int proc_nr_n;

	/* check who can send you requests */
	if (!accept_message(m_ptr))
		return EPERM;

	if (sched_isokendpt(m_ptr->m_lsys_sched_scheduling_stop.endpoint,
		    &proc_nr_n) != OK) {
		printf("SCHED: WARNING: got an invalid endpoint in OOQ msg "
		"%d\n", m_ptr->m_lsys_sched_scheduling_stop.endpoint);
		return EBADEPT;
	}

	rmp = &schedproc[proc_nr_n];
#ifdef CONFIG_SMP
	cpu_proc[rmp->cpu]--;
#endif
	rmp->flags = 0; /*&= ~IN_USE;*/

	return OK;
}

/*===========================================================================*
 *				do_start_scheduling			     *
 *===========================================================================*/
int do_start_scheduling(message *m_ptr)
{
	register struct schedproc *rmp;
	int rv, proc_nr_n, parent_nr_n;

	/* we can handle two kinds of messages here */
	assert(m_ptr->m_type == SCHEDULING_START ||
		m_ptr->m_type == SCHEDULING_INHERIT);

	/* check who can send you requests */
	if (!accept_message(m_ptr))
		return EPERM;

	/* Resolve endpoint to proc slot. */
	if ((rv = sched_isemtyendpt(m_ptr->m_lsys_sched_scheduling_start.endpoint,
			&proc_nr_n)) != OK) {
		return rv;
	}
	rmp = &schedproc[proc_nr_n];

	/* Populate process slot */
	rmp->endpoint     = m_ptr->m_lsys_sched_scheduling_start.endpoint;
	rmp->parent       = m_ptr->m_lsys_sched_scheduling_start.parent;
	rmp->max_priority = m_ptr->m_lsys_sched_scheduling_start.maxprio;
	if (rmp->max_priority >= NR_SCHED_QUEUES) {
		return EINVAL;
	}

	/* Inherit current priority and time slice from parent. Since there
	 * is currently only one scheduler scheduling the whole system, this
	 * value is local and we assert that the parent endpoint is valid */
	if (rmp->endpoint == rmp->parent) {
		/* We have a special case here for init, which is the first
		   process scheduled, and the parent of itself. */
		rmp->priority   = USER_Q;
		rmp->time_slice = DEFAULT_USER_TIME_SLICE;

		/*
		 * Since the kernel never changes the cpu of a process, all
		 * processes are started on the BSP, and userspace scheduling
		 * has not changed that yet either, so we can be sure the BSP
		 * is the processor where the process runs now.
		 */
#ifdef CONFIG_SMP
		rmp->cpu = machine.bsp_id;
		/* FIXME set the cpu mask */
#endif
	}

	switch (m_ptr->m_type) {

	case SCHEDULING_START:
		/* We have a special case here for system processes, for which
		 * quantum and priority are set explicitly rather than inherited
		 * from the parent */
		rmp->priority   = rmp->max_priority;
		rmp->time_slice = m_ptr->m_lsys_sched_scheduling_start.quantum;
		break;

	case SCHEDULING_INHERIT:
		/* Inherit current priority and time slice from parent. Since there
		 * is currently only one scheduler scheduling the whole system, this
		 * value is local and we assert that the parent endpoint is valid */
		if ((rv = sched_isokendpt(m_ptr->m_lsys_sched_scheduling_start.parent,
				&parent_nr_n)) != OK)
			return rv;

		rmp->priority = schedproc[parent_nr_n].priority;
		rmp->time_slice = schedproc[parent_nr_n].time_slice;
		break;

	default:
		/* not reachable */
		assert(0);
	}

	/* Take over scheduling the process. The kernel reply message populates
	 * the process' current priority and its time slice */
	if ((rv = sys_schedctl(0, rmp->endpoint, 0, 0, 0)) != OK) {
		printf("Sched: Error taking over scheduling for %d, kernel said %d\n",
			rmp->endpoint, rv);
		return rv;
	}
	rmp->flags = IN_USE;

	/* Schedule the process, giving it some quantum */
	pick_cpu(rmp);
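	/* If the kernel rejects the chosen CPU with EBADCPU, mark that CPU as
	 * dead so it is never picked again and retry on another one.
	 */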
	while ((rv = schedule_process(rmp, SCHEDULE_CHANGE_ALL)) == EBADCPU) {
		/* don't try this CPU ever again */
		cpu_proc[rmp->cpu] = CPU_DEAD;
		pick_cpu(rmp);
	}

	if (rv != OK) {
		printf("Sched: Error while scheduling process, kernel replied %d\n",
			rv);
		return rv;
	}

	/* Mark ourselves as the new scheduler.
	 * By default, processes are scheduled by the parent's scheduler. If
	 * this scheduler wanted to delegate scheduling to another scheduler,
	 * it could do so and then write the endpoint of that scheduler into
	 * the "scheduler" field.
	 */

	m_ptr->m_sched_lsys_scheduling_start.scheduler = SCHED_PROC_NR;

	return OK;
}

/*===========================================================================*
 *				do_nice					     *
 *===========================================================================*/
int do_nice(message *m_ptr)
{
	struct schedproc *rmp;
	int rv;
	int proc_nr_n;
	unsigned new_q, old_q, old_max_q;

	/* check who can send you requests */
	if (!accept_message(m_ptr))
		return EPERM;

	if (sched_isokendpt(m_ptr->m_pm_sched_scheduling_set_nice.endpoint, &proc_nr_n) != OK) {
		printf("SCHED: WARNING: got an invalid endpoint in OoQ msg "
		"%d\n", m_ptr->m_pm_sched_scheduling_set_nice.endpoint);
		return EBADEPT;
	}

	rmp = &schedproc[proc_nr_n];
	new_q = m_ptr->m_pm_sched_scheduling_set_nice.maxprio;
	if (new_q >= NR_SCHED_QUEUES) {
		return EINVAL;
	}

	/* Store old values, in case we need to roll back the changes */
	old_q     = rmp->priority;
	old_max_q = rmp->max_priority;

	/* Update the proc entry and reschedule the process */
	rmp->max_priority = rmp->priority = new_q;

	if ((rv = schedule_process_local(rmp)) != OK) {
		/* Something went wrong when rescheduling the process, roll
		 * back the changes to proc struct */
		rmp->priority     = old_q;
		rmp->max_priority = old_max_q;
	}

	return rv;
}

/*===========================================================================*
 *				schedule_process			     *
 *===========================================================================*/
static int schedule_process(struct schedproc * rmp, unsigned flags)
{
	int err;
	int new_prio, new_quantum, new_cpu, niced;

	pick_cpu(rmp);

	if (flags & SCHEDULE_CHANGE_PRIO)
		new_prio = rmp->priority;
	else
		new_prio = -1;

	if (flags & SCHEDULE_CHANGE_QUANTUM)
		new_quantum = rmp->time_slice;
	else
		new_quantum = -1;

	if (flags & SCHEDULE_CHANGE_CPU)
		new_cpu = rmp->cpu;
	else
		new_cpu = -1;

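	/* A process counts as niced when its maximum priority queue is below
	 * the default user queue.
	 */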
	niced = (rmp->max_priority > USER_Q);

	if ((err = sys_schedule(rmp->endpoint, new_prio,
		new_quantum, new_cpu, niced)) != OK) {
		printf("SCHED: An error occurred when trying to schedule %d: %d\n",
		rmp->endpoint, err);
	}

	return err;
}


/*===========================================================================*
 *				init_scheduling				     *
 *===========================================================================*/
void init_scheduling(void)
{
	int r;

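	/* Convert the balancing interval from seconds to clock ticks and set
	 * an alarm so that balance_queues() runs periodically.
	 */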
	balance_timeout = BALANCE_TIMEOUT * sys_hz();

	if ((r = sys_setalarm(balance_timeout, 0)) != OK)
		panic("sys_setalarm failed: %d", r);
}

/*===========================================================================*
 *				balance_queues				     *
 *===========================================================================*/

/* This function is called every N ticks to rebalance the queues. The current
 * scheduler bumps processes down one priority whenever they run out of
 * quantum. This function finds all processes that have been bumped down,
 * and pulls them back up. This default policy will soon be changed.
 */
void balance_queues(void)
{
	struct schedproc *rmp;
	int r, proc_nr;

	for (proc_nr=0, rmp=schedproc; proc_nr < NR_PROCS; proc_nr++, rmp++) {
		if (rmp->flags & IN_USE) {
			if (rmp->priority > rmp->max_priority) {
				rmp->priority -= 1; /* increase priority */
				schedule_process_local(rmp);
			}
		}
	}

	if ((r = sys_setalarm(balance_timeout, 0)) != OK)
		panic("sys_setalarm failed: %d", r);
}
370