/*
 * Copyright (c) 1996, 1997
 *	HD Associates, Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by HD Associates, Inc
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY HD ASSOCIATES AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL HD ASSOCIATES OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* ksched: Soft real time scheduling based on "rtprio".
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_posix.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>

#include <posix4/posix4.h>

/* ksched: Real-time extension to support POSIX priority scheduling.
 */

struct ksched {
	struct timespec rr_interval;
};

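/* Allocate the ksched state and precompute the SCHED_RR quantum
 * (1000000000 / sched_rr_interval() nanoseconds), which is later reported
 * by ksched_rr_get_interval().
 */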
int ksched_attach(struct ksched **p)
{
	struct ksched *ksched = p31b_malloc(sizeof(*ksched));

	ksched->rr_interval.tv_sec = 0;
	ksched->rr_interval.tv_nsec = 1000000000L / sched_rr_interval();

	*p = ksched;
	return 0;
}

int ksched_detach(struct ksched *ks)
{
	p31b_free(ks);

	return 0;
}

/*
 * XXX About priorities
 *
 *	POSIX 1003.1b requires that numerically higher priorities be of
 *	higher priority.  It also permits sched_setparam to be
 *	implementation defined for SCHED_OTHER.  I don't like
 *	the notion of inverted priorities for normal processes when
 *	you can use "setpriority" for that.
 *
 *	I'm rejecting sched_setparam for SCHED_OTHER with EINVAL.
 */

/* Macros to convert between the unix (lower numerically is higher priority)
 * and POSIX 1003.1b (higher numerically is higher priority)
 */

#define p4prio_to_rtpprio(P) (RTP_PRIO_MAX - (P))
#define rtpprio_to_p4prio(P) (RTP_PRIO_MAX - (P))
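
/* Worked example, assuming the usual <sys/rtprio.h> range of
 * RTP_PRIO_MIN == 0 (strongest) to RTP_PRIO_MAX == 31 (weakest):
 * POSIX priority 31 maps to rtprio 0 and POSIX priority 0 maps to
 * rtprio 31, so the two scales are mirror images of each other.
 */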

/* These improve readability a bit for me:
 */
#define P1B_PRIO_MIN rtpprio_to_p4prio(RTP_PRIO_MAX)
#define P1B_PRIO_MAX rtpprio_to_p4prio(RTP_PRIO_MIN)

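/* Translate the thread's current rtprio class into the matching POSIX
 * policy constant (SCHED_FIFO, SCHED_RR or SCHED_OTHER).
 */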
static __inline int
getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
{
	struct rtprio rtp;
	int e = 0;

	mtx_lock_spin(&sched_lock);
	pri_to_rtp(td->td_ksegrp, &rtp);
	mtx_unlock_spin(&sched_lock);
	switch (rtp.type)
	{
		case RTP_PRIO_FIFO:
		*ret = SCHED_FIFO;
		break;

		case RTP_PRIO_REALTIME:
		*ret = SCHED_RR;
		break;

		default:
		*ret = SCHED_OTHER;
		break;
	}

	return e;
}

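/* sched_setparam() backend: keep the current policy and only change the
 * priority; per the XXX note above this is rejected with EINVAL for
 * SCHED_OTHER.
 */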
int ksched_setparam(register_t *ret, struct ksched *ksched,
	struct thread *td, const struct sched_param *param)
{
	register_t policy;
	int e;

	e = getscheduler(&policy, ksched, td);

	if (e == 0)
	{
		if (policy == SCHED_OTHER)
			e = EINVAL;
		else
			e = ksched_setscheduler(ret, ksched, td, policy, param);
	}

	return e;
}

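/* sched_getparam() backend: report the POSIX priority of a realtime
 * thread; for other policies the priority field is left untouched.
 */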
int ksched_getparam(register_t *ret, struct ksched *ksched,
	struct thread *td, struct sched_param *param)
{
	struct rtprio rtp;

	mtx_lock_spin(&sched_lock);
	pri_to_rtp(td->td_ksegrp, &rtp);
	mtx_unlock_spin(&sched_lock);
	if (RTP_PRIO_IS_REALTIME(rtp.type))
		param->sched_priority = rtpprio_to_p4prio(rtp.prio);

	return 0;
}

/*
 * XXX The priority and scheduler modifications should
 *     be moved into published interfaces in kern/kern_sync.
 *
 * The permissions to modify process p were checked in "p31b_proc()".
 *
 */
int ksched_setscheduler(register_t *ret, struct ksched *ksched,
	struct thread *td, int policy, const struct sched_param *param)
{
	int e = 0;
	struct rtprio rtp;
	struct ksegrp *kg = td->td_ksegrp;

	switch (policy)
	{
		case SCHED_RR:
		case SCHED_FIFO:

		if (param->sched_priority >= P1B_PRIO_MIN &&
		param->sched_priority <= P1B_PRIO_MAX)
		{
			rtp.prio = p4prio_to_rtpprio(param->sched_priority);
			rtp.type = (policy == SCHED_FIFO)
				? RTP_PRIO_FIFO : RTP_PRIO_REALTIME;

			mtx_lock_spin(&sched_lock);
			rtp_to_pri(&rtp, kg);
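			/*
			 * Propagate the new user priority to the threads in
			 * the group: force running threads to reschedule and
			 * bring threads sitting on a run queue up to the new
			 * user priority.
			 */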
			FOREACH_THREAD_IN_GROUP(kg, td) { /* XXXKSE */
				if (TD_IS_RUNNING(td)) {
					td->td_flags |= TDF_NEEDRESCHED;
				} else if (TD_ON_RUNQ(td)) {
					if (td->td_priority > kg->kg_user_pri) {
						sched_prio(td, kg->kg_user_pri);
					}
				}
			}
			mtx_unlock_spin(&sched_lock);
		}
		else
			e = EPERM;

		break;

		case SCHED_OTHER:
		{
			rtp.type = RTP_PRIO_NORMAL;
			rtp.prio = p4prio_to_rtpprio(param->sched_priority);
			mtx_lock_spin(&sched_lock);
			rtp_to_pri(&rtp, kg);

			/* XXX Simply revert to whatever we had for last
			 *     normal scheduler priorities.
			 *     This puts a requirement
			 *     on the scheduling code: You must leave the
			 *     scheduling info alone.
			 */
			FOREACH_THREAD_IN_GROUP(kg, td) {
				if (TD_IS_RUNNING(td)) {
					td->td_flags |= TDF_NEEDRESCHED;
				} else if (TD_ON_RUNQ(td)) {
					if (td->td_priority > kg->kg_user_pri) {
						sched_prio(td, kg->kg_user_pri);
					}
				}
			}
			mtx_unlock_spin(&sched_lock);
		}
		break;
	}

	return e;
}

int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
{
	return getscheduler(ret, ksched, td);
}

/* ksched_yield: Yield the CPU.
 */
int ksched_yield(register_t *ret, struct ksched *ksched)
{
	mtx_lock_spin(&sched_lock);
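	/*
	 * Only request a reschedule here; the switch itself happens at the
	 * next rescheduling point (e.g. the AST taken on the way back to
	 * user mode).
	 */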
	curthread->td_flags |= TDF_NEEDRESCHED;
	mtx_unlock_spin(&sched_lock);
	return 0;
}

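/* Policy-specific priority ranges: the realtime range for SCHED_FIFO and
 * SCHED_RR, the nice range (PRIO_MIN .. PRIO_MAX) for SCHED_OTHER.
 */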
int ksched_get_priority_max(register_t *ret, struct ksched *ksched, int policy)
{
	int e = 0;

	switch (policy)
	{
		case SCHED_FIFO:
		case SCHED_RR:
		*ret = RTP_PRIO_MAX;
		break;

		case SCHED_OTHER:
		*ret = PRIO_MAX;
		break;

		default:
		e = EINVAL;
	}

	return e;
}

int ksched_get_priority_min(register_t *ret, struct ksched *ksched, int policy)
{
	int e = 0;

	switch (policy)
	{
		case SCHED_FIFO:
		case SCHED_RR:
		*ret = P1B_PRIO_MIN;
		break;

		case SCHED_OTHER:
		*ret = PRIO_MIN;
		break;

		default:
		e = EINVAL;
	}

	return e;
}

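/* Report the SCHED_RR quantum computed in ksched_attach().
 */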
int ksched_rr_get_interval(register_t *ret, struct ksched *ksched,
	struct thread *td, struct timespec *timespec)
{
	*timespec = ksched->rr_interval;

	return 0;
}