xref: /freebsd/sys/kern/ksched.c (revision b43179fb)
/*
 * Copyright (c) 1996, 1997
 *	HD Associates, Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by HD Associates, Inc
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY HD ASSOCIATES AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL HD ASSOCIATES OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/* ksched: Soft real time scheduling based on "rtprio".
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>

#include <posix4/posix4.h>

/* ksched: Real-time extension to support POSIX priority scheduling.
 */

struct ksched {
	struct timespec rr_interval;
};

int ksched_attach(struct ksched **p)
{
	struct ksched *ksched = p31b_malloc(sizeof(*ksched));

	ksched->rr_interval.tv_sec = 0;
	ksched->rr_interval.tv_nsec = 1000000000L / sched_rr_interval();

	*p = ksched;
	return 0;
}

int ksched_detach(struct ksched *ks)
{
	p31b_free(ks);

	return 0;
}

/*
 * XXX About priorities
 *
 *	POSIX 1003.1b requires that numerically higher priorities be of
 *	higher priority.  It also permits sched_setparam to be
 *	implementation defined for SCHED_OTHER.  I don't like
 *	the notion of inverted priorities for normal processes when
 *	you can use "setpriority" for that.
 *
 *	I'm rejecting sched_setparam for SCHED_OTHER with EINVAL.
 */
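
/*
 * Illustrative only: from userland, the decision above means a plain
 * time-sharing (SCHED_OTHER) process adjusts its priority with
 * setpriority(2) rather than sched_setparam(2).  A rough sketch,
 * assuming the calling process is still SCHED_OTHER:
 *
 *	struct sched_param sp = { 10 };
 *
 *	sched_setparam(0, &sp);			// fails, errno == EINVAL
 *	setpriority(PRIO_PROCESS, 0, 10);	// the supported route
 */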

/* Macros to convert between the Unix convention (numerically lower is
 * higher priority) and the POSIX 1003.1b convention (numerically higher
 * is higher priority).
 */

#define p4prio_to_rtpprio(P) (RTP_PRIO_MAX - (P))
#define rtpprio_to_p4prio(P) (RTP_PRIO_MAX - (P))

/* These improve readability a bit for me:
 */
#define P1B_PRIO_MIN rtpprio_to_p4prio(RTP_PRIO_MAX)
#define P1B_PRIO_MAX rtpprio_to_p4prio(RTP_PRIO_MIN)
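
/*
 * Worked example of the conversion, assuming RTP_PRIO_MIN == 0 and
 * RTP_PRIO_MAX == 31 as defined in <sys/rtprio.h>:
 *
 *	p4prio_to_rtpprio(31) == 0	(strongest POSIX -> strongest rtprio)
 *	p4prio_to_rtpprio(0)  == 31	(weakest POSIX -> weakest rtprio)
 *
 * so P1B_PRIO_MIN == 0 and P1B_PRIO_MAX == 31, and applying either
 * macro twice gives back the original value.
 */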

static __inline int
getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
{
	struct rtprio rtp;
	int e = 0;

	mtx_lock_spin(&sched_lock);
	pri_to_rtp(td->td_ksegrp, &rtp);
	mtx_unlock_spin(&sched_lock);
	switch (rtp.type)
	{
		case RTP_PRIO_FIFO:
		*ret = SCHED_FIFO;
		break;

		case RTP_PRIO_REALTIME:
		*ret = SCHED_RR;
		break;

		default:
		*ret = SCHED_OTHER;
		break;
	}

	return e;
}

int ksched_setparam(register_t *ret, struct ksched *ksched,
	struct thread *td, const struct sched_param *param)
{
	register_t policy;
	int e;

	e = getscheduler(&policy, ksched, td);

	if (e == 0)
	{
		if (policy == SCHED_OTHER)
			e = EINVAL;
		else
			e = ksched_setscheduler(ret, ksched, td, policy, param);
	}

	return e;
}

int ksched_getparam(register_t *ret, struct ksched *ksched,
	struct thread *td, struct sched_param *param)
{
	struct rtprio rtp;

	mtx_lock_spin(&sched_lock);
	pri_to_rtp(td->td_ksegrp, &rtp);
	mtx_unlock_spin(&sched_lock);
	if (RTP_PRIO_IS_REALTIME(rtp.type))
		param->sched_priority = rtpprio_to_p4prio(rtp.prio);

	return 0;
}

/*
 * XXX The priority and scheduler modifications should
 *     be moved into published interfaces in kern/kern_sync.
 *
 * The permissions to modify process p were checked in "p31b_proc()".
 *
 */
int ksched_setscheduler(register_t *ret, struct ksched *ksched,
	struct thread *td, int policy, const struct sched_param *param)
{
	int e = 0;
	struct rtprio rtp;
	struct ksegrp *kg = td->td_ksegrp;

	switch (policy)
	{
		case SCHED_RR:
		case SCHED_FIFO:

		if (param->sched_priority >= P1B_PRIO_MIN &&
		    param->sched_priority <= P1B_PRIO_MAX)
		{
			rtp.prio = p4prio_to_rtpprio(param->sched_priority);
			rtp.type = (policy == SCHED_FIFO)
				? RTP_PRIO_FIFO : RTP_PRIO_REALTIME;

			mtx_lock_spin(&sched_lock);
			rtp_to_pri(&rtp, kg);
			FOREACH_THREAD_IN_GROUP(kg, td) { /* XXXKSE */
				if (TD_IS_RUNNING(td)) {
					td->td_kse->ke_flags |= KEF_NEEDRESCHED;
				} else if (TD_ON_RUNQ(td)) {
					if (td->td_priority > kg->kg_user_pri) {
						remrunqueue(td);
						td->td_priority =
						    kg->kg_user_pri;
						setrunqueue(td);
					}
				}
			}
			mtx_unlock_spin(&sched_lock);
		}
		else
			e = EPERM;

		break;

		case SCHED_OTHER:
		{
			rtp.type = RTP_PRIO_NORMAL;
			rtp.prio = p4prio_to_rtpprio(param->sched_priority);
			mtx_lock_spin(&sched_lock);
			rtp_to_pri(&rtp, kg);

			/* XXX Simply revert to whatever we had for last
			 *     normal scheduler priorities.
			 *     This puts a requirement
			 *     on the scheduling code: You must leave the
			 *     scheduling info alone.
			 */
			FOREACH_THREAD_IN_GROUP(kg, td) {
				if (TD_IS_RUNNING(td)) {
					td->td_kse->ke_flags |= KEF_NEEDRESCHED;
				} else if (TD_ON_RUNQ(td)) {
					if (td->td_priority > kg->kg_user_pri) {
						remrunqueue(td);
						td->td_priority =
						    kg->kg_user_pri;
						setrunqueue(td);
					}
				}
			}
			mtx_unlock_spin(&sched_lock);
		}
		break;
	}

	return e;
}

int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
{
	return getscheduler(ret, ksched, td);
}

/* ksched_yield: Yield the CPU.
 */
int ksched_yield(register_t *ret, struct ksched *ksched)
{
	mtx_lock_spin(&sched_lock);
	curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
	mtx_unlock_spin(&sched_lock);
	return 0;
}

int ksched_get_priority_max(register_t *ret, struct ksched *ksched, int policy)
{
	int e = 0;

	switch (policy)
	{
		case SCHED_FIFO:
		case SCHED_RR:
		*ret = RTP_PRIO_MAX;
		break;

		case SCHED_OTHER:
		*ret = PRIO_MAX;
		break;

		default:
		e = EINVAL;
	}

	return e;
}

int ksched_get_priority_min(register_t *ret, struct ksched *ksched, int policy)
{
	int e = 0;

	switch (policy)
	{
		case SCHED_FIFO:
		case SCHED_RR:
		*ret = P1B_PRIO_MIN;
		break;

		case SCHED_OTHER:
		*ret = PRIO_MIN;
		break;

		default:
		e = EINVAL;
	}

	return e;
}

int ksched_rr_get_interval(register_t *ret, struct ksched *ksched,
	struct thread *td, struct timespec *timespec)
{
	*timespec = ksched->rr_interval;

	return 0;
}
305