/*	$NetBSD: sched.h,v 1.92 2023/07/13 12:06:20 riastradh Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2007, 2008, 2019, 2020
 *    The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey, Jason R. Thorpe, Nathan J. Williams, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#ifndef	_SYS_SCHED_H_
#define	_SYS_SCHED_H_

#include <sys/featuretest.h>
#include <sys/types.h>

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif

struct sched_param {
	int	sched_priority;
};

/*
 * Scheduling policies required by IEEE Std 1003.1-2001
 */
#define	SCHED_NONE	-1
#define	SCHED_OTHER	0
#define	SCHED_FIFO	1
#define	SCHED_RR	2
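
/*
 * Illustrative usage sketch, not part of this header: filling in a
 * struct sched_param and selecting one of the policies above, here via
 * the standard pthread_setschedparam(3) interface.  The choice of
 * SCHED_RR and of sched_get_priority_max(3) for the priority is only
 * an example.
 *
 *	#include <pthread.h>
 *	#include <sched.h>
 *
 *	struct sched_param sp;
 *	int error;
 *
 *	sp.sched_priority = sched_get_priority_max(SCHED_RR);
 *	error = pthread_setschedparam(pthread_self(), SCHED_RR, &sp);
 */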
92 
93 #if defined(_NETBSD_SOURCE)
94 __BEGIN_DECLS
95 
96 /*
97  * Interface of CPU-sets.
98  */
99 typedef struct _cpuset cpuset_t;
100 
101 #ifndef _KERNEL
102 
103 #define	cpuset_create()		_cpuset_create()
104 #define	cpuset_destroy(c)	_cpuset_destroy(c)
105 #define	cpuset_size(c)		_cpuset_size(c)
106 #define	cpuset_zero(c)		_cpuset_zero(c)
107 #define	cpuset_isset(i, c)	_cpuset_isset(i, c)
108 #define	cpuset_set(i, c)	_cpuset_set(i, c)
109 #define	cpuset_clr(i, c)	_cpuset_clr(i, c)
110 
111 cpuset_t *_cpuset_create(void);
112 void	_cpuset_destroy(cpuset_t *);
113 void	_cpuset_zero(cpuset_t *);
114 int	_cpuset_set(cpuid_t, cpuset_t *);
115 int	_cpuset_clr(cpuid_t, cpuset_t *);
116 int	_cpuset_isset(cpuid_t, const cpuset_t *);
117 size_t	_cpuset_size(const cpuset_t *);
118 
119 #endif
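
/*
 * Illustrative sketch of the userland cpuset life cycle using the
 * wrappers above; see also cpuset(3).  The error handling with err(3)
 * and the choice of CPU index 0 are assumptions for the example only.
 *
 *	#include <sched.h>
 *	#include <assert.h>
 *	#include <err.h>
 *	#include <stdlib.h>
 *
 *	cpuset_t *cs;
 *
 *	cs = cpuset_create();
 *	if (cs == NULL)
 *		err(EXIT_FAILURE, "cpuset_create");
 *	cpuset_zero(cs);
 *	if (cpuset_set(0, cs) == -1)
 *		err(EXIT_FAILURE, "cpuset_set");
 *	assert(cpuset_isset(0, cs) > 0);
 *	cpuset_destroy(cs);
 */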

/*
 * Internal affinity and scheduling calls.
 */
int	_sched_getaffinity(pid_t, lwpid_t, size_t, cpuset_t *);
int	_sched_setaffinity(pid_t, lwpid_t, size_t, const cpuset_t *);
int	_sched_getparam(pid_t, lwpid_t, int *, struct sched_param *);
int	_sched_setparam(pid_t, lwpid_t, int, const struct sched_param *);
int	_sched_protect(int);
__END_DECLS
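
/*
 * Illustrative sketch of binding the calling process with
 * _sched_setaffinity().  The pid 0 / lwpid 0 convention for "the caller
 * and all of its LWPs" mirrors the sched_setaffinity_np(3) wrapper and
 * is an assumption here, as is the use of CPU index 1; the call
 * normally requires appropriate privileges.
 *
 *	cpuset_t *cs;
 *
 *	cs = cpuset_create();
 *	if (cs == NULL)
 *		err(EXIT_FAILURE, "cpuset_create");
 *	cpuset_set(1, cs);
 *	if (_sched_setaffinity(0, 0, cpuset_size(cs), cs) == -1)
 *		err(EXIT_FAILURE, "_sched_setaffinity");
 *	cpuset_destroy(cs);
 */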

/*
 * CPU states.
 * XXX Not really scheduler state, but no other good place to put
 * it right now, and it really is per-CPU.
 */
#define	CP_USER		0
#define	CP_NICE		1
#define	CP_SYS		2
#define	CP_INTR		3
#define	CP_IDLE		4
#define	CPUSTATES	5
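
/*
 * Illustrative sketch: the CP_* constants index arrays of per-state
 * time counters such as spc_cp_time below.  Assuming the kern.cp_time
 * sysctl(7) node exports such an array of uint64_t counters, an idle
 * percentage could be derived roughly as follows.
 *
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	uint64_t cp[CPUSTATES], total = 0;
 *	size_t len = sizeof(cp);
 *	int i;
 *
 *	if (sysctlbyname("kern.cp_time", cp, &len, NULL, 0) == -1)
 *		err(EXIT_FAILURE, "sysctlbyname");
 *	for (i = 0; i < CPUSTATES; i++)
 *		total += cp[i];
 *	if (total != 0)
 *		printf("idle: %.1f%%\n", 100.0 * cp[CP_IDLE] / total);
 */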

#if defined(_KERNEL) || defined(_KMEMUSER)

#include <sys/time.h>
#include <sys/queue.h>

struct kmutex;

/*
 * Per-CPU scheduler state.  Field markings and the corresponding locks:
 *
 * s:	splsched, may only be safely accessed by the CPU itself
 * m:	spc_mutex
 * (:	unlocked, stable
 * c:	cpu_lock
 */
struct schedstate_percpu {
	struct kmutex	*spc_mutex;	/* (: lock on below, runnable LWPs */
	struct kmutex	*spc_lwplock;	/* (: general purpose lock for LWPs */
	struct lwp	*spc_migrating;	/* (: migrating LWP */
	struct cpu_info *spc_nextpkg;	/* (: next package 1st for RR */
	psetid_t	spc_psid;	/* c: processor-set ID */
	time_t		spc_lastmod;	/* c: time of last cpu state change */
	volatile int	spc_flags;	/* s: flags; see below */
	u_int		spc_schedticks;	/* s: ticks for schedclock() */
	uint64_t	spc_cp_time[CPUSTATES];/* s: CPU state statistics */
	int		spc_ticks;	/* s: ticks until sched_tick() */
	int		spc_pscnt;	/* s: prof/stat counter */
	int		spc_psdiv;	/* s: prof/stat divisor */
	int		spc_nextskim;	/* s: next time to skim other queues */
	/* Run queue */
	volatile pri_t	spc_curpriority;/* s: usrpri of curlwp */
	pri_t		spc_maxpriority;/* m: highest priority queued */
	u_int		spc_count;	/* m: count of the threads */
	u_int		spc_mcount;	/* m: count of migratable threads */
	uint32_t	spc_bitmap[8];	/* m: bitmap of active queues */
	TAILQ_HEAD(,lwp) *spc_queue;	/* m: queue for each priority */
};

/* spc_flags */
#define	SPCF_SEENRR		0x0001	/* process has seen roundrobin() */
#define	SPCF_SHOULDYIELD	0x0002	/* process should yield the CPU */
#define	SPCF_OFFLINE		0x0004	/* CPU marked offline */
#define	SPCF_RUNNING		0x0008	/* CPU is running */
#define	SPCF_NOINTR		0x0010	/* shielded from interrupts */
#define	SPCF_IDLE		0x0020	/* CPU is currently idle */
#define	SPCF_1STCLASS		0x0040	/* first class scheduling entity */
#define	SPCF_CORE1ST		0x0100	/* first CPU in core */
#define	SPCF_PACKAGE1ST		0x0200	/* first CPU in package */

#define	SPCF_SWITCHCLEAR	(SPCF_SEENRR|SPCF_SHOULDYIELD)
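
/*
 * Illustrative sketch of honouring the field markings above: an "s:"
 * field is only touched at splsched() on the owning CPU.  The
 * ci_schedstate member of struct cpu_info is assumed here, and the
 * read of SPCF_IDLE is purely an example.
 *
 *	struct schedstate_percpu *spc;
 *	bool idle;
 *	int s;
 *
 *	s = splsched();
 *	spc = &curcpu()->ci_schedstate;
 *	idle = (spc->spc_flags & SPCF_IDLE) != 0;
 *	splx(s);
 */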

#endif /* defined(_KERNEL) || defined(_KMEMUSER) */

/*
 * Flags passed to the Linux-compatible __clone(2) system call.
 */
#define	CLONE_CSIGNAL		0x000000ff	/* signal to be sent at exit */
#define	CLONE_VM		0x00000100	/* share address space */
#define	CLONE_FS		0x00000200	/* share "file system" info */
#define	CLONE_FILES		0x00000400	/* share file descriptors */
#define	CLONE_SIGHAND		0x00000800	/* share signal actions */
#define	CLONE_PTRACE		0x00002000	/* ptrace(2) continues on
						   child */
#define	CLONE_VFORK		0x00004000	/* parent blocks until child
						   exits */
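
/*
 * Illustrative sketch of combining the flags above through the clone(3)
 * wrapper.  child_func, CHILD_STACKSIZE and the use of SIGCHLD as the
 * exit signal (within CLONE_CSIGNAL) are assumptions for the example;
 * the stack pointer passed is machine-dependent (the end of the region
 * on ports where the stack grows down).
 *
 *	#include <sched.h>
 *	#include <err.h>
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	char *stack;
 *	pid_t pid;
 *
 *	stack = malloc(CHILD_STACKSIZE);
 *	if (stack == NULL)
 *		err(EXIT_FAILURE, "malloc");
 *	pid = clone(child_func, stack + CHILD_STACKSIZE,
 *	    CLONE_VM | CLONE_VFORK | SIGCHLD, NULL);
 */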

#endif /* _NETBSD_SOURCE */

#ifdef _KERNEL

extern int schedhz;			/* ideally: 16 */
extern u_int sched_rrticks;
extern u_int sched_pstats_ticks;

struct proc;
struct cpu_info;

/*
 * Common Scheduler Interface.
 */

/* Scheduler initialization */
void		runq_init(void);
void		synch_init(void);
void		sched_init(void);
void		sched_rqinit(void);
void		sched_cpuattach(struct cpu_info *);

/* Time-driven events */
void		sched_tick(struct cpu_info *);
void		schedclock(struct lwp *);
void		sched_schedclock(struct lwp *);
void		sched_pstats(void);
void		sched_lwp_stats(struct lwp *);
void		sched_pstats_hook(struct lwp *, int);

/* Runqueue-related functions */
bool		sched_curcpu_runnable_p(void);
void		sched_dequeue(struct lwp *);
void		sched_enqueue(struct lwp *);
void		sched_preempted(struct lwp *);
void		sched_resched_cpu(struct cpu_info *, pri_t, bool);
void		sched_resched_lwp(struct lwp *, bool);
struct lwp *	sched_nextlwp(void);
void		sched_oncpu(struct lwp *);
void		sched_newts(struct lwp *);
void		sched_vforkexec(struct lwp *, bool);

/* Priority adjustment */
void		sched_nice(struct proc *, int);

/* Handlers of fork and exit */
void		sched_proc_fork(struct proc *, struct proc *);
void		sched_proc_exit(struct proc *, struct proc *);
void		sched_lwp_fork(struct lwp *, struct lwp *);
void		sched_lwp_collect(struct lwp *);

void		sched_slept(struct lwp *);
void		sched_wakeup(struct lwp *);

void		setrunnable(struct lwp *);
void		sched_setrunnable(struct lwp *);

struct cpu_info *sched_takecpu(struct lwp *);
void		sched_print_runqueue(void (*pr)(const char *, ...)
    __printflike(1, 2));

/* Dispatching */
bool		kpreempt(uintptr_t);
void		preempt(void);
bool		preempt_needed(void);
void		preempt_point(void);
void		yield(void);
void		mi_switch(struct lwp *);
void		updatertime(lwp_t *, const struct bintime *);
void		sched_idle(void);
void		suspendsched(void);
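
/*
 * Illustrative sketch of offering a preemption point from a long-running
 * kernel loop using preempt_point() above; work_remaining() and
 * process_one_item() are hypothetical stand-ins, and preempt_needed()
 * can be used where only the check is wanted.
 *
 *	while (work_remaining()) {
 *		process_one_item();
 *		preempt_point();
 *	}
 */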

int		do_sched_setparam(pid_t, lwpid_t, int, const struct sched_param *);
int		do_sched_getparam(pid_t, lwpid_t, int *, struct sched_param *);

#endif	/* _KERNEL */
#endif	/* _SYS_SCHED_H_ */