/*
 * SYS/USCHED.H
 *
 *	Userland scheduler API
 */

#ifndef _SYS_USCHED_H_
#define _SYS_USCHED_H_

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_CPUMASK_H_
#include <sys/cpumask.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _SYS_SYSTIMER_H_
#include <sys/systimer.h>
#endif

#define NAME_LENGTH 32

struct lwp;
struct proc;
struct globaldata;

struct usched {
    TAILQ_ENTRY(usched) entry;		/* list of registered schedulers */
    const char *name;			/* short scheduler name */
    const char *desc;			/* human-readable description */
    void (*usched_register)(void);	/* called when scheduler is installed */
    void (*usched_unregister)(void);	/* called when scheduler is removed */
    void (*acquire_curproc)(struct lwp *);  /* acquire current-process status */
    void (*release_curproc)(struct lwp *);  /* release current-process status */
    void (*setrunqueue)(struct lwp *);	/* place runnable lwp on a run queue */
    void (*schedulerclock)(struct lwp *, sysclock_t, sysclock_t);
					/* periodic scheduler clock hook */
    void (*recalculate)(struct lwp *);	/* recalculate dynamic priority */
    void (*resetpriority)(struct lwp *);  /* recompute priority after a change */
    void (*heuristic_forking)(struct lwp *, struct lwp *);
					/* propagate heuristics parent->child */
    void (*heuristic_exiting)(struct lwp *, struct proc *);
					/* hand heuristics back on lwp exit */
    void (*uload_update)(struct lwp *);	/* update user load accounting */
    void (*setcpumask)(struct usched *, cpumask_t);
					/* restrict scheduler to a cpu set */
    void (*yield)(struct lwp *);	/* voluntary yield */
    void (*changedcpu)(struct lwp *);	/* lwp was migrated to another cpu */
};
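
/*
 * Example (sketch, not part of the API): a scheduler implementation
 * typically provides a statically initialized struct usched and hooks it
 * in via usched_ctl() (see USCH_ADD below).  The "example_*" names are
 * purely illustrative; see usched_bsd4.c and usched_dfly.c for the real
 * ops tables.
 *
 *	static struct usched usched_example = {
 *		.name = "example",
 *		.desc = "illustrative userland scheduler",
 *		.acquire_curproc = example_acquire_curproc,
 *		.release_curproc = example_release_curproc,
 *		.setrunqueue = example_setrunqueue,
 *		.schedulerclock = example_schedulerclock,
 *		.recalculate = example_recalculate,
 *		.resetpriority = example_resetpriority,
 *		.heuristic_forking = example_forking,
 *		.heuristic_exiting = example_exiting,
 *		.uload_update = example_uload_update,
 *		.setcpumask = example_setcpumask,
 *		.yield = example_yield,
 *		.changedcpu = example_changedcpu,
 *	};
 */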

union usched_data {
    /*
     * BSD4 scheduler.
     */
    struct {
	short	priority;	/* lower is better */
	char	unused01;	/* (currently not used) */
	char	rqindex;
	int	batch;		/* batch mode heuristic */
	int	estcpu;		/* dynamic priority modification */
	u_short rqtype;		/* protected copy of rtprio type */
	u_short	unused02;
    } bsd4;

    /*
     * DFLY scheduler.
     */
    struct {
	short	priority;	/* lower is better */
	char	forked;		/* lock cpu during fork */
	char	rqindex;
	short	estfast;	/* fast estcpu collapse mode */
	short	uload;		/* for delta uload adjustments */
	int	estcpu;		/* dynamic priority modification */
	u_short rqtype;		/* protected copy of rtprio type */
	u_short	qcpu;		/* which cpu are we enqueued on? */
	u_short rrcount;	/* reset when moved to runq tail */
	u_short unused01;
	u_short unused02;
	u_short unused03;
    } dfly;

    int		pad[6];		/* PAD for future expansion */
};
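
/*
 * Example (sketch): each lwp carries one of these unions as its
 * per-thread scheduler state (the lwp_usdata field of struct lwp is
 * assumed here), and only the member belonging to the scheduler that
 * currently owns the lwp is meaningful.  Illustrative read access under
 * the dfly scheduler:
 *
 *	short pri   = lp->lwp_usdata.dfly.priority;	(lower is better)
 *	u_short cpu = lp->lwp_usdata.dfly.qcpu;		(cpu it is enqueued on)
 */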

/*
 * Flags for usched_ctl()
 */
#define	USCH_ADD	0x00000001
#define	USCH_REM	0x00000010
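
/*
 * Example (sketch): a hypothetical scheduler module would pass one of
 * these flags to usched_ctl() (declared below) to make itself available
 * or to withdraw itself; usched_example is illustrative only:
 *
 *	error = usched_ctl(&usched_example, USCH_ADD);
 *	error = usched_ctl(&usched_example, USCH_REM);
 */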

#endif	/* _KERNEL || _KERNEL_STRUCTURES */

#define USCHED_SET_SCHEDULER	0
#define USCHED_SET_CPU		1
#define USCHED_ADD_CPU		2
#define USCHED_DEL_CPU		3
#define USCHED_GET_CPU		4
#define USCHED_GET_CPUMASK	5	/* since DragonFly 4.5 */
#define USCHED_SET_CPUMASK	6	/* since DragonFly 4.7 */
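
/*
 * Example (sketch): from userland these commands are issued through
 * usched_set(2), declared at the bottom of this header.  Pinning the
 * calling process to cpu 1, for instance, might look like the following
 * (error handling elided):
 *
 *	int cpuid = 1;
 *
 *	usched_set(getpid(), USCHED_SET_CPU, &cpuid, sizeof(cpuid));
 *
 * USCHED_GET_CPUMASK and USCHED_SET_CPUMASK operate on a cpumask_t
 * instead of a single cpu id.
 */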

/*
 * Kernel variables and procedures, or user system calls.
 */
#ifdef _KERNEL

extern struct usched	usched_bsd4;
extern struct usched	usched_dfly;
extern struct usched	usched_dummy;
extern cpumask_t	usched_mastermask;
extern int		sched_ticks;	/* from sys/kern/kern_clock.c */

void dfly_acquire_curproc(struct lwp *);
int usched_ctl(struct usched *, int);
struct usched *usched_init(void);
void usched_schedulerclock(struct lwp *, sysclock_t, sysclock_t);
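
/*
 * Example (sketch): the kernel normally does not call a particular
 * scheduler directly; it dispatches through the ops table attached to
 * the owning process (the p_usched pointer of struct proc is assumed
 * here).  Making an lwp runnable would then look like:
 *
 *	lp->lwp_proc->p_usched->setrunqueue(lp);
 */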

#endif	/* _KERNEL */

#if !defined(_KERNEL) || defined(_KERNEL_VIRTUAL)

int usched_set(pid_t, int, void *, int);

#endif	/* !_KERNEL || _KERNEL_VIRTUAL */

#endif	/* !_SYS_USCHED_H_ */