/*
 * SYS/USCHED.H
 *
 *	Userland scheduler API
 *
 * $DragonFly: src/sys/sys/usched.h,v 1.15 2008/04/21 15:24:47 dillon Exp $
 */

#ifndef _SYS_USCHED_H_
#define _SYS_USCHED_H_

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _SYS_SYSTIMER_H_
#include <sys/systimer.h>
#endif

#define NAME_LENGTH 32

struct lwp;
struct proc;
struct globaldata;

struct usched {
    TAILQ_ENTRY(usched) entry;
    const char *name;				/* short scheduler name */
    const char *desc;				/* one-line description */
    void (*usched_register)(void);		/* called when scheduler is registered */
    void (*usched_unregister)(void);		/* called when scheduler is unregistered */
    void (*acquire_curproc)(struct lwp *);	/* acquire cpu for return to userland */
    void (*release_curproc)(struct lwp *);	/* give up current-process status */
    void (*setrunqueue)(struct lwp *);		/* make an lwp runnable */
    void (*schedulerclock)(struct lwp *, sysclock_t, sysclock_t);
						/* scheduler clock interrupt hook */
    void (*recalculate)(struct lwp *);		/* recalculate estcpu/priority */
    void (*resetpriority)(struct lwp *);	/* recompute user priority */
    void (*heuristic_forking)(struct lwp *, struct lwp *);
						/* fork-time hook (parent, child) */
    void (*heuristic_exiting)(struct lwp *, struct proc *);
						/* exit-time hook */
    void (*uload_update)(struct lwp *);		/* adjust user load accounting */
    void (*setcpumask)(struct usched *, cpumask_t);
						/* limit scheduler to a set of cpus */
    void (*yield)(struct lwp *);		/* voluntary yield */
};
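
/*
 * Illustrative sketch only (not part of this header): a scheduler module
 * provides one instance of this structure with its hook functions filled
 * in, and hands it to usched_ctl() (declared further below).  The name
 * "usched_example" and the example_* hooks are hypothetical.
 *
 *	static struct usched usched_example = {
 *		.name = "example",
 *		.desc = "example userland scheduler",
 *		.acquire_curproc = example_acquire_curproc,
 *		.release_curproc = example_release_curproc,
 *		.setrunqueue = example_setrunqueue,
 *		.schedulerclock = example_schedulerclock,
 *		...
 *	};
 */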

union usched_data {
    /*
     * BSD4 scheduler.
     */
    struct {
	short	priority;	/* lower is better */
	char	unused01;	/* (currently not used) */
	char	rqindex;	/* run queue index */
	int	batch;		/* batch mode heuristic */
	int	estcpu;		/* dynamic priority modification */
	u_short rqtype;		/* protected copy of rtprio type */
	u_short	unused02;
    } bsd4;

    /*
     * DFLY scheduler.
     */
    struct {
	short	priority;	/* lower is better */
	char	forked;		/* lock cpu during fork */
	char	rqindex;	/* run queue index */
	short	estfast;	/* fast estcpu collapse mode */
	short	uload;		/* for delta uload adjustments */
	int	estcpu;		/* dynamic priority modification */
	u_short rqtype;		/* protected copy of rtprio type */
	u_short	qcpu;		/* which cpu are we enqueued on? */
	u_short rrcount;	/* reset when moved to runq tail */
	u_short unused01;
	u_short unused02;
	u_short unused03;
    } dfly;

    int		pad[6];		/* PAD for future expansion */
};
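
/*
 * Note (descriptive, not normative): each lwp carries one of these unions
 * as its scheduler-private state (the lwp_usdata field of struct lwp), and
 * only the scheduler that owns the lwp interprets its arm of the union.
 */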

/*
 * Flags for usched_ctl()
 */
#define        USCH_ADD        0x00000001
#define        USCH_REM        0x00000010

#endif	/* _KERNEL || _KERNEL_STRUCTURES */

/*
 * Commands for usched_set()
 */
#define USCHED_SET_SCHEDULER	0
#define USCHED_SET_CPU		1
#define USCHED_ADD_CPU		2
#define USCHED_DEL_CPU		3
#define USCHED_GET_CPU		4

/*
 * Kernel variables and procedures, or user system calls.
 */
#ifdef _KERNEL

extern struct usched	usched_bsd4;
extern struct usched	usched_dfly;
extern struct usched	usched_dummy;
extern cpumask_t usched_mastermask;
extern int sched_ticks; /* From sys/kern/kern_clock.c */

int usched_ctl(struct usched *, int);
struct usched *usched_init(void);
void usched_schedulerclock(struct lwp *, sysclock_t, sysclock_t);
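
/*
 * Illustrative sketch only: with a scheduler structure like the
 * hypothetical "usched_example" shown earlier, registration and removal
 * would go through usched_ctl() using the USCH_ADD/USCH_REM flags:
 *
 *	error = usched_ctl(&usched_example, USCH_ADD);	(register)
 *	error = usched_ctl(&usched_example, USCH_REM);	(unregister)
 */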

#endif

#if !defined(_KERNEL) || defined(_KERNEL_VIRTUAL)

int usched_set(pid_t, int, void *, int);

#endif
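
/*
 * Illustrative sketch only: from userland, usched_set() takes a pid, one
 * of the USCHED_* commands above, and a command-specific data buffer.
 * Locking the calling process onto a particular cpu might look like the
 * following (this assumes the cpu id is passed as an int, as described in
 * usched_set(2)):
 *
 *	int cpuid = 0;
 *
 *	if (usched_set(getpid(), USCHED_SET_CPU, &cpuid, sizeof(cpuid)) < 0)
 *		err(1, "usched_set");
 */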

#endif