/* xref: /dragonfly/sys/sys/usched_dfly.h (revision 335b9e93) */
1 /*
2  * Copyright (c) 2012-2020 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>,
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #ifndef _SYS_USCHED_DFLY_H_
36 #define _SYS_USCHED_DFLY_H_
37 
38 /*
39  * Priorities.  Note that with 32 run queues per scheduler each queue
40  * represents four priority levels.
41  */
42 
#define MAXPRI			128	/* number of priority levels per class */
#define PRIMASK			(MAXPRI - 1)	/* mask priority within its class */
#define PRIBASE_REALTIME	0		/* real-time class base priority */
#define PRIBASE_NORMAL		MAXPRI		/* normal (timeshare) class base */
#define PRIBASE_IDLE		(MAXPRI * 2)	/* idle class base */
#define PRIBASE_THREAD		(MAXPRI * 3)	/* kernel-thread class base */
#define PRIBASE_NULL		(MAXPRI * 4)	/* one past last class (sentinel) */

#define NQS	32			/* 32 run queues. */
#define PPQ	(MAXPRI / NQS)		/* priorities per queue */
#define PPQMASK	(PPQ - 1)		/* mask priority offset within a queue */
54 
55 /*
56  * NICE_QS	- maximum queues nice can shift the process
57  * EST_QS	- maximum queues estcpu can shift the process
58  *
59  * ESTCPUPPQ	- number of estcpu units per priority queue
60  * ESTCPUMAX	- number of estcpu units
61  *
62  * Remember that NICE runs over the whole -20 to +20 range.
63  */
#define NICE_QS		24	/* -20 to +20 shift in whole queues */
#define EST_QS		20	/* 0-MAX shift in whole queues */
#define ESTCPUPPQ	512	/* estcpu units represented by one queue */
#define ESTCPUMAX	(ESTCPUPPQ * EST_QS)	/* estcpu saturation value */
#define PRIO_RANGE	(PRIO_MAX - PRIO_MIN + 1)	/* size of the nice range */

/* Clamp an estcpu value to the ESTCPUMAX ceiling. */
#define ESTCPULIM(v)	min((v), ESTCPUMAX)
71 
/* A run queue is a tail queue of lwps. */
TAILQ_HEAD(rq, lwp);

/*
 * Convenience accessors for the dfly scheduler's per-lwp state, which
 * lives in the lwp_usdata union of struct lwp.
 */
#define lwp_priority	lwp_usdata.dfly.priority
#define lwp_forked	lwp_usdata.dfly.forked
#define lwp_rqindex	lwp_usdata.dfly.rqindex
#define lwp_estcpu	lwp_usdata.dfly.estcpu
#define lwp_estfast	lwp_usdata.dfly.estfast
#define lwp_uload	lwp_usdata.dfly.uload
#define lwp_rqtype	lwp_usdata.dfly.rqtype
#define lwp_qcpu	lwp_usdata.dfly.qcpu
#define lwp_rrcount	lwp_usdata.dfly.rrcount
83 
84 static __inline int
85 lptouload(struct lwp *lp)
86 {
87 	int uload;
88 
89 	uload = lp->lwp_estcpu / NQS;
90 	uload -= uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);
91 
92 	return uload;
93 }
94 
95 /*
96  * DFly scheduler pcpu structure.  Note that the pcpu uload field must
97  * be 64-bits to avoid overflowing in the situation where more than 32768
98  * processes are on a single cpu's queue.  Since high-end systems can
99  * easily run 900,000+ processes, we have to deal with it.
100  */
/* Per-cpu scheduler state; one instance per cpu, cache-line aligned. */
struct usched_dfly_pcpu {
	struct spinlock spin;		/* per-cpu spinlock (NOTE(review):
					 * presumably guards this structure's
					 * queues/fields — confirm in
					 * usched_dfly.c) */
	struct thread	*helper_thread;	/* scheduler helper kernel thread */
	struct globaldata *gd;		/* this cpu's globaldata */
	u_short		scancpu;	/* NOTE(review): looks like a rotor
					 * for scanning remote cpus — confirm */
	short		upri;		/* user priority associated with cpu */
	long		uload;		/* 64-bits to avoid overflow (1) */
	int		ucount;		/* count of user lwps accounted here */
	int		flags;		/* DFLY_PCPU_* flags (below) */
	struct lwp	*uschedcp;	/* current user-scheduled lwp, if any */
	struct rq	queues[NQS];	/* normal (timeshare) run queues */
	struct rq	rtqueues[NQS];	/* real-time run queues */
	struct rq	idqueues[NQS];	/* idle-class run queues */
	u_int32_t	queuebits;	/* one bit per non-empty queues[] */
	u_int32_t	rtqueuebits;	/* one bit per non-empty rtqueues[] */
	u_int32_t	idqueuebits;	/* one bit per non-empty idqueues[] */
	int		runqcount;	/* total lwps queued on this cpu */
	int		cpuid;		/* owning cpu's id */
	cpumask_t	cpumask;	/* mask form of cpuid */
	cpu_node_t	*cpunode;	/* cpu topology node for this cpu */
} __cachealign;
122 
123 /*
124  * Reflecting bits in the global atomic masks allows us to avoid
125  * a certain degree of global ping-ponging.
126  */
#define DFLY_PCPU_RDYMASK	0x0001	/* reflect rdyprocmask */
#define DFLY_PCPU_CURMASK	0x0002	/* reflect curprocmask */

/* Handle to a cpu's scheduler state. */
typedef struct usched_dfly_pcpu	*dfly_pcpu_t;
131 
132 #endif
133