xref: /dragonfly/sys/sys/thread.h (revision 16777b6b)
1 /*
2  * SYS/THREAD.H
3  *
 4  *	Implements the architecture independent portion of the LWKT
5  *	subsystem.
6  *
7  * $DragonFly: src/sys/sys/thread.h,v 1.32 2003/09/24 18:37:51 dillon Exp $
8  */
9 
10 #ifndef _SYS_THREAD_H_
11 #define _SYS_THREAD_H_
12 
13 #ifndef _SYS_QUEUE_H_
14 #include <sys/queue.h>		/* TAILQ_* macros */
15 #endif
16 
17 struct globaldata;
18 struct proc;
19 struct thread;
20 struct lwkt_queue;
21 struct lwkt_token;
22 struct lwkt_wait;
23 struct lwkt_ipiq;
24 struct lwkt_cpu_msg;
25 struct lwkt_cpu_port;
26 struct lwkt_rwlock;
27 struct lwkt_msg;
28 struct lwkt_port;
29 union sysunion;
30 
31 typedef struct lwkt_queue	*lwkt_queue_t;
32 typedef struct lwkt_token	*lwkt_token_t;
33 typedef struct lwkt_wait	*lwkt_wait_t;
34 typedef struct lwkt_cpu_msg	*lwkt_cpu_msg_t;
35 typedef struct lwkt_cpu_port	*lwkt_cpu_port_t;
36 typedef struct lwkt_rwlock	*lwkt_rwlock_t;
37 typedef struct lwkt_ipiq	*lwkt_ipiq_t;
38 typedef struct thread 		*thread_t;
39 
40 typedef TAILQ_HEAD(lwkt_queue, thread) lwkt_queue;
41 
42 #ifndef _MACHINE_THREAD_H_
43 #include <machine/thread.h>		/* md_thread */
44 #endif
45 #ifndef _SYS_MSGPORT_H_
46 #include <sys/msgport.h>
47 #endif
48 
49 /*
50  * Tokens arbitrate access to information.  They are 'soft' arbitrators
 51  * in that they are associated with cpus rather than threads, making the
 52  * optimal acquisition case very fast if your cpu already happens to own the
53  * token you are requesting.
54  */
55 typedef struct lwkt_token {
56     int		t_cpu;		/* cpu that currently owns the token */
57     int		t_reqcpu;	/* return ownership to this cpu on release */
58     int		t_gen;		/* generation number; bumped on ownership
59 				 * changes — used by lwkt_gentoken() callers
60 				 * to detect loss of the token while blocked */
61 #if 0
62     int		t_pri;		/* raise thread priority to hold token */
63 #endif
64 } lwkt_token;
63 
64 /*
65  * Wait structures deal with blocked threads.  Due to the way remote cpus
66  * interact with these structures stable storage must be used.
67  */
68 typedef struct lwkt_wait {
69     lwkt_queue	wa_waitq;	/* list of waiting threads */
70     lwkt_token	wa_token;	/* who currently owns the list */
71     int		wa_gen;		/* generation count, passed back through
				 * lwkt_block()'s *gen argument */
72     int		wa_count;	/* NOTE(review): presumably the number of
				 * blocked threads (cf. lwkt_signal(w, count))
				 * — confirm against kern/lwkt_thread.c */
73 } lwkt_wait;
74 
75 #define MAXCPUFIFO      16	/* power of 2 */
76 #define MAXCPUFIFO_MASK	(MAXCPUFIFO - 1)
77 
78 typedef void (*ipifunc_t)(void *arg);
79 
80 typedef struct lwkt_ipiq {
81     int		ip_rindex;      /* read index, only written by target cpu */
82     int		ip_xindex;      /* written by target, indicates completion */
83     int		ip_windex;      /* write index, only written by source cpu */
84     ipifunc_t	ip_func[MAXCPUFIFO];	/* function to run on target cpu */
85     void	*ip_arg[MAXCPUFIFO];	/* argument for matching ip_func entry */
86 } lwkt_ipiq;
87 
88 /*
89  * The standard message and queue structure used for communications between
90  * cpus.  Messages are typically queued via a machine-specific non-linked
91  * FIFO matrix allowing any cpu to send a message to any other cpu without
92  * blocking.
93  */
94 typedef struct lwkt_cpu_msg {
95     void	(*cm_func)(lwkt_cpu_msg_t msg);	/* primary dispatch function */
96     int		cm_code;		/* request code if applicable */
97     int		cm_cpu;			/* cpu to send the reply to */
98     thread_t	cm_originator;		/* originating thread for wakeup */
99 } lwkt_cpu_msg;
100 
101 /*
102  * reader/writer lock
103  */
104 typedef struct lwkt_rwlock {
105     lwkt_wait	rw_wait;	/* wait structure for blocked lockers */
106     thread_t	rw_owner;	/* exclusive owner, if held exclusively */
107     int		rw_count;	/* NOTE(review): appears to be the hold
				 * count (shared readers / recursive excl)
				 * — confirm in lwkt_exlock/lwkt_shlock */
108     int		rw_requests;	/* NOTE(review): presumably count of
				 * threads waiting for the lock — confirm */
109 } lwkt_rwlock;
110 
111 #define rw_token	rw_wait.wa_token
112 
113 /*
114  * Thread structure.  Note that ownership of a thread structure is special
115  * cased and there is no 'token'.  A thread is always owned by the cpu
116  * represented by td_gd, any manipulation of the thread by some other cpu
117  * must be done through cpu_*msg() functions.  e.g. you could request
118  * ownership of a thread that way, or hand a thread off to another cpu.
119  *
120  * NOTE: td_pri is bumped by TDPRI_CRIT when entering a critical section,
 121  * but this does not affect how the thread is scheduled by LWKT.
122  */
123 struct md_intr_info;
124 
125 struct thread {
126     TAILQ_ENTRY(thread) td_threadq;	/* link on an LWKT run/wait queue
					 * (see TDF_RUNQ, lwkt_wait) */
127     TAILQ_ENTRY(thread) td_allq;	/* link on per-cpu gd_tdallq
					 * (see TDF_ONALLQ, td_refs) */
128     lwkt_port	td_msgport;	/* built-in message port for replies */
129     struct proc	*td_proc;	/* (optional) associated process */
130     struct pcb	*td_pcb;	/* points to pcb and top of kstack */
131     struct globaldata *td_gd;	/* cpu owning this thread; other cpus must
				 * manipulate it via cpu_*msg() only */
132     const char	*td_wmesg;	/* string name for blockage */
133     void	*td_wchan;	/* waiting on channel */
134     int		td_pri;		/* 0-31, 31=highest priority (note 1) */
135     int		td_flags;	/* TDF flags */
136     int		td_gen;		/* wait queue chasing generation number */
137 				/* if non-NULL, called when a higher-priority
				 * thread wants to preempt us; critpri is the
				 * preemptor's priority (see lwkt_preempt()) */
138     void	(*td_preemptable)(struct thread *td, int critpri);
139     void	(*td_release)(struct thread *td);	/* scheduler release hook */
140     union {			/* per-thread-type private data */
141 	struct md_intr_info *intdata;	/* valid for interrupt threads */
142     } td_info;
143     char	*td_kstack;	/* kernel stack */
144     char	*td_sp;		/* kernel stack pointer for LWKT restore */
145     void	(*td_switch)(struct thread *ntd);	/* md context switch fn */
146     lwkt_wait_t td_wait;	/* thread sitting on wait structure */
147     u_int64_t	td_uticks;	/* Statclock hits in user mode (uS) */
148     u_int64_t	td_sticks;      /* Statclock hits in system mode (uS) */
149     u_int64_t	td_iticks;	/* Statclock hits processing intr (uS) */
150     int		td_locks;	/* lockmgr lock debugging YYY */
151     int		td_refs;	/* hold position in gd_tdallq / hold free */
152 #ifdef SMP
153     int		td_mpcount;	/* MP lock held (count) */
154 #else
155     int		td_unused001;	/* pad so layout matches SMP build */
156 #endif
157     char	td_comm[MAXCOMLEN+1]; /* thread name, typ 16+1 bytes */
158     struct thread *td_preempted; /* we preempted this thread */
159     struct md_thread td_mach;	/* machine-dependent state; must be last
				 * (from <machine/thread.h>) */
160 };
161 
162 /*
163  * Thread flags.  Note that TDF_RUNNING is cleared on the old thread after
164  * we switch to the new one, which is necessary because LWKTs don't need
165  * to hold the BGL.  This flag is used by the exit code and the managed
166  * thread migration code.
167  *
168  * LWKT threads stay on their (per-cpu) run queue while running, not to
169  * be confused with user processes which are removed from the user scheduling
170  * run queue while actually running.
171  */
172 #define TDF_RUNNING		0x0001	/* thread still active */
173 #define TDF_RUNQ		0x0002	/* on an LWKT run queue */
174 #define TDF_PREEMPT_LOCK	0x0004	/* I have been preempted */
175 #define TDF_PREEMPT_DONE	0x0008	/* acknowledge preemption complete */
176 #define TDF_IDLE_NOHLT		0x0010	/* we need to spin */
177 
178 #define TDF_ONALLQ		0x0100	/* on gd_tdallq */
179 #define TDF_ALLOCATED_THREAD	0x0200	/* zalloc allocated thread */
180 #define TDF_ALLOCATED_STACK	0x0400	/* zalloc allocated stack */
181 #define TDF_VERBOSE		0x0800	/* verbose on exit */
182 #define TDF_DEADLKTREAT		0x1000	/* special lockmgr deadlock treatment */
183 #define TDF_STOPREQ		0x2000	/* suspend_kproc */
184 #define TDF_WAKEREQ		0x4000	/* resume_kproc */
185 #define TDF_TIMEOUT		0x8000	/* tsleep timeout */
186 #define TDF_INTTHREAD		0x00010000	/* interrupt thread */
187 
188 /*
189  * Thread priorities.  Typically only one thread from any given
190  * user process scheduling queue is on the LWKT run queue at a time.
191  * Remember that there is one LWKT run queue per cpu.
192  *
193  * Critical sections are handled by bumping td_pri above TDPRI_MAX, which
194  * causes interrupts to be masked as they occur.  When this occurs a
195  * rollup flag will be set in mycpu->gd_reqflags.
196  */
197 #define TDPRI_IDLE_THREAD	0	/* the idle thread */
198 #define TDPRI_USER_IDLE		4	/* user scheduler idle */
199 #define TDPRI_USER_NORM		6	/* user scheduler normal */
200 #define TDPRI_USER_REAL		8	/* user scheduler real time */
201 #define TDPRI_KERN_USER		10	/* kernel / block in syscall */
202 #define TDPRI_KERN_DAEMON	12	/* kernel daemon (pageout, etc) */
203 #define TDPRI_SOFT_NORM		14	/* kernel / normal */
204 #define TDPRI_SOFT_TIMER	16	/* kernel / timer */
205 #define TDPRI_EXITING		19	/* exiting thread */
206 #define TDPRI_INT_SUPPORT	20	/* kernel / high priority support */
207 #define TDPRI_INT_LOW		27	/* low priority interrupt */
208 #define TDPRI_INT_MED		28	/* medium priority interrupt */
209 #define TDPRI_INT_HIGH		29	/* high priority interrupt */
210 #define TDPRI_MAX		31
211 
212 #define TDPRI_MASK		31
213 #define TDPRI_CRIT		32	/* high bits of td_pri used for crit */
214 
215 #define CACHE_NTHREADS		6
216 
217 #define IN_CRITICAL_SECT(td)	((td)->td_pri >= TDPRI_CRIT)
218 
219 #ifdef _KERNEL
220 
221 extern struct vm_zone	*thread_zone;
222 
223 extern struct thread *lwkt_alloc_thread(struct thread *template);
224 extern void lwkt_init_thread(struct thread *td, void *stack, int flags,
225 	struct globaldata *gd);
226 extern void lwkt_set_comm(thread_t td, const char *ctl, ...);
227 extern void lwkt_wait_free(struct thread *td);
228 extern void lwkt_free_thread(struct thread *td);
229 extern void lwkt_init_wait(struct lwkt_wait *w);
230 extern void lwkt_gdinit(struct globaldata *gd);
231 extern void lwkt_switch(void);
232 extern void lwkt_maybe_switch(void);
233 extern void lwkt_preempt(thread_t ntd, int critpri);
234 extern void lwkt_schedule(thread_t td);
235 extern void lwkt_schedule_self(void);
236 extern void lwkt_deschedule(thread_t td);
237 extern void lwkt_deschedule_self(void);
238 extern void lwkt_acquire(thread_t td);
239 extern void lwkt_yield(void);
240 extern void lwkt_yield_quick(void);
241 extern void lwkt_hold(thread_t td);
242 extern void lwkt_rele(thread_t td);
243 
244 extern void lwkt_block(lwkt_wait_t w, const char *wmesg, int *gen);
245 extern void lwkt_signal(lwkt_wait_t w, int count);
246 extern int lwkt_trytoken(lwkt_token_t tok);
247 extern int lwkt_gettoken(lwkt_token_t tok);
248 extern int lwkt_gentoken(lwkt_token_t tok, int *gen);
249 extern void lwkt_reltoken(lwkt_token_t tok);
250 extern void lwkt_inittoken(lwkt_token_t tok);
251 extern int  lwkt_regettoken(lwkt_token_t tok);
252 extern void lwkt_rwlock_init(lwkt_rwlock_t lock);
253 extern void lwkt_exlock(lwkt_rwlock_t lock, const char *wmesg);
254 extern void lwkt_shlock(lwkt_rwlock_t lock, const char *wmesg);
255 extern void lwkt_exunlock(lwkt_rwlock_t lock);
256 extern void lwkt_shunlock(lwkt_rwlock_t lock);
257 extern void lwkt_setpri(thread_t td, int pri);
258 extern void lwkt_setpri_self(int pri);
259 extern int  lwkt_send_ipiq(int dcpu, ipifunc_t func, void *arg);
260 extern void lwkt_send_ipiq_mask(u_int32_t mask, ipifunc_t func, void *arg);
261 extern void lwkt_wait_ipiq(int dcpu, int seq);
262 extern void lwkt_process_ipiq(void);
263 extern void crit_panic(void);
264 extern struct proc *lwkt_preempted_proc(void);
265 
266 extern int  lwkt_create (void (*func)(void *), void *arg, struct thread **ptd,
267 			    struct thread *template, int tdflags,
268 			    const char *ctl, ...);
269 extern void lwkt_exit (void) __dead2;
270 
271 #endif
272 
273 #endif
274 
275