/*	$NetBSD: lwp.h,v 1.170 2015/03/31 01:10:02 matt Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010
 *    The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _SYS_LWP_H_
#define _SYS_LWP_H_

#if defined(_KERNEL) || defined(_KMEMUSER)

#include <sys/param.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/kcpuset.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/specificdata.h>
#include <sys/syncobj.h>
#include <sys/resource.h>

#if defined(_KERNEL)
struct lwp;
/* forward declare this for <machine/cpu.h> so it can get l_cpu. */
static inline struct cpu_info *lwp_getcpu(struct lwp *);
#include <machine/cpu.h>		/* curcpu() and cpu_info */
#endif

#include <machine/proc.h>		/* Machine-dependent proc substruct. */

/*
 * Lightweight process.  Field markings and the corresponding locks:
 *
 * a:	proc_lock
 * c:	condition variable interlock, passed to cv_wait()
 * l:	*l_mutex
 * p:	l_proc->p_lock
 * s:	spc_mutex, which may or may not be referenced by l_mutex
 * S:	l_selcluster->sc_lock
 * (:	unlocked, stable
 * !:	unlocked, may only be reliably accessed by the LWP itself
 *
 * Fields are clustered together by usage (to increase the likelihood
 * of cache hits) and by size (to reduce dead space in the structure).
 */
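
/*
 * For example (an illustrative sketch, not code from this file): a
 * field marked 'p:' may only be accessed with the owning process's
 * p_lock held:
 *
 *	mutex_enter(l->l_proc->p_lock);
 *	prflag = l->l_prflag;		(a 'p:' field)
 *	mutex_exit(l->l_proc->p_lock);
 */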

#include <sys/pcu.h>

struct lockdebug;
struct sysent;

struct lwp {
	/* Scheduling and overall state. */
	TAILQ_ENTRY(lwp) l_runq;	/* s: run queue */
	union {
		void *	info;		/* s: scheduler-specific structure */
		u_int	timeslice;	/* l: time-quantum for SCHED_M2 */
	} l_sched;
	struct cpu_info *volatile l_cpu;/* s: CPU we're on if LSONPROC */
	kmutex_t * volatile l_mutex;	/* l: ptr to mutex on sched state */
	int		l_ctxswtch;	/* l: performing a context switch */
	void		*l_addr;	/* l: PCB address; use lwp_getpcb() */
	struct mdlwp	l_md;		/* l: machine-dependent fields. */
	int		l_flag;		/* l: misc flag values */
	int		l_stat;		/* l: overall LWP status */
	struct bintime	l_rtime;	/* l: real time */
	struct bintime	l_stime;	/* l: start time (while ONPROC) */
	u_int		l_swtime;	/* l: time swapped in or out */
	u_int		l_rticks;	/* l: Saved start time of run */
	u_int		l_rticksum;	/* l: Sum of ticks spent running */
	u_int		l_slpticks;	/* l: Saved start time of sleep */
	u_int		l_slpticksum;	/* l: Sum of ticks spent sleeping */
	int		l_biglocks;	/* l: biglock count before sleep */
	int		l_class;	/* l: scheduling class */
	int		l_kpriority;	/* !: has kernel priority boost */
	pri_t		l_kpribase;	/* !: kernel priority base level */
	pri_t		l_priority;	/* l: scheduler priority */
	pri_t		l_inheritedprio;/* l: inherited priority */
	SLIST_HEAD(, turnstile) l_pi_lenders; /* l: ts lending us priority */
	uint64_t	l_ncsw;		/* l: total context switches */
	uint64_t	l_nivcsw;	/* l: involuntary context switches */
	u_int		l_cpticks;	/* (: Ticks of CPU time */
	fixpt_t		l_pctcpu;	/* p: %cpu during l_swtime */
	fixpt_t		l_estcpu;	/* l: cpu time for SCHED_4BSD */
	psetid_t	l_psid;		/* l: assigned processor-set ID */
	struct cpu_info *l_target_cpu;	/* l: target CPU to migrate */
	struct lwpctl	*l_lwpctl;	/* p: lwpctl block kernel address */
	struct lcpage	*l_lcpage;	/* p: lwpctl containing page */
	kcpuset_t	*l_affinity;	/* l: CPU set for affinity */

	/* Synchronisation. */
	struct turnstile *l_ts;		/* l: current turnstile */
	struct syncobj	*l_syncobj;	/* l: sync object operations set */
	TAILQ_ENTRY(lwp) l_sleepchain;	/* l: sleep queue */
	wchan_t		l_wchan;	/* l: sleep address */
	const char	*l_wmesg;	/* l: reason for sleep */
	struct sleepq	*l_sleepq;	/* l: current sleep queue */
	int		l_sleeperr;	/* !: error before unblock */
	u_int		l_slptime;	/* l: time since last blocked */
	callout_t	l_timeout_ch;	/* !: callout for tsleep */
	u_int		l_emap_gen;	/* !: emap generation number */
	kcondvar_t	l_waitcv;	/* a: vfork() wait */

#if PCU_UNIT_COUNT > 0
	struct cpu_info	* volatile l_pcu_cpu[PCU_UNIT_COUNT];
	uint32_t	l_pcu_valid;
#endif

	/* Process level and global state, misc. */
	LIST_ENTRY(lwp)	l_list;		/* a: entry on list of all LWPs */
	void		*l_ctxlink;	/* p: uc_link {get,set}context */
	struct proc	*l_proc;	/* p: parent process */
	LIST_ENTRY(lwp)	l_sibling;	/* p: entry on proc's list of LWPs */
	lwpid_t		l_waiter;	/* p: first LWP waiting on us */
	lwpid_t		l_waitingfor;	/* p: specific LWP we are waiting on */
	int		l_prflag;	/* p: process level flags */
	u_int		l_refcnt;	/* p: reference count on this LWP */
	lwpid_t		l_lid;		/* (: LWP identifier; local to proc */
	char		*l_name;	/* (: name, optional */

	/* State of select() or poll(). */
	int		l_selflag;	/* S: polling state flags */
	SLIST_HEAD(,selinfo) l_selwait;	/* S: descriptors waited on */
	int		l_selret;	/* S: return value of select/poll */
	uintptr_t	l_selrec;	/* !: argument for selrecord() */
	struct selcluster *l_selcluster;/* !: associated cluster data */
	void *		l_selbits;	/* (: select() bit-field */
	size_t		l_selni;	/* (: size of a single bit-field */

	/* Signals. */
	int		l_sigrestore;	/* p: need to restore old sig mask */
	sigset_t	l_sigwaitset;	/* p: signals being waited for */
	kcondvar_t	l_sigcv;	/* p: for sigsuspend() */
	struct ksiginfo	*l_sigwaited;	/* p: delivered signals from set */
	sigpend_t	*l_sigpendset;	/* p: XXX issignal()/postsig() baton */
	LIST_ENTRY(lwp)	l_sigwaiter;	/* p: chain on list of waiting LWPs */
	stack_t		l_sigstk;	/* p: sp & on stack state variable */
	sigset_t	l_sigmask;	/* p: signal mask */
	sigpend_t	l_sigpend;	/* p: signals to this LWP */
	sigset_t	l_sigoldmask;	/* p: mask for sigpause */

	/* Private data. */
	specificdata_reference
		l_specdataref;		/* !: subsystem lwp-specific data */
	struct timespec l_ktrcsw;	/* !: for ktrace CSW trace XXX */
	void		*l_private;	/* !: svr4-style lwp-private data */
	struct lwp	*l_switchto;	/* !: mi_switch: switch to this LWP */
	struct kauth_cred *l_cred;	/* !: cached credentials */
	struct filedesc	*l_fd;		/* !: cached copy of proc::p_fd */
	void		*l_emuldata;	/* !: kernel lwp-private data */
	u_int		l_cv_signalled;	/* c: restarted by cv_signal() */
	u_short		l_shlocks;	/* !: lockdebug: shared locks held */
	u_short		l_exlocks;	/* !: lockdebug: excl. locks held */
	u_short		l_unused;	/* !: unused */
	u_short		l_blcnt;	/* !: count of kernel_lock held */
	int		l_nopreempt;	/* !: don't preempt me! */
	u_int		l_dopreempt;	/* s: kernel preemption pending */
	int		l_pflag;	/* !: LWP private flags */
	int		l_dupfd;	/* !: side return from cloning devs XXX */
	const struct sysent * volatile l_sysent;/* !: currently active syscall */
	struct rusage	l_ru;		/* !: accounting information */
	uint64_t	l_pfailtime;	/* !: for kernel preemption */
	uintptr_t	l_pfailaddr;	/* !: for kernel preemption */
	uintptr_t	l_pfaillock;	/* !: for kernel preemption */
	_TAILQ_HEAD(,struct lockdebug,volatile) l_ld_locks;/* !: locks held by LWP */
	int		l_tcgen;	/* !: for timecounter removal */

	/* These are only used by 'options SYSCALL_TIMES'. */
	uint32_t	l_syscall_time;	/* !: time epoch for current syscall */
	uint64_t	*l_syscall_counter; /* !: counter for current process */

	struct kdtrace_thread *l_dtrace; /* (: DTrace-specific data. */
};

/*
 * UAREA_PCB_OFFSET: the offset of the PCB structure within the uarea.
 * MD code may define it in <machine/proc.h> to indicate a different
 * uarea layout.
 */
#ifndef UAREA_PCB_OFFSET
#define	UAREA_PCB_OFFSET	0
#endif

LIST_HEAD(lwplist, lwp);		/* A list of LWPs. */

#ifdef _KERNEL
extern struct lwplist	alllwp;		/* List of all LWPs. */
extern lwp_t		lwp0;		/* LWP for proc0. */
extern int		maxlwp __read_mostly;	/* max number of lwps */
#ifndef MAXLWP
#define	MAXLWP		2048
#endif
#ifndef	__HAVE_CPU_MAXLWP
#define	cpu_maxlwp()	MAXLWP
#endif
#endif

#endif /* _KERNEL || _KMEMUSER */

/* These flags are kept in l_flag. */
#define	LW_IDLE		0x00000001 /* Idle lwp. */
#define	LW_LWPCTL	0x00000002 /* Adjust lwpctl in userret */
#define	LW_SINTR	0x00000080 /* Sleep is interruptible. */
#define	LW_SYSTEM	0x00000200 /* Kernel thread */
#define	LW_WSUSPEND	0x00020000 /* Suspend before return to user */
#define	LW_BATCH	0x00040000 /* LWP tends to hog CPU */
#define	LW_WCORE	0x00080000 /* Stop for core dump on return to user */
#define	LW_WEXIT	0x00100000 /* Exit before return to user */
#define	LW_PENDSIG	0x01000000 /* Pending signal for us */
#define	LW_CANCELLED	0x02000000 /* tsleep should not sleep */
#define	LW_WREBOOT	0x08000000 /* System is rebooting, please suspend */
#define	LW_UNPARKED	0x10000000 /* Unpark op pending */
#define	LW_RUMP_CLEAR	0x40000000 /* Clear curlwp in RUMP scheduler */
#define	LW_RUMP_QEXIT	0x80000000 /* LWP should exit ASAP */

/* The second set of flags is kept in l_pflag. */
#define	LP_KTRACTIVE	0x00000001 /* Executing ktrace operation */
#define	LP_KTRCSW	0x00000002 /* ktrace context switch marker */
#define	LP_KTRCSWUSER	0x00000004 /* ktrace context switch marker (user) */
#define	LP_PIDLID	0x00000008 /* free LID from PID space on exit */
#define	LP_OWEUPC	0x00000010 /* Owe user profiling tick */
#define	LP_MPSAFE	0x00000020 /* Starts life without kernel_lock */
#define	LP_INTR		0x00000040 /* Soft interrupt handler */
#define	LP_SYSCTLWRITE	0x00000080 /* sysctl write lock held */
#define	LP_MUSTJOIN	0x00000100 /* Must join kthread on exit */
#define	LP_VFORKWAIT	0x00000200 /* Waiting at vfork() for a child */
#define	LP_TIMEINTR	0x00010000 /* Time this soft interrupt */
#define	LP_RUNNING	0x20000000 /* Active on a CPU */
#define	LP_BOUND	0x80000000 /* Bound to a CPU */

/* The third set is kept in l_prflag. */
#define	LPR_DETACHED	0x00800000 /* Won't be waited for. */
#define	LPR_CRMOD	0x00000100 /* Credentials modified */

/*
 * Mask indicating that there is "exceptional" work to be done on return to
 * user.
 */
#define	LW_USERRET	\
    (LW_WEXIT | LW_PENDSIG | LW_WREBOOT | LW_WSUSPEND | LW_WCORE | LW_LWPCTL)
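
/*
 * A sketch of how this mask is typically consumed: MD code on the
 * return-to-user path tests l_flag against LW_USERRET and calls
 * lwp_userret() (declared below) to handle the pending work:
 *
 *	if (__predict_false(l->l_flag & LW_USERRET))
 *		lwp_userret(l);
 */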

/*
 * Status values.
 *
 * A note about LSRUN and LSONPROC: LSRUN indicates that a process is
 * runnable but *not* yet running, i.e. is on a run queue.  LSONPROC
 * indicates that the process is actually executing on a CPU, i.e.
 * it is no longer on a run queue.
 */
#define	LSIDL		1	/* Process being created by fork. */
#define	LSRUN		2	/* Currently runnable. */
#define	LSSLEEP		3	/* Sleeping on an address. */
#define	LSSTOP		4	/* Process debugging or suspension. */
#define	LSZOMB		5	/* Awaiting collection by parent. */
/* unused, for source compatibility with NetBSD 4.0 and earlier. */
#define	LSDEAD		6	/* Process is almost a zombie. */
#define	LSONPROC	7	/* Process is currently on a CPU. */
#define	LSSUSPENDED	8	/* Not running, not signalable. */
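
/*
 * A rough life-cycle sketch inferred from the states above: an LWP is
 * created in LSIDL, alternates between LSRUN (queued) and LSONPROC
 * (executing), may detour through LSSLEEP, LSSTOP or LSSUSPENDED, and
 * finally becomes LSZOMB awaiting collection.
 */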

#if defined(_KERNEL) || defined(_KMEMUSER)
static inline void *
lwp_getpcb(struct lwp *l)
{

	return l->l_addr;
}
#endif /* _KERNEL || _KMEMUSER */
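
/*
 * Example (an illustrative sketch; the PCB type is machine-dependent,
 * struct pcb from <machine/pcb.h> is assumed here):
 *
 *	struct pcb *pcb = lwp_getpcb(curlwp);
 */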

#ifdef _KERNEL
#define	LWP_CACHE_CREDS(l, p)						\
do {									\
	(void)p;							\
	if (__predict_false((l)->l_prflag & LPR_CRMOD))			\
		lwp_update_creds(l);					\
} while (/* CONSTCOND */ 0)
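
/*
 * Usage sketch (assumed pattern, not code from this file): refresh the
 * cached l_cred on kernel entry before relying on it:
 *
 *	struct lwp *l = curlwp;
 *	LWP_CACHE_CREDS(l, l->l_proc);
 *	... use l->l_cred ...
 */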

void	lwpinit(void);
void	lwp0_init(void);
void	lwp_sys_init(void);

void	lwp_startup(lwp_t *, lwp_t *);
void	startlwp(void *);

int	lwp_locked(lwp_t *, kmutex_t *);
void	lwp_setlock(lwp_t *, kmutex_t *);
void	lwp_unlock_to(lwp_t *, kmutex_t *);
int	lwp_trylock(lwp_t *);
void	lwp_addref(lwp_t *);
void	lwp_delref(lwp_t *);
void	lwp_delref2(lwp_t *);
void	lwp_drainrefs(lwp_t *);
bool	lwp_alive(lwp_t *);
lwp_t	*lwp_find_first(proc_t *);
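
/*
 * Reference-counting sketch (an assumed usage pattern): pin an LWP
 * found via lookup so it cannot be freed while in use; l_refcnt is
 * covered by p_lock, as marked in struct lwp above:
 *
 *	mutex_enter(p->p_lock);
 *	if ((l = lwp_find(p, lid)) != NULL)
 *		lwp_addref(l);
 *	mutex_exit(p->p_lock);
 *	if (l != NULL) {
 *		... use l ...
 *		lwp_delref(l);
 *	}
 */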

int	lwp_wait(lwp_t *, lwpid_t, lwpid_t *, bool);
void	lwp_continue(lwp_t *);
void	lwp_unsleep(lwp_t *, bool);
void	lwp_unstop(lwp_t *);
void	lwp_exit(lwp_t *);
void	lwp_exit_switchaway(lwp_t *) __dead;
int	lwp_suspend(lwp_t *, lwp_t *);
int	lwp_create1(lwp_t *, const void *, size_t, u_long, lwpid_t *);
void	lwp_update_creds(lwp_t *);
void	lwp_migrate(lwp_t *, struct cpu_info *);
lwp_t *	lwp_find2(pid_t, lwpid_t);
lwp_t *	lwp_find(proc_t *, int);
void	lwp_userret(lwp_t *);
void	lwp_need_userret(lwp_t *);
void	lwp_free(lwp_t *, bool, bool);
uint64_t lwp_pctr(void);
int	lwp_setprivate(lwp_t *, void *);
int	do_lwp_create(lwp_t *, void *, u_long, lwpid_t *);

void	lwpinit_specificdata(void);
int	lwp_specific_key_create(specificdata_key_t *, specificdata_dtor_t);
void	lwp_specific_key_delete(specificdata_key_t);
void	lwp_initspecific(lwp_t *);
void	lwp_finispecific(lwp_t *);
void	*lwp_getspecific(specificdata_key_t);
#if defined(_LWP_API_PRIVATE)
void	*_lwp_getspecific_by_lwp(lwp_t *, specificdata_key_t);
#endif
void	lwp_setspecific(specificdata_key_t, void *);
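
/*
 * LWP-specific data sketch (the "foo_key"/"foo_dtor" names are
 * illustrative, not part of this API):
 *
 *	static specificdata_key_t foo_key;
 *
 *	lwp_specific_key_create(&foo_key, foo_dtor);	(once, at init)
 *	lwp_setspecific(foo_key, data);			(current LWP)
 *	data = lwp_getspecific(foo_key);
 */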

/* Syscalls. */
int	lwp_park(clockid_t, int, struct timespec *, const void *);
int	lwp_unpark(lwpid_t, const void *);

/* DDB. */
void	lwp_whatis(uintptr_t, void (*)(const char *, ...) __printflike(1, 2));

/*
 * Lock an LWP. XXX _MODULE
 */
static inline void
lwp_lock(lwp_t *l)
{
	kmutex_t *old = l->l_mutex;

	/*
	 * Note: mutex_spin_enter() will have posted a read barrier.
	 * Re-test l->l_mutex.  If it has changed, we need to try again.
	 */
	mutex_spin_enter(old);
	while (__predict_false(l->l_mutex != old)) {
		mutex_spin_exit(old);
		old = l->l_mutex;
		mutex_spin_enter(old);
	}
}

/*
 * Unlock an LWP. XXX _MODULE
 */
static inline void
lwp_unlock(lwp_t *l)
{
	mutex_spin_exit(l->l_mutex);
}
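
/*
 * Typical usage (sketch): take the LWP lock before touching any field
 * marked 'l:' in struct lwp above:
 *
 *	lwp_lock(l);
 *	stat = l->l_stat;
 *	lwp_unlock(l);
 *
 * The retry loop in lwp_lock() is needed because l_mutex may be
 * re-pointed (e.g. via lwp_setlock()) while we were spinning on the
 * old mutex.
 */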

static inline void
lwp_changepri(lwp_t *l, pri_t pri)
{
	KASSERT(mutex_owned(l->l_mutex));

	if (l->l_priority == pri)
		return;

	(*l->l_syncobj->sobj_changepri)(l, pri);
	KASSERT(l->l_priority == pri);
}

static inline void
lwp_lendpri(lwp_t *l, pri_t pri)
{
	KASSERT(mutex_owned(l->l_mutex));

	if (l->l_inheritedprio == pri)
		return;

	(*l->l_syncobj->sobj_lendpri)(l, pri);
	KASSERT(l->l_inheritedprio == pri);
}

static inline pri_t
lwp_eprio(lwp_t *l)
{
	pri_t pri;

	pri = l->l_priority;
	if ((l->l_flag & LW_SYSTEM) == 0 && l->l_kpriority && pri < PRI_KERNEL)
		pri = (pri >> 1) + l->l_kpribase;
	return MAX(l->l_inheritedprio, pri);
}
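
/*
 * Worked example (illustrative numbers): for a user LWP with
 * l_priority 40 (assuming 40 < PRI_KERNEL), l_kpriority set and
 * l_kpribase == PRI_KERNEL, the boosted priority is
 * (40 >> 1) + PRI_KERNEL = 20 + PRI_KERNEL.  The effective priority
 * is then the maximum of that and any priority lent to us via
 * l_inheritedprio.
 */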

int lwp_create(lwp_t *, struct proc *, vaddr_t, int,
    void *, size_t, void (*)(void *), void *, lwp_t **, int);

/*
 * XXX _MODULE
 * We should provide real stubs for the below that modules can use.
 */

static inline void
spc_lock(struct cpu_info *ci)
{
	mutex_spin_enter(ci->ci_schedstate.spc_mutex);
}

static inline void
spc_unlock(struct cpu_info *ci)
{
	mutex_spin_exit(ci->ci_schedstate.spc_mutex);
}

static inline void
spc_dlock(struct cpu_info *ci1, struct cpu_info *ci2)
{
	struct schedstate_percpu *spc1 = &ci1->ci_schedstate;
	struct schedstate_percpu *spc2 = &ci2->ci_schedstate;

	KASSERT(ci1 != ci2);
	if (ci1 < ci2) {
		mutex_spin_enter(spc1->spc_mutex);
		mutex_spin_enter(spc2->spc_mutex);
	} else {
		mutex_spin_enter(spc2->spc_mutex);
		mutex_spin_enter(spc1->spc_mutex);
	}
}
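
/*
 * Design note: spc_dlock() acquires the two run-queue locks in a
 * global order (by cpu_info address), so two CPUs double-locking the
 * same pair with the arguments in opposite order cannot deadlock.
 */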

/*
 * Allow machine-dependent code to override curlwp in <machine/cpu.h> for
 * its own convenience.  Otherwise, we declare it as appropriate.
 */
#if !defined(curlwp)
#if defined(MULTIPROCESSOR)
#define	curlwp		curcpu()->ci_curlwp	/* Current running LWP */
#else
extern struct lwp	*curlwp;		/* Current running LWP */
#endif /* MULTIPROCESSOR */
#endif /* ! curlwp */
#define	curproc		(curlwp->l_proc)

/*
 * This provides a way for <machine/cpu.h> to get l_cpu for curlwp before
 * struct lwp is defined.
 */
static inline struct cpu_info *
lwp_getcpu(struct lwp *l)
{
	return l->l_cpu;
}

static inline bool
CURCPU_IDLE_P(void)
{
	struct cpu_info *ci = curcpu();
	return ci->ci_data.cpu_onproc == ci->ci_data.cpu_idlelwp;
}

/*
 * Disable and re-enable preemption.  Only for low-level kernel
 * use.  Device drivers and anything that could potentially be
 * compiled as a module should use kpreempt_disable() and
 * kpreempt_enable().
 */
static inline void
KPREEMPT_DISABLE(lwp_t *l)
{

	KASSERT(l == curlwp);
	l->l_nopreempt++;
	__insn_barrier();
}

static inline void
KPREEMPT_ENABLE(lwp_t *l)
{

	KASSERT(l == curlwp);
	KASSERT(l->l_nopreempt > 0);
	__insn_barrier();
	if (--l->l_nopreempt != 0)
		return;
	__insn_barrier();
	if (__predict_false(l->l_dopreempt))
		kpreempt(0);
	__insn_barrier();
}
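
/*
 * Usage sketch: bracket a short critical section that must not be
 * preempted; the calls nest, since l_nopreempt is a count:
 *
 *	KPREEMPT_DISABLE(curlwp);
 *	... touch per-CPU state ...
 *	KPREEMPT_ENABLE(curlwp);
 */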

/* For lwp::l_dopreempt */
#define	DOPREEMPT_ACTIVE	0x01
#define	DOPREEMPT_COUNTED	0x02

#endif /* _KERNEL */

/* Flags for _lwp_create(), as per Solaris. */
#define	LWP_DETACHED	0x00000040
#define	LWP_SUSPENDED	0x00000080

/* Kernel-internal flags for LWP creation. */
#define	LWP_PIDLID	0x40000000
#define	LWP_VFORK	0x80000000

#endif	/* !_SYS_LWP_H_ */