/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SYSTM_H_
#define	_SYS_SYSTM_H_

#include <sys/types.h>
#include <sys/callout.h>
#include <sys/kassert.h>
#include <sys/queue.h>
#include <sys/stdint.h>		/* for people using printf mainly */
#include <machine/atomic.h>
#include <machine/cpufunc.h>

__NULLABILITY_PRAGMA_PUSH

#ifdef _KERNEL
extern int cold;		/* nonzero if we are doing a cold boot */
extern int suspend_blocked;	/* block suspend due to pending shutdown */
extern int rebooting;		/* kern_reboot() has been called. */
extern char version[];		/* system version */
extern char compiler_version[];	/* compiler version */
extern char copyright[];	/* system copyright */
extern int kstack_pages;	/* number of kernel stack pages */

extern u_long pagesizes[];	/* supported page sizes */
extern long physmem;		/* physical memory */
extern long realmem;		/* 'real' memory */

extern char *rootdevnames[2];	/* names of possible root devices */

extern int boothowto;		/* reboot flags, from console subsystem */
extern int bootverbose;		/* nonzero to print verbose messages */

extern int maxusers;		/* system tune hint */
extern int ngroups_max;		/* max # of supplemental groups */
extern int vm_guest;		/* Running as virtual machine guest? */

extern u_long maxphys;		/* max raw I/O transfer size */

/*
 * Detected virtual machine guest types. The intention is to expand
 * and/or add to the VM_GUEST_VM type if specific VM functionality is
 * ever implemented (e.g. vendor-specific paravirtualization features).
 * Keep in sync with vm_guest_sysctl_names[].
 */
enum VM_GUEST { VM_GUEST_NO = 0, VM_GUEST_VM, VM_GUEST_XEN, VM_GUEST_HV,
		VM_GUEST_VMWARE, VM_GUEST_KVM, VM_GUEST_BHYVE, VM_GUEST_VBOX,
		VM_GUEST_PARALLELS, VM_LAST };
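/*
 * Illustrative check (assumed caller, not part of this header): drivers
 * for paravirtualized devices typically gate their attach routine on the
 * hypervisor type reported in vm_guest:
 *
 *	if (vm_guest != VM_GUEST_KVM)
 *		return (ENXIO);
 */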

#endif /* _KERNEL */

/*
 * Align variables.
 */
#define	__read_mostly		__section(".data.read_mostly")
#define	__read_frequently	__section(".data.read_frequently")
#define	__exclusive_cache_line	__aligned(CACHE_LINE_SIZE) \
				    __section(".data.exclusive_cache_line")
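/*
 * Hedged usage sketch (illustrative only; the variable name is made up):
 * a global that is written rarely but read on hot paths can be placed in
 * the read-mostly section so it does not share cache lines with
 * frequently written data:
 *
 *	static int foo_enable __read_mostly = 1;
 */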
#if defined(_STANDALONE)
struct ucred;
#endif

#ifdef _KERNEL
#include <sys/param.h>		/* MAXCPU */
#include <sys/pcpu.h>		/* curthread */
#include <sys/kpilite.h>

extern bool scheduler_stopped;

/*
 * If we have already panic'd and this is the thread that called
 * panic(), then don't block on any mutexes but silently succeed.
 * Otherwise, the kernel will deadlock since the scheduler isn't
 * going to run the thread that holds any lock we need.
 */
#define	SCHEDULER_STOPPED()	__predict_false(scheduler_stopped)
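/*
 * Illustrative sketch (assumed caller, not from this file): blocking lock
 * primitives typically check this before sleeping so the panicking thread
 * can keep making progress:
 *
 *	if (SCHEDULER_STOPPED())
 *		return;		/- pretend the lock was acquired -/
 */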

extern int osreldate;

extern const void *zero_region;	/* address space maps to a zeroed page	*/

extern int unmapped_buf_allowed;

#ifdef __LP64__
#define	IOSIZE_MAX		iosize_max()
#define	DEVFS_IOSIZE_MAX	devfs_iosize_max()
#else
#define	IOSIZE_MAX		SSIZE_MAX
#define	DEVFS_IOSIZE_MAX	SSIZE_MAX
#endif

/*
 * General function declarations.
 */

struct inpcb;
struct lock_object;
struct malloc_type;
struct mtx;
struct proc;
struct socket;
struct thread;
struct tty;
struct ucred;
struct uio;
struct _jmp_buf;
struct trapframe;
struct eventtimer;

int	setjmp(struct _jmp_buf *) __returns_twice;
void	longjmp(struct _jmp_buf *, int) __dead2;
int	dumpstatus(vm_offset_t addr, off_t count);
int	nullop(void);
int	eopnotsupp(void);
int	ureadc(int, struct uio *);
void	hashdestroy(void *, struct malloc_type *, u_long);
void	*hashinit(int count, struct malloc_type *type, u_long *hashmask);
void	*hashinit_flags(int count, struct malloc_type *type,
    u_long *hashmask, int flags);
#define	HASH_NOWAIT	0x00000001
#define	HASH_WAITOK	0x00000002
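/*
 * Hedged example (the bucket type and names are hypothetical): hashinit()
 * returns an array of list heads sized to a power of two and fills in the
 * mask used to index it:
 *
 *	static LIST_HEAD(foo_head, foo) *foo_hashtbl;
 *	static u_long foo_hashmask;
 *
 *	foo_hashtbl = hashinit(desired, M_TEMP, &foo_hashmask);
 *	head = &foo_hashtbl[key & foo_hashmask];
 *	...
 *	hashdestroy(foo_hashtbl, M_TEMP, foo_hashmask);
 */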

void	*phashinit(int count, struct malloc_type *type, u_long *nentries);
void	*phashinit_flags(int count, struct malloc_type *type, u_long *nentries,
    int flags);

void	cpu_flush_dcache(void *, size_t);
void	cpu_rootconf(void);
void	critical_enter_KBI(void);
void	critical_exit_KBI(void);
void	critical_exit_preempt(void);
void	init_param1(void);
void	init_param2(long physpages);
void	init_static_kenv(char *, size_t);
void	tablefull(const char *);

/*
 * Allocate per-thread "current" state in the linuxkpi
 */
extern int (*lkpi_alloc_current)(struct thread *, int);
int linux_alloc_current_noop(struct thread *, int);

#if (defined(KLD_MODULE) && !defined(KLD_TIED)) || defined(KTR_CRITICAL) || !defined(_KERNEL) || defined(GENOFFSET)
#define critical_enter() critical_enter_KBI()
#define critical_exit() critical_exit_KBI()
#else
static __inline void
critical_enter(void)
{
	struct thread_lite *td;

	td = (struct thread_lite *)curthread;
	td->td_critnest++;
	atomic_interrupt_fence();
}

static __inline void
critical_exit(void)
{
	struct thread_lite *td;

	td = (struct thread_lite *)curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	atomic_interrupt_fence();
	td->td_critnest--;
	atomic_interrupt_fence();
	if (__predict_false(td->td_owepreempt))
		critical_exit_preempt();
}
#endif
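/*
 * Illustrative usage (assumed caller): critical sections nest, pin the
 * thread to its CPU, and defer preemption until the outermost exit; keep
 * the protected region short:
 *
 *	critical_enter();
 *	... touch per-CPU state that must not migrate ...
 *	critical_exit();
 */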

#ifdef  EARLY_PRINTF
typedef void early_putc_t(int ch);
extern early_putc_t *early_putc;
#endif
int	kvprintf(char const *, void (*)(int, void*), void *, int,
	    __va_list) __printflike(1, 0);
void	log(int, const char *, ...) __printflike(2, 3);
void	log_console(struct uio *);
void	vlog(int, const char *, __va_list) __printflike(2, 0);
int	asprintf(char **ret, struct malloc_type *mtp, const char *format,
	    ...) __printflike(3, 4);
int	printf(const char *, ...) __printflike(1, 2);
int	snprintf(char *, size_t, const char *, ...) __printflike(3, 4);
int	sprintf(char *buf, const char *, ...) __printflike(2, 3);
int	uprintf(const char *, ...) __printflike(1, 2);
int	vprintf(const char *, __va_list) __printflike(1, 0);
int	vasprintf(char **ret, struct malloc_type *mtp, const char *format,
	    __va_list ap) __printflike(3, 0);
int	vsnprintf(char *, size_t, const char *, __va_list) __printflike(3, 0);
int	vsnrprintf(char *, size_t, int, const char *, __va_list) __printflike(4, 0);
int	vsprintf(char *buf, const char *, __va_list) __printflike(2, 0);
int	sscanf(const char *, char const * _Nonnull, ...) __scanflike(2, 3);
int	vsscanf(const char * _Nonnull, char const * _Nonnull, __va_list) __scanflike(2, 0);
long	strtol(const char *, char **, int);
u_long	strtoul(const char *, char **, int);
quad_t	strtoq(const char *, char **, int);
u_quad_t strtouq(const char *, char **, int);
void	tprintf(struct proc *p, int pri, const char *, ...) __printflike(3, 4);
void	vtprintf(struct proc *, int, const char *, __va_list) __printflike(3, 0);
void	hexdump(const void *ptr, int length, const char *hdr, int flags);
#define	HD_COLUMN_MASK	0xff
#define	HD_DELIM_MASK	0xff00
#define	HD_OMIT_COUNT	(1 << 16)
#define	HD_OMIT_HEX	(1 << 17)
#define	HD_OMIT_CHARS	(1 << 18)
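/*
 * Hedged example (the buffer is hypothetical): dump a packet to the
 * console, 16 bytes per line by default, prefixing each output line:
 *
 *	hexdump(pkt, pkt_len, "pkt: ", 0);
 */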

#define ovbcopy(f, t, l) bcopy((f), (t), (l))
void	explicit_bzero(void * _Nonnull, size_t);

void	*memset(void * _Nonnull buf, int c, size_t len);
void	*memcpy(void * _Nonnull to, const void * _Nonnull from, size_t len);
void	*memmove(void * _Nonnull dest, const void * _Nonnull src, size_t n);
int	memcmp(const void *b1, const void *b2, size_t len);

#ifdef SAN_NEEDS_INTERCEPTORS
#define	SAN_INTERCEPTOR(func)	\
	__CONCAT(SAN_INTERCEPTOR_PREFIX, __CONCAT(_, func))
void	*SAN_INTERCEPTOR(memset)(void *, int, size_t);
void	*SAN_INTERCEPTOR(memcpy)(void *, const void *, size_t);
void	*SAN_INTERCEPTOR(memmove)(void *, const void *, size_t);
int	SAN_INTERCEPTOR(memcmp)(const void *, const void *, size_t);
#ifndef SAN_RUNTIME
#define bcopy(from, to, len)	SAN_INTERCEPTOR(memmove)((to), (from), (len))
#define bzero(buf, len)		SAN_INTERCEPTOR(memset)((buf), 0, (len))
#define bcmp(b1, b2, len)	SAN_INTERCEPTOR(memcmp)((b1), (b2), (len))
#define memset(buf, c, len)	SAN_INTERCEPTOR(memset)((buf), (c), (len))
#define memcpy(to, from, len)	SAN_INTERCEPTOR(memcpy)((to), (from), (len))
#define memmove(dest, src, n)	SAN_INTERCEPTOR(memmove)((dest), (src), (n))
#define memcmp(b1, b2, len)	SAN_INTERCEPTOR(memcmp)((b1), (b2), (len))
#endif /* !SAN_RUNTIME */
#else /* !SAN_NEEDS_INTERCEPTORS */
#define bcopy(from, to, len)	__builtin_memmove((to), (from), (len))
#define bzero(buf, len)		__builtin_memset((buf), 0, (len))
#define bcmp(b1, b2, len)	__builtin_memcmp((b1), (b2), (len))
#define memset(buf, c, len)	__builtin_memset((buf), (c), (len))
#define memcpy(to, from, len)	__builtin_memcpy((to), (from), (len))
#define memmove(dest, src, n)	__builtin_memmove((dest), (src), (n))
#define memcmp(b1, b2, len)	__builtin_memcmp((b1), (b2), (len))
#endif /* SAN_NEEDS_INTERCEPTORS */

void	*memset_early(void * _Nonnull buf, int c, size_t len);
#define bzero_early(buf, len) memset_early((buf), 0, (len))
void	*memcpy_early(void * _Nonnull to, const void * _Nonnull from, size_t len);
void	*memmove_early(void * _Nonnull dest, const void * _Nonnull src, size_t n);
#define bcopy_early(from, to, len) memmove_early((to), (from), (len))

#define	copystr(src, dst, len, outlen)	({			\
	size_t __r, __len, *__outlen;				\
								\
	__len = (len);						\
	__outlen = (outlen);					\
	__r = strlcpy((dst), (src), __len);			\
	if (__outlen != NULL)					\
		*__outlen = ((__r >= __len) ? __len : __r + 1);	\
	((__r >= __len) ? ENAMETOOLONG : 0);			\
})

int __result_use_check copyinstr(const void * __restrict udaddr,
    void * _Nonnull __restrict kaddr, size_t len,
    size_t * __restrict lencopied);
int __result_use_check copyin(const void * __restrict udaddr,
    void * _Nonnull __restrict kaddr, size_t len);
int __result_use_check copyin_nofault(const void * __restrict udaddr,
    void * _Nonnull __restrict kaddr, size_t len);
int copyout(const void * _Nonnull __restrict kaddr,
    void * __restrict udaddr, size_t len);
int copyout_nofault(const void * _Nonnull __restrict kaddr,
    void * __restrict udaddr, size_t len);
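/*
 * Illustrative sketch (the argument structure is hypothetical): user
 * memory is always moved with copyin()/copyout() and the error is
 * propagated to the caller:
 *
 *	struct foo_args a;
 *	int error;
 *
 *	error = copyin(uap->argp, &a, sizeof(a));
 *	if (error != 0)
 *		return (error);
 *	...
 *	return (copyout(&a, uap->argp, sizeof(a)));
 */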

#ifdef SAN_NEEDS_INTERCEPTORS
int	SAN_INTERCEPTOR(copyin)(const void *, void *, size_t);
int	SAN_INTERCEPTOR(copyinstr)(const void *, void *, size_t, size_t *);
int	SAN_INTERCEPTOR(copyout)(const void *, void *, size_t);
#ifndef SAN_RUNTIME
#define	copyin(u, k, l)		SAN_INTERCEPTOR(copyin)((u), (k), (l))
#define	copyinstr(u, k, l, lc)	SAN_INTERCEPTOR(copyinstr)((u), (k), (l), (lc))
#define	copyout(k, u, l)	SAN_INTERCEPTOR(copyout)((k), (u), (l))
#endif /* !SAN_RUNTIME */
#endif /* SAN_NEEDS_INTERCEPTORS */

int	fubyte(volatile const void *base);
long	fuword(volatile const void *base);
int	fuword16(volatile const void *base);
int32_t	fuword32(volatile const void *base);
int64_t	fuword64(volatile const void *base);
int __result_use_check fueword(volatile const void *base, long *val);
int __result_use_check fueword32(volatile const void *base, int32_t *val);
int __result_use_check fueword64(volatile const void *base, int64_t *val);
int subyte(volatile void *base, int byte);
int suword(volatile void *base, long word);
int suword16(volatile void *base, int word);
int suword32(volatile void *base, int32_t word);
int suword64(volatile void *base, int64_t word);
uint32_t casuword32(volatile uint32_t *base, uint32_t oldval, uint32_t newval);
u_long	casuword(volatile u_long *p, u_long oldval, u_long newval);
int	casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp,
	    uint32_t newval);
int	casueword(volatile u_long *p, u_long oldval, u_long *oldvalp,
	    u_long newval);
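/*
 * Hedged example (uaddr is a hypothetical user pointer): fueword()
 * reports a fault separately from the fetched value, unlike fuword(),
 * whose -1 return is ambiguous:
 *
 *	long val;
 *
 *	if (fueword(uaddr, &val) == -1)
 *		return (EFAULT);
 */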

#if defined(SAN_NEEDS_INTERCEPTORS) && !defined(KCSAN)
int	SAN_INTERCEPTOR(fubyte)(volatile const void *base);
int	SAN_INTERCEPTOR(fuword16)(volatile const void *base);
int	SAN_INTERCEPTOR(fueword)(volatile const void *base, long *val);
int	SAN_INTERCEPTOR(fueword32)(volatile const void *base, int32_t *val);
int	SAN_INTERCEPTOR(fueword64)(volatile const void *base, int64_t *val);
int	SAN_INTERCEPTOR(subyte)(volatile void *base, int byte);
int	SAN_INTERCEPTOR(suword)(volatile void *base, long word);
int	SAN_INTERCEPTOR(suword16)(volatile void *base, int word);
int	SAN_INTERCEPTOR(suword32)(volatile void *base, int32_t word);
int	SAN_INTERCEPTOR(suword64)(volatile void *base, int64_t word);
int	SAN_INTERCEPTOR(casueword32)(volatile uint32_t *base, uint32_t oldval,
	    uint32_t *oldvalp, uint32_t newval);
int	SAN_INTERCEPTOR(casueword)(volatile u_long *p, u_long oldval,
	    u_long *oldvalp, u_long newval);
#ifndef SAN_RUNTIME
#define	fubyte(b)		SAN_INTERCEPTOR(fubyte)((b))
#define	fuword16(b)		SAN_INTERCEPTOR(fuword16)((b))
#define	fueword(b, v)		SAN_INTERCEPTOR(fueword)((b), (v))
#define	fueword32(b, v)		SAN_INTERCEPTOR(fueword32)((b), (v))
#define	fueword64(b, v)		SAN_INTERCEPTOR(fueword64)((b), (v))
#define	subyte(b, w)		SAN_INTERCEPTOR(subyte)((b), (w))
#define	suword(b, w)		SAN_INTERCEPTOR(suword)((b), (w))
#define	suword16(b, w)		SAN_INTERCEPTOR(suword16)((b), (w))
#define	suword32(b, w)		SAN_INTERCEPTOR(suword32)((b), (w))
#define	suword64(b, w)		SAN_INTERCEPTOR(suword64)((b), (w))
#define	casueword32(b, o, p, n)	SAN_INTERCEPTOR(casueword32)((b), (o), (p), (n))
#define	casueword(b, o, p, n)	SAN_INTERCEPTOR(casueword)((b), (o), (p), (n))
#endif /* !SAN_RUNTIME */
#endif /* SAN_NEEDS_INTERCEPTORS && !KCSAN */

int	sysbeep(int hertz, sbintime_t duration);

void	hardclock(int cnt, int usermode);
void	hardclock_sync(int cpu);
void	statclock(int cnt, int usermode);
void	profclock(int cnt, int usermode, uintfptr_t pc);

int	hardclockintr(void);

void	startprofclock(struct proc *);
void	stopprofclock(struct proc *);
void	cpu_startprofclock(void);
void	cpu_stopprofclock(void);
void	suspendclock(void);
void	resumeclock(void);
sbintime_t	cpu_idleclock(void);
void	cpu_activeclock(void);
void	cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt);
void	cpu_et_frequency(struct eventtimer *et, uint64_t newfreq);
extern int	cpu_disable_c2_sleep;
extern int	cpu_disable_c3_sleep;

extern void	(*tcp_hpts_softclock)(void);
#define	tcp_hpts_softclock()	do {					\
		if (tcp_hpts_softclock != NULL)				\
			tcp_hpts_softclock();				\
} while (0)

char	*kern_getenv(const char *name);
void	freeenv(char *env);
int	getenv_int(const char *name, int *data);
int	getenv_uint(const char *name, unsigned int *data);
int	getenv_long(const char *name, long *data);
int	getenv_ulong(const char *name, unsigned long *data);
int	getenv_string(const char *name, char *data, int size);
int	getenv_int64(const char *name, int64_t *data);
int	getenv_uint64(const char *name, uint64_t *data);
int	getenv_quad(const char *name, quad_t *data);
int	getenv_bool(const char *name, bool *data);
bool	getenv_is_true(const char *name);
bool	getenv_is_false(const char *name);
int	kern_setenv(const char *name, const char *value);
int	kern_unsetenv(const char *name);
int	testenv(const char *name);

int	getenv_array(const char *name, void *data, int size, int *psize,
    int type_size, bool allow_signed);
#define	GETENV_UNSIGNED	false	/* negative numbers not allowed */
#define	GETENV_SIGNED	true	/* negative numbers allowed */
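/*
 * Illustrative sketch (the tunable names and default are hypothetical):
 * strings returned by kern_getenv() are allocated and must be released
 * with freeenv(); the typed helpers return nonzero when the value was
 * found and parsed:
 *
 *	char *cp;
 *	int level;
 *
 *	if ((cp = kern_getenv("hw.foo.mode")) != NULL) {
 *		...
 *		freeenv(cp);
 *	}
 *	if (getenv_int("hw.foo.level", &level) == 0)
 *		level = FOO_DEFAULT_LEVEL;
 */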

typedef uint64_t (cpu_tick_f)(void);
void set_cputicker(cpu_tick_f *func, uint64_t freq, bool isvariable);
extern cpu_tick_f *cpu_ticks;
uint64_t cpu_tickrate(void);
uint64_t cputick2usec(uint64_t tick);

#include <sys/libkern.h>

/* Initialize the world */
void	consinit(void);
void	cpu_initclocks(void);
void	cpu_initclocks_bsp(void);
void	cpu_initclocks_ap(void);
void	usrinfoinit(void);

/* Finalize the world */
void	kern_reboot(int) __dead2;
void	shutdown_nice(int);

/* Stubs for obsolete functions that used to be for interrupt management */
static __inline intrmask_t	splhigh(void)		{ return 0; }
static __inline intrmask_t	splimp(void)		{ return 0; }
static __inline intrmask_t	splnet(void)		{ return 0; }
static __inline intrmask_t	spltty(void)		{ return 0; }
static __inline void		splx(intrmask_t ipl __unused)	{ return; }

/*
 * Common `proc' functions are declared here so that proc.h can be included
 * less often.
 */
int	_sleep(const void * _Nonnull chan, struct lock_object *lock, int pri,
	   const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags);
#define	msleep(chan, mtx, pri, wmesg, timo)				\
	_sleep((chan), &(mtx)->lock_object, (pri), (wmesg),		\
	    tick_sbt * (timo), 0, C_HARDCLOCK)
#define	msleep_sbt(chan, mtx, pri, wmesg, bt, pr, flags)		\
	_sleep((chan), &(mtx)->lock_object, (pri), (wmesg), (bt), (pr),	\
	    (flags))
int	msleep_spin_sbt(const void * _Nonnull chan, struct mtx *mtx,
	    const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags);
#define	msleep_spin(chan, mtx, wmesg, timo)				\
	msleep_spin_sbt((chan), (mtx), (wmesg), tick_sbt * (timo),	\
	    0, C_HARDCLOCK)
int	pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr,
	    int flags);
static __inline int
pause(const char *wmesg, int timo)
{
	return (pause_sbt(wmesg, tick_sbt * timo, 0, C_HARDCLOCK));
}
#define	pause_sig(wmesg, timo)						\
	pause_sbt((wmesg), tick_sbt * (timo), 0, C_HARDCLOCK | C_CATCH)
#define	tsleep(chan, pri, wmesg, timo)					\
	_sleep((chan), NULL, (pri), (wmesg), tick_sbt * (timo),		\
	    0, C_HARDCLOCK)
#define	tsleep_sbt(chan, pri, wmesg, bt, pr, flags)			\
	_sleep((chan), NULL, (pri), (wmesg), (bt), (pr), (flags))
void	wakeup(const void *chan);
void	wakeup_one(const void *chan);
void	wakeup_any(const void *chan);
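/*
 * Hedged example (hypothetical softc and mutex): a thread sleeps on an
 * address while the condition it waits for is false, and another context
 * signals it; msleep() drops and reacquires the mutex around the sleep:
 *
 *	mtx_lock(&sc->mtx);
 *	while (!sc->ready)
 *		error = msleep(&sc->ready, &sc->mtx, PWAIT, "foordy", hz);
 *	mtx_unlock(&sc->mtx);
 *
 *	mtx_lock(&sc->mtx);
 *	sc->ready = 1;
 *	wakeup(&sc->ready);
 *	mtx_unlock(&sc->mtx);
 */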

/*
 * Common `struct cdev *' stuff is declared here to avoid #include poisoning
 */

struct cdev;
dev_t dev2udev(struct cdev *x);
const char *devtoname(struct cdev *cdev);

#ifdef __LP64__
size_t	devfs_iosize_max(void);
size_t	iosize_max(void);
#endif

int poll_no_poll(int events);

/* XXX: Should be void nanodelay(u_int nsec); */
void	DELAY(int usec);

int kcmp_cmp(uintptr_t a, uintptr_t b);

/* Root mount holdback API */
struct root_hold_token {
	int				flags;
	const char			*who;
	TAILQ_ENTRY(root_hold_token)	list;
};

struct root_hold_token *root_mount_hold(const char *identifier);
void root_mount_hold_token(const char *identifier, struct root_hold_token *h);
void root_mount_rel(struct root_hold_token *h);
int root_mounted(void);
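/*
 * Illustrative sketch (the device name is hypothetical): a driver that
 * may provide the root device can delay the root mount until its
 * asynchronous probe has finished:
 *
 *	sc->roothold = root_mount_hold("foo0");
 *	... discovery completes in another thread ...
 *	root_mount_rel(sc->roothold);
 */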

/*
 * Unit number allocation API. (kern/subr_unit.c)
 */
struct unrhdr;
#define	UNR_NO_MTX	((void *)(uintptr_t)-1)
struct unrhdr *new_unrhdr(int low, int high, struct mtx *mutex);
void init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex);
void delete_unrhdr(struct unrhdr *uh);
void clear_unrhdr(struct unrhdr *uh);
void clean_unrhdr(struct unrhdr *uh);
void clean_unrhdrl(struct unrhdr *uh);
int alloc_unr(struct unrhdr *uh);
int alloc_unr_specific(struct unrhdr *uh, u_int item);
int alloc_unrl(struct unrhdr *uh);
void free_unr(struct unrhdr *uh, u_int item);
void *create_iter_unr(struct unrhdr *uh);
int next_iter_unr(void *handle);
void free_iter_unr(void *handle);
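/*
 * Hedged example (hypothetical unit space): allocate stable unit numbers
 * for device instances; passing NULL lets the allocator supply its own
 * mutex, and alloc_unr() returns -1 when the space is exhausted:
 *
 *	static struct unrhdr *foo_unrhdr;
 *
 *	foo_unrhdr = new_unrhdr(0, 255, NULL);
 *	unit = alloc_unr(foo_unrhdr);
 *	...
 *	free_unr(foo_unrhdr, unit);
 */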

struct unrhdr64 {
	uint64_t	counter;
};

static __inline void
new_unrhdr64(struct unrhdr64 *unr64, uint64_t low)
{

	unr64->counter = low;
}

static __inline uint64_t
alloc_unr64(struct unrhdr64 *unr64)
{

	return (atomic_fetchadd_64(&unr64->counter, 1));
}
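/*
 * Illustrative sketch: a 64-bit unit space never has to recycle values,
 * so allocation is just an atomic post-increment:
 *
 *	static struct unrhdr64 foo_idspace;
 *
 *	new_unrhdr64(&foo_idspace, 1);
 *	id = alloc_unr64(&foo_idspace);
 */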

void	intr_prof_stack_use(struct thread *td, struct trapframe *frame);

void counted_warning(unsigned *counter, const char *msg);

/*
 * APIs to manage deprecation and obsolescence.
 */
void _gone_in(int major, const char *msg);
void _gone_in_dev(device_t dev, int major, const char *msg);
#ifdef NO_OBSOLETE_CODE
#define __gone_ok(m, msg)					 \
	_Static_assert(m < P_OSREL_MAJOR(__FreeBSD_version),	 \
	    "Obsolete code: " msg);
#else
#define	__gone_ok(m, msg)
#endif
#define gone_in(major, msg)		__gone_ok(major, msg) _gone_in(major, msg)
#define gone_in_dev(dev, major, msg)	__gone_ok(major, msg) _gone_in_dev(dev, major, msg)
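/*
 * Hedged example (driver name and release number are hypothetical):
 * announce at attach time that a driver is scheduled for removal in a
 * future major release:
 *
 *	gone_in_dev(dev, 16, "foo(4) is obsolete, use bar(4) instead");
 */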

#ifdef INVARIANTS
#define	__diagused
#else
#define	__diagused	__unused
#endif

#ifdef WITNESS
#define	__witness_used
#else
#define	__witness_used	__unused
#endif

#endif /* _KERNEL */

__NULLABILITY_PRAGMA_POP
#endif /* !_SYS_SYSTM_H_ */