1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1982, 1988, 1991, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
 *
 *	@(#)systm.h	8.7 (Berkeley) 3/29/95
 * $FreeBSD$
 */

#ifndef _SYS_SYSTM_H_
#define	_SYS_SYSTM_H_

#include <sys/cdefs.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <sys/callout.h>
#include <sys/queue.h>
#include <sys/stdint.h>		/* for people using printf mainly */

/* Enable _Nonnull/_Nullable annotations below; popped at the end of this header. */
__NULLABILITY_PRAGMA_PUSH

extern int cold;		/* nonzero if we are doing a cold boot */
extern int suspend_blocked;	/* block suspend due to pending shutdown */
extern int rebooting;		/* kern_reboot() has been called. */
extern const char *panicstr;	/* panic message */
extern char version[];		/* system version */
extern char compiler_version[];	/* compiler version */
extern char copyright[];	/* system copyright */
extern int kstack_pages;	/* number of kernel stack pages */

extern u_long pagesizes[];	/* supported page sizes */
extern long physmem;		/* physical memory */
extern long realmem;		/* 'real' memory */

extern char *rootdevnames[2];	/* names of possible root devices */

extern int boothowto;		/* reboot flags, from console subsystem */
extern int bootverbose;		/* nonzero to print verbose messages */

extern int maxusers;		/* system tune hint */
extern int ngroups_max;		/* max # of supplemental groups */
extern int vm_guest;		/* Running as virtual machine guest? */

/*
 * Detected virtual machine guest types. The intention is to expand
 * and/or add to the VM_GUEST_VM type if specific VM functionality is
 * ever implemented (e.g. vendor-specific paravirtualization features).
 * Keep in sync with vm_guest_sysctl_names[].
 */
enum VM_GUEST { VM_GUEST_NO = 0, VM_GUEST_VM, VM_GUEST_XEN, VM_GUEST_HV,
		VM_GUEST_VMWARE, VM_GUEST_KVM, VM_GUEST_BHYVE, VM_LAST };

/*
 * These functions need to be declared before the KASSERT macro is invoked in
 * !KASSERT_PANIC_OPTIONAL builds, so their declarations are sort of out of
 * place compared to other function definitions in this header.  On the other
 * hand, this header is a bit disorganized anyway.
 */
void	panic(const char *, ...) __dead2 __printflike(1, 2);
void	vpanic(const char *, __va_list) __dead2 __printflike(1, 0);

#if defined(WITNESS) || defined(INVARIANT_SUPPORT)
#ifdef KASSERT_PANIC_OPTIONAL
void	kassert_panic(const char *fmt, ...) __printflike(1, 2);
#else
/* Without KASSERT_PANIC_OPTIONAL, failed assertions always panic. */
#define	kassert_panic	panic
#endif
#endif

#ifdef	INVARIANTS	/* The option is always available */
/*
 * KASSERT(exp, msg): panic with 'msg' when 'exp' is false.  'msg' is a
 * parenthesized printf-style argument list, hence "kassert_panic msg"
 * with no extra parentheses in the expansion.
 */
#define	KASSERT(exp,msg) do {						\
	if (__predict_false(!(exp)))					\
		kassert_panic msg;					\
} while (0)
/* VNASSERT: like KASSERT, but prints the vnode 'vp' first. */
#define	VNASSERT(exp, vp, msg) do {					\
	if (__predict_false(!(exp))) {					\
		vn_printf(vp, "VNASSERT failed\n");			\
		kassert_panic msg;					\
	}								\
} while (0)
#else
/* INVARIANTS disabled: assertions expand to nothing; 'exp' is not evaluated. */
#define	KASSERT(exp,msg) do { \
} while (0)

#define	VNASSERT(exp, vp, msg) do { \
} while (0)
#endif

#ifndef CTASSERT	/* Allow lint to override */
#define	CTASSERT(x)	_Static_assert(x, "compile-time assertion failed")
#endif

#if defined(_KERNEL)
#include <sys/param.h>		/* MAXCPU */
#include <sys/pcpu.h>		/* curthread */
#include <sys/kpilite.h>
#endif

/*
 * Assert that a pointer can be loaded from memory atomically.
 *
 * This assertion enforces stronger alignment than necessary.  For example,
 * on some architectures, atomicity for unaligned loads will depend on
 * whether or not the load spans multiple cache lines.
 */
#define	ASSERT_ATOMIC_LOAD_PTR(var, msg)				\
	KASSERT(sizeof(var) == sizeof(void *) &&			\
	    ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0, msg)

/*
 * Assert that a thread is in critical(9) section.
 *
 * NOTE(review): the expansion below ends with a ';', so the conventional
 * "CRITICAL_ASSERT(td);" call produces an extra empty statement.  That is
 * harmless inside a braced block but would break an unbraced if/else —
 * confirm callers before relying on it there.
 */
#define	CRITICAL_ASSERT(td)						\
	KASSERT((td)->td_critnest >= 1, ("Not in critical section"));

/*
 * If we have already panic'd and this is the thread that called
 * panic(), then don't block on any mutexes but silently succeed.
 * Otherwise, the kernel will deadlock since the scheduler isn't
 * going to run the thread that holds any lock we need.
 */
#define	SCHEDULER_STOPPED_TD(td)  ({					\
	MPASS((td) == curthread);					\
	__predict_false((td)->td_stopsched);				\
})
#define	SCHEDULER_STOPPED() SCHEDULER_STOPPED_TD(curthread)

/*
 * Align variables.
 */
#define	__read_mostly		__section(".data.read_mostly")
#define	__read_frequently	__section(".data.read_frequently")
#define	__exclusive_cache_line	__aligned(CACHE_LINE_SIZE) \
				    __section(".data.exclusive_cache_line")
/*
 * XXX the hints declarations are even more misplaced than most declarations
 * in this file, since they are needed in one file (per arch) and only used
 * in two files.
 * XXX most of these variables should be const.
 */
extern int osreldate;
extern bool dynamic_kenv;
extern struct mtx kenv_lock;
extern char *kern_envp;
extern char *md_envp;
extern char static_env[];
extern char static_hints[];	/* by config for now */

extern char **kenvp;

extern const void *zero_region;	/* address space maps to a zeroed page	*/

extern int unmapped_buf_allowed;

#ifdef __LP64__
#define	IOSIZE_MAX		iosize_max()
#define	DEVFS_IOSIZE_MAX	devfs_iosize_max()
#else
#define	IOSIZE_MAX		SSIZE_MAX
#define	DEVFS_IOSIZE_MAX	SSIZE_MAX
#endif

/*
 * General function declarations.
 */

/*
 * Forward declarations of opaque struct tags; the prototypes below only
 * use pointers to these, so the full definitions are not needed here.
 */
struct inpcb;
struct lock_object;
struct malloc_type;
struct mtx;
struct proc;
struct socket;
struct thread;
struct tty;
struct ucred;
struct uio;
struct _jmp_buf;
struct trapframe;
struct eventtimer;

int	setjmp(struct _jmp_buf *) __returns_twice;
void	longjmp(struct _jmp_buf *, int) __dead2;
int	dumpstatus(vm_offset_t addr, off_t count);
int	nullop(void);
int	eopnotsupp(void);
int	ureadc(int, struct uio *);
void	hashdestroy(void *, struct malloc_type *, u_long);
void	*hashinit(int count, struct malloc_type *type, u_long *hashmask);
void	*hashinit_flags(int count, struct malloc_type *type,
    u_long *hashmask, int flags);
/* Allocation-sleep flags for hashinit_flags()/phashinit_flags(). */
#define	HASH_NOWAIT	0x00000001
#define	HASH_WAITOK	0x00000002

void	*phashinit(int count, struct malloc_type *type, u_long *nentries);
void	*phashinit_flags(int count, struct malloc_type *type, u_long *nentries,
    int flags);
void	g_waitidle(void);

void	cpu_boot(int);
void	cpu_flush_dcache(void *, size_t);
void	cpu_rootconf(void);
void	critical_enter_KBI(void);
void	critical_exit_KBI(void);
void	critical_exit_preempt(void);
void	init_param1(void);
void	init_param2(long physpages);
void	init_static_kenv(char *, size_t);
void	tablefull(const char *);

/*
 * Allocate per-thread "current" state in the linuxkpi
 */
extern int (*lkpi_alloc_current)(struct thread *, int);
int linux_alloc_current_noop(struct thread *, int);

/*
 * critical_enter()/critical_exit(): modules and non-kernel builds go
 * through the KBI functions; in-kernel builds use the inline fast path
 * that manipulates td_critnest directly.
 */
#if defined(KLD_MODULE) || defined(KTR_CRITICAL) || !defined(_KERNEL) || defined(GENOFFSET)
#define	critical_enter()	critical_enter_KBI()
#define	critical_exit()		critical_exit_KBI()
#else
static __inline void
critical_enter(void)
{
	struct thread_lite *td;

	td = (struct thread_lite *)curthread;
	td->td_critnest++;
	/* Compiler barrier: keep the increment ordered before the region. */
	__compiler_membar();
}

static __inline void
critical_exit(void)
{
	struct thread_lite *td;

	td = (struct thread_lite *)curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	/* Compiler barriers order the decrement against the critical region. */
	__compiler_membar();
	td->td_critnest--;
	__compiler_membar();
	/* A preemption that arrived during the section is honored on exit. */
	if (__predict_false(td->td_owepreempt))
		critical_exit_preempt();

}
#endif

#ifdef  EARLY_PRINTF
/* Hook for a polled console putc usable before the console is attached. */
typedef void early_putc_t(int ch);
extern early_putc_t *early_putc;
#endif
int	kvprintf(char const *, void (*)(int, void*), void *, int,
	    __va_list) __printflike(1, 0);
void	log(int, const char *, ...) __printflike(2, 3);
void	log_console(struct uio *);
void	vlog(int, const char *, __va_list) __printflike(2, 0);
int	asprintf(char **ret, struct malloc_type *mtp, const char *format,
	    ...) __printflike(3, 4);
int	printf(const char *, ...) __printflike(1, 2);
int	snprintf(char *, size_t, const char *, ...) __printflike(3, 4);
int	sprintf(char *buf, const char *, ...) __printflike(2, 3);
int	uprintf(const char *, ...) __printflike(1, 2);
int	vprintf(const char *, __va_list) __printflike(1, 0);
int	vasprintf(char **ret, struct malloc_type *mtp, const char *format,
	    __va_list ap) __printflike(3, 0);
int	vsnprintf(char *, size_t, const char *, __va_list) __printflike(3, 0);
int	vsnrprintf(char *, size_t, int, const char *, __va_list) __printflike(4, 0);
int	vsprintf(char *buf, const char *, __va_list) __printflike(2, 0);
int	sscanf(const char *, char const * _Nonnull, ...) __scanflike(2, 3);
int	vsscanf(const char * _Nonnull, char const * _Nonnull, __va_list) __scanflike(2, 0);
long	strtol(const char *, char **, int);
u_long	strtoul(const char *, char **, int);
quad_t	strtoq(const char *, char **, int);
u_quad_t strtouq(const char *, char **, int);
void	tprintf(struct proc *p, int pri, const char *, ...)
	    __printflike(3, 4);
void	vtprintf(struct proc *, int, const char *, __va_list) __printflike(3, 0);
void	hexdump(const void *ptr, int length, const char *hdr, int flags);
/* hexdump() 'flags' layout: column count, delimiter, and omit bits. */
#define	HD_COLUMN_MASK	0xff
#define	HD_DELIM_MASK	0xff00
#define	HD_OMIT_COUNT	(1 << 16)
#define	HD_OMIT_HEX	(1 << 17)
#define	HD_OMIT_CHARS	(1 << 18)

/*
 * The bcopy/bzero/bcmp and mem* functions are prototyped first (so the
 * real symbols keep their _Nonnull annotations) and then mapped to the
 * compiler builtins, which can expand to inline code.
 */
#define	ovbcopy(f, t, l) bcopy((f), (t), (l))
void	bcopy(const void * _Nonnull from, void * _Nonnull to, size_t len);
#define	bcopy(from, to, len) __builtin_memmove((to), (from), (len))
void	bzero(void * _Nonnull buf, size_t len);
#define	bzero(buf, len) __builtin_memset((buf), 0, (len))
void	explicit_bzero(void * _Nonnull, size_t);
int	bcmp(const void *b1, const void *b2, size_t len);
#define	bcmp(b1, b2, len) __builtin_memcmp((b1), (b2), (len))

void	*memset(void * _Nonnull buf, int c, size_t len);
#define	memset(buf, c, len) __builtin_memset((buf), (c), (len))
void	*memcpy(void * _Nonnull to, const void * _Nonnull from, size_t len);
#define	memcpy(to, from, len) __builtin_memcpy((to), (from), (len))
void	*memmove(void * _Nonnull dest, const void * _Nonnull src, size_t n);
#define	memmove(dest, src, n) __builtin_memmove((dest), (src), (n))
int	memcmp(const void *b1, const void *b2, size_t len);
#define	memcmp(b1, b2, len) __builtin_memcmp((b1), (b2), (len))

/* Early-boot variants; not redirected to builtins. */
void	*memset_early(void * _Nonnull buf, int c, size_t len);
#define	bzero_early(buf, len) memset_early((buf), 0, (len))
void	*memcpy_early(void * _Nonnull to, const void * _Nonnull from, size_t len);
void	*memmove_early(void * _Nonnull dest, const void * _Nonnull src, size_t n);
#define	bcopy_early(from, to, len) memmove_early((to), (from), (len))

int	copystr(const void * _Nonnull __restrict kfaddr,
	    void * _Nonnull __restrict kdaddr, size_t len,
	    size_t * __restrict lencopied);
int	copyinstr(const void * __restrict udaddr,
	    void * _Nonnull __restrict
	    kaddr, size_t len,
	    size_t * __restrict lencopied);
int	copyin(const void * __restrict udaddr,
	    void * _Nonnull __restrict kaddr, size_t len);
int	copyin_nofault(const void * __restrict udaddr,
	    void * _Nonnull __restrict kaddr, size_t len);
int	copyout(const void * _Nonnull __restrict kaddr,
	    void * __restrict udaddr, size_t len);
int	copyout_nofault(const void * _Nonnull __restrict kaddr,
	    void * __restrict udaddr, size_t len);

/*
 * Small fetch (fu*), store (su*) and compare-and-set (casu*) primitives.
 * NOTE(review): the 'base' arguments are presumably user-space addresses,
 * matching the udaddr naming in copyin/copyout above — confirm against the
 * per-arch implementations.
 */
int	fubyte(volatile const void *base);
long	fuword(volatile const void *base);
int	fuword16(volatile const void *base);
int32_t	fuword32(volatile const void *base);
int64_t	fuword64(volatile const void *base);
int	fueword(volatile const void *base, long *val);
int	fueword32(volatile const void *base, int32_t *val);
int	fueword64(volatile const void *base, int64_t *val);
int	subyte(volatile void *base, int byte);
int	suword(volatile void *base, long word);
int	suword16(volatile void *base, int word);
int	suword32(volatile void *base, int32_t word);
int	suword64(volatile void *base, int64_t word);
uint32_t casuword32(volatile uint32_t *base, uint32_t oldval, uint32_t newval);
u_long	casuword(volatile u_long *p, u_long oldval, u_long newval);
int	casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp,
	    uint32_t newval);
int	casueword(volatile u_long *p, u_long oldval, u_long *oldvalp,
	    u_long newval);

void	realitexpire(void *);

int	sysbeep(int hertz, int period);

/* Clock interrupt handlers and profiling-clock control. */
void	hardclock(int cnt, int usermode);
void	hardclock_sync(int cpu);
void	softclock(void *);
void	statclock(int cnt, int usermode);
void	profclock(int cnt, int usermode, uintfptr_t pc);

int	hardclockintr(void);

void	startprofclock(struct proc *);
void	stopprofclock(struct proc *);
void	cpu_startprofclock(void);
void	cpu_stopprofclock(void);
void	suspendclock(void);
void
	resumeclock(void);
sbintime_t	cpu_idleclock(void);
void	cpu_activeclock(void);
void	cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt);
void	cpu_et_frequency(struct eventtimer *et, uint64_t newfreq);
extern int	cpu_disable_c2_sleep;
extern int	cpu_disable_c3_sleep;

/* Kernel environment (kenv) access. */
char	*kern_getenv(const char *name);
void	freeenv(char *env);
int	getenv_int(const char *name, int *data);
int	getenv_uint(const char *name, unsigned int *data);
int	getenv_long(const char *name, long *data);
int	getenv_ulong(const char *name, unsigned long *data);
int	getenv_string(const char *name, char *data, int size);
int	getenv_int64(const char *name, int64_t *data);
int	getenv_uint64(const char *name, uint64_t *data);
int	getenv_quad(const char *name, quad_t *data);
int	kern_setenv(const char *name, const char *value);
int	kern_unsetenv(const char *name);
int	testenv(const char *name);

int	getenv_array(const char *name, void *data, int size, int *psize,
	    int type_size, bool allow_signed);
#define	GETENV_UNSIGNED	false	/* negative numbers not allowed */
#define	GETENV_SIGNED	true	/* negative numbers allowed */

typedef uint64_t (cpu_tick_f)(void);
void	set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var);
extern cpu_tick_f *cpu_ticks;
uint64_t cpu_tickrate(void);
uint64_t cputick2usec(uint64_t tick);

#ifdef APM_FIXUP_CALLTODO
struct timeval;
void	adjust_timeout_calltodo(struct timeval *time_change);
#endif /* APM_FIXUP_CALLTODO */

#include <sys/libkern.h>

/* Initialize the world */
void	consinit(void);
void	cpu_initclocks(void);
void	cpu_initclocks_bsp(void);
void	cpu_initclocks_ap(void);
void	usrinfoinit(void);

/* Finalize the world */
void	kern_reboot(int) __dead2;
void	shutdown_nice(int);

/* Timeouts */
typedef void timeout_t(void *);	/* timeout function type */
#define	CALLOUT_HANDLE_INITIALIZER(handle)	\
	{ NULL }

void	callout_handle_init(struct callout_handle *);
struct	callout_handle timeout(timeout_t *, void *, int);
void	untimeout(timeout_t *, void *, struct callout_handle);

/* Stubs for obsolete functions that used to be for interrupt management */
static __inline intrmask_t	splbio(void)		{ return 0; }
static __inline intrmask_t	splcam(void)		{ return 0; }
static __inline intrmask_t	splclock(void)		{ return 0; }
static __inline intrmask_t	splhigh(void)		{ return 0; }
static __inline intrmask_t	splimp(void)		{ return 0; }
static __inline intrmask_t	splnet(void)		{ return 0; }
static __inline intrmask_t	spltty(void)		{ return 0; }
static __inline void		splx(intrmask_t ipl __unused)	{ return; }

/*
 * Common `proc' functions are declared here so that proc.h can be included
 * less often.
 */
int	_sleep(void * _Nonnull chan, struct lock_object *lock, int pri,
	    const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags);
#define	msleep(chan, mtx, pri, wmesg, timo)				\
	_sleep((chan), &(mtx)->lock_object, (pri), (wmesg),		\
	    tick_sbt * (timo), 0, C_HARDCLOCK)
#define	msleep_sbt(chan, mtx, pri, wmesg, bt, pr, flags)		\
	_sleep((chan), &(mtx)->lock_object, (pri), (wmesg), (bt), (pr),	\
	    (flags))
int	msleep_spin_sbt(void * _Nonnull chan, struct mtx *mtx,
	    const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags);
#define	msleep_spin(chan, mtx, wmesg, timo)				\
	msleep_spin_sbt((chan), (mtx), (wmesg), tick_sbt * (timo),	\
	    0, C_HARDCLOCK)
int	pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr,
	    int flags);
#define	pause(wmesg, timo)						\
	pause_sbt((wmesg), tick_sbt * (timo), 0, C_HARDCLOCK)
#define	pause_sig(wmesg, timo)						\
	pause_sbt((wmesg), tick_sbt * (timo), 0, C_HARDCLOCK | C_CATCH)
#define	tsleep(chan, pri, wmesg, timo)					\
	_sleep((chan), NULL, (pri), (wmesg), tick_sbt * (timo),		\
	    0, \
	    C_HARDCLOCK)
#define	tsleep_sbt(chan, pri, wmesg, bt, pr, flags)			\
	_sleep((chan), NULL, (pri), (wmesg), (bt), (pr), (flags))
void	wakeup(void * chan);
void	wakeup_one(void * chan);

/*
 * Common `struct cdev *' stuff are declared here to avoid #include poisoning
 */

struct cdev;
dev_t dev2udev(struct cdev *x);
const char *devtoname(struct cdev *cdev);

#ifdef __LP64__
size_t	devfs_iosize_max(void);
size_t	iosize_max(void);
#endif

int poll_no_poll(int events);

/* XXX: Should be void nanodelay(u_int nsec); */
void	DELAY(int usec);

/* Root mount holdback API */
struct root_hold_token;

struct root_hold_token *root_mount_hold(const char *identifier);
void root_mount_rel(struct root_hold_token *h);
int root_mounted(void);

/*
 * Unit number allocation API. (kern/subr_unit.c)
 */
struct unrhdr;
struct unrhdr *new_unrhdr(int low, int high, struct mtx *mutex);
void init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex);
void delete_unrhdr(struct unrhdr *uh);
void clear_unrhdr(struct unrhdr *uh);
void clean_unrhdr(struct unrhdr *uh);
void clean_unrhdrl(struct unrhdr *uh);
int alloc_unr(struct unrhdr *uh);
int alloc_unr_specific(struct unrhdr *uh, u_int item);
int alloc_unrl(struct unrhdr *uh);
void free_unr(struct unrhdr *uh, u_int item);

/* On ILP32 the 64-bit allocator below cannot use 64-bit atomics. */
#ifndef __LP64__
#define	UNR64_LOCKED
#endif

/* Monotonic 64-bit unit-number source; counter never wraps in practice. */
struct unrhdr64 {
	uint64_t	counter;
};

/* Seed the allocator so the first alloc_unr64() returns 'low'. */
static __inline void
new_unrhdr64(struct unrhdr64 *unr64, uint64_t low)
{

	unr64->counter = low;
}

#ifdef UNR64_LOCKED
uint64_t alloc_unr64(struct unrhdr64 *);
#else
/* Lock-free: a 64-bit fetch-and-add hands out the next unit number. */
static __inline uint64_t
alloc_unr64(struct unrhdr64 *unr64)
{

	return (atomic_fetchadd_64(&unr64->counter, 1));
}
#endif

void	intr_prof_stack_use(struct thread *td, struct trapframe *frame);

void counted_warning(unsigned *counter, const char *msg); 562 563 /* 564 * APIs to manage deprecation and obsolescence. 565 */ 566 struct device; 567 void _gone_in(int major, const char *msg); 568 void _gone_in_dev(struct device *dev, int major, const char *msg); 569 #ifdef NO_OBSOLETE_CODE 570 #define __gone_ok(m, msg) \ 571 _Static_assert(m < P_OSREL_MAJOR(__FreeBSD_version)), \ 572 "Obsolete code" msg); 573 #else 574 #define __gone_ok(m, msg) 575 #endif 576 #define gone_in(major, msg) __gone_ok(major, msg) _gone_in(major, msg) 577 #define gone_in_dev(dev, major, msg) __gone_ok(major, msg) _gone_in_dev(dev, major, msg) 578 #define gone_by_fcp101_dev(dev) \ 579 gone_in_dev((dev), 13, \ 580 "see https://github.com/freebsd/fcp/blob/master/fcp-0101.md") 581 582 __NULLABILITY_PRAGMA_POP 583 584 #endif /* !_SYS_SYSTM_H_ */ 585