1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1982, 1986, 1990, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 * 31 * @(#)socketvar.h 8.3 (Berkeley) 2/19/95 32 */ 33 34 #ifndef _SYS_SOCKETVAR_H_ 35 #define _SYS_SOCKETVAR_H_ 36 37 /* 38 * Socket generation count type. Also used in xinpcb, xtcpcb, xunpcb. 
 */
typedef uint64_t so_gen_t;

#if defined(_KERNEL) || defined(_WANT_SOCKET)
#include <sys/queue.h>			/* for TAILQ macros */
#include <sys/selinfo.h>		/* for struct selinfo */
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/osd.h>
#include <sys/_sx.h>
#include <sys/sockbuf.h>
#ifdef _KERNEL
#include <sys/caprights.h>
#include <sys/sockopt.h>
#endif

struct vnet;

/*
 * Kernel structure per socket.
 * Contains send and receive buffer queues,
 * handle on protocol and pointer to protocol
 * private data and error information.
 */
typedef	int so_upcall_t(struct socket *, void *, int);
typedef	void so_dtor_t(struct socket *);

struct socket;

/*
 * Accept-queue membership of a socket: not queued, on the incomplete
 * connection queue (sol_incomp), or on the completed connection queue
 * (sol_comp) of its listening socket.
 */
enum socket_qstate {
	SQ_NONE = 0,
	SQ_INCOMP = 0x0800,		/* on sol_incomp */
	SQ_COMP = 0x1000,		/* on sol_comp */
};

/*-
 * Locking key to struct socket:
 * (a) constant after allocation, no locking required.
 * (b) locked by SOCK_LOCK(so).
 * (cr) locked by SOCK_RECVBUF_LOCK(so)
 * (cs) locked by SOCK_SENDBUF_LOCK(so)
 * (e) locked by SOLISTEN_LOCK() of corresponding listening socket.
 * (f) not locked since integer reads/writes are atomic.
 * (g) used only as a sleep/wakeup address, no value.
 * (h) locked by global mutex so_global_mtx.
 * (k) locked by KTLS workqueue mutex
 */
TAILQ_HEAD(accept_queue, socket);
struct socket {
	struct mtx	so_lock;
	volatile u_int	so_count;	/* (b / refcount) */
	struct selinfo	so_rdsel;	/* (b/cr) for so_rcv/so_comp */
	struct selinfo	so_wrsel;	/* (b/cs) for so_snd */
	int	so_options;		/* (b) from socket call, see socket.h */
	short	so_type;		/* (a) generic type, see socket.h */
	short	so_state;		/* (b) internal state flags SS_* */
	void	*so_pcb;		/* protocol control block */
	struct	vnet *so_vnet;		/* (a) network stack instance */
	struct	protosw *so_proto;	/* (a) protocol handle */
	short	so_linger;		/* time to linger close(2) */
	short	so_timeo;		/* (g) connection timeout */
	u_short	so_error;		/* (f) error affecting connection */
	u_short	so_rerror;		/* (f) error affecting connection */
	/*
	 * NOTE(review): the "[sg]" key below is not defined in the locking
	 * key above — confirm against the sigio locking discipline.
	 */
	struct	sigio *so_sigio;	/* [sg] information for async I/O or
					   out of band data (SIGURG) */
	struct	ucred *so_cred;		/* (a) user credentials */
	struct	label *so_label;	/* (b) MAC label for socket */
	/* NB: generation count must not be first. */
	so_gen_t so_gencnt;		/* (h) generation count */
	void	*so_emuldata;		/* (b) private data for emulators */
	so_dtor_t *so_dtor;		/* (b) optional destructor */
	struct	osd	osd;		/* Object Specific extensions */
	/*
	 * so_fibnum, so_user_cookie and friends can be used to attach
	 * some user-specified metadata to a socket, which then can be
	 * used by the kernel for various actions.
	 * so_user_cookie is used by ipfw/dummynet.
	 */
	int	so_fibnum;		/* routing domain for this socket */
	uint32_t so_user_cookie;

	int	so_ts_clock;	/* type of the clock used for timestamps */
	uint32_t so_max_pacing_rate;	/* (f) TX rate limit in bytes/s */

	/*
	 * Mutexes to prevent interleaving of socket I/O.  These have to be
	 * outside of the socket buffers in order to interlock with listen(2).
	 */
	struct sx so_snd_sx __aligned(CACHE_LINE_SIZE);
	struct mtx so_snd_mtx;

	struct sx so_rcv_sx __aligned(CACHE_LINE_SIZE);
	struct mtx so_rcv_mtx;

	union {
		/* Regular (data flow) socket. */
		struct {
			/* (cr, cs) Receive and send buffers. */
			struct sockbuf		so_rcv, so_snd;

			/* (e) Our place on accept queue. */
			TAILQ_ENTRY(socket)	so_list;
			struct socket		*so_listen;	/* (b) */
			enum socket_qstate so_qstate;		/* (b) */
			/* (b) cached MAC label for peer */
			struct	label		*so_peerlabel;
			u_long	so_oobmark;	/* chars to oob mark */

			/* (k) Our place on KTLS RX work queue. */
			STAILQ_ENTRY(socket)	so_ktls_rx_list;
		};
		/*
		 * Listening socket, where accepts occur, is so_listen in all
		 * subsidiary sockets.  If so_listen is NULL, socket is not
		 * related to an accept.  For a listening socket itself
		 * sol_incomp queues partially completed connections, while
		 * sol_comp is a queue of connections ready to be accepted.
		 * If a connection is aborted and it has so_listen set, then
		 * it has to be pulled out of either sol_incomp or sol_comp.
		 * We allow connections to queue up based on current queue
		 * lengths and limit on number of queued connections for this
		 * socket.
		 */
		struct {
			/* (e) queue of partial unaccepted connections */
			struct accept_queue	sol_incomp;
			/* (e) queue of complete unaccepted connections */
			struct accept_queue	sol_comp;
			u_int	sol_qlen;	/* (e) sol_comp length */
			u_int	sol_incqlen;	/* (e) sol_incomp length */
			u_int	sol_qlimit;	/* (e) queue limit */

			/* accept_filter(9) optional data */
			struct	accept_filter *sol_accept_filter;
			void	*sol_accept_filter_arg;	/* saved filter args */
			char	*sol_accept_filter_str;	/* saved user args */

			/* Optional upcall, for kernel socket. */
			so_upcall_t	*sol_upcall;	/* (e) */
			void		*sol_upcallarg;	/* (e) */

			/* Socket buffer parameters, to be copied to
			 * dataflow sockets, accepted from this one. */
			int		sol_sbrcv_lowat;
			int		sol_sbsnd_lowat;
			u_int		sol_sbrcv_hiwat;
			u_int		sol_sbsnd_hiwat;
			short		sol_sbrcv_flags;
			short		sol_sbsnd_flags;
			sbintime_t	sol_sbrcv_timeo;
			sbintime_t	sol_sbsnd_timeo;

			/* Information tracking listen queue overflows. */
			struct timeval	sol_lastover;	/* (e) */
			int		sol_overcount;	/* (e) */
		};
	};
};
#endif	/* defined(_KERNEL) || defined(_WANT_SOCKET) */

/*
 * Socket state bits.
 *
 * Historically, these bits were all kept in the so_state field.
 * They are now split into separate, lock-specific fields.
 * so_state maintains basic socket state protected by the socket lock.
 * so_qstate holds information about the socket accept queues.
 * Each socket buffer also has a state field holding information
 * relevant to that socket buffer (can't send, rcv).
 * Many fields will be read without locks to improve performance and avoid
 * lock order issues.  However, this approach must be used with caution.
 */
#define	SS_ISCONNECTED		0x0002	/* socket connected to a peer */
#define	SS_ISCONNECTING		0x0004	/* in process of connecting to peer */
#define	SS_ISDISCONNECTING	0x0008	/* in process of disconnecting */
#define	SS_NBIO			0x0100	/* non-blocking ops */
#define	SS_ASYNC		0x0200	/* async i/o notify */
#define	SS_ISCONFIRMING		0x0400	/* deciding to accept connection req */
#define	SS_ISDISCONNECTED	0x2000	/* socket disconnected from peer */

#ifdef _KERNEL

/* Per-socket mutex, protecting the fields keyed (b) in the locking key. */
#define	SOCK_MTX(so)		(&(so)->so_lock)
#define	SOCK_LOCK(so)		mtx_lock(&(so)->so_lock)
#define	SOCK_OWNED(so)		mtx_owned(&(so)->so_lock)
#define	SOCK_UNLOCK(so)		mtx_unlock(&(so)->so_lock)
#define	SOCK_LOCK_ASSERT(so)	mtx_assert(&(so)->so_lock, MA_OWNED)
#define	SOCK_UNLOCK_ASSERT(so)	mtx_assert(&(so)->so_lock, MA_NOTOWNED)

/*
 * A socket is listening when SO_ACCEPTCONN is set in so_options.  The
 * SOLISTEN_*() lock operations take the same per-socket mutex but
 * additionally assert that the socket is in the listening state.
 */
#define	SOLISTENING(sol)	(((sol)->so_options & SO_ACCEPTCONN) != 0)
#define	SOLISTEN_LOCK(sol)	do {					\
	mtx_lock(&(sol)->so_lock);					\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)
#define	SOLISTEN_TRYLOCK(sol)	mtx_trylock(&(sol)->so_lock)
#define	SOLISTEN_UNLOCK(sol)	do {					\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
	mtx_unlock(&(sol)->so_lock);					\
} while (0)
#define	SOLISTEN_LOCK_ASSERT(sol)	do {				\
	mtx_assert(&(sol)->so_lock, MA_OWNED);				\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)
#define	SOLISTEN_UNLOCK_ASSERT(sol)	do {				\
	mtx_assert(&(sol)->so_lock, MA_NOTOWNED);			\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)

/*
 * Socket buffer locks.  These are strongly preferred over SOCKBUF_LOCK(sb)
 * macros, as we are moving towards protocol specific socket buffers.
 */
#define	SOCK_RECVBUF_MTX(so)						\
	(&(so)->so_rcv_mtx)
#define	SOCK_RECVBUF_LOCK(so)						\
	mtx_lock(SOCK_RECVBUF_MTX(so))
#define	SOCK_RECVBUF_UNLOCK(so)						\
	mtx_unlock(SOCK_RECVBUF_MTX(so))
#define	SOCK_RECVBUF_LOCK_ASSERT(so)					\
	mtx_assert(SOCK_RECVBUF_MTX(so), MA_OWNED)
#define	SOCK_RECVBUF_UNLOCK_ASSERT(so)					\
	mtx_assert(SOCK_RECVBUF_MTX(so), MA_NOTOWNED)

#define	SOCK_SENDBUF_MTX(so)						\
	(&(so)->so_snd_mtx)
#define	SOCK_SENDBUF_LOCK(so)						\
	mtx_lock(SOCK_SENDBUF_MTX(so))
#define	SOCK_SENDBUF_UNLOCK(so)						\
	mtx_unlock(SOCK_SENDBUF_MTX(so))
#define	SOCK_SENDBUF_LOCK_ASSERT(so)					\
	mtx_assert(SOCK_SENDBUF_MTX(so), MA_OWNED)
#define	SOCK_SENDBUF_UNLOCK_ASSERT(so)					\
	mtx_assert(SOCK_SENDBUF_MTX(so), MA_NOTOWNED)

/* Buffer lock operations selected at runtime by 'which' (SO_RCV/SO_SND). */
#define	SOCK_BUF_LOCK(so, which)					\
	mtx_lock(soeventmtx(so, which))
#define	SOCK_BUF_UNLOCK(so, which)					\
	mtx_unlock(soeventmtx(so, which))
#define	SOCK_BUF_LOCK_ASSERT(so, which)					\
	mtx_assert(soeventmtx(so, which), MA_OWNED)
#define	SOCK_BUF_UNLOCK_ASSERT(so, which)				\
	mtx_assert(soeventmtx(so, which), MA_NOTOWNED)

/* Return the selected (receive or send) buffer of a dataflow socket. */
static inline struct sockbuf *
sobuf(struct socket *so, const sb_which which)
{
	return (which == SO_RCV ? &so->so_rcv : &so->so_snd);
}

/* Return the mutex protecting the selected socket buffer. */
static inline struct mtx *
soeventmtx(struct socket *so, const sb_which which)
{
	return (which == SO_RCV ? SOCK_RECVBUF_MTX(so) : SOCK_SENDBUF_MTX(so));
}

/*
 * Macros for sockets and socket buffering.
 */

/*
 * Flags to soiolock().
 */
#define	SBL_WAIT	0x00000001	/* Wait if not immediately available. */
#define	SBL_NOINTR	0x00000002	/* Force non-interruptible sleep. */
#define	SBL_VALID	(SBL_WAIT | SBL_NOINTR)

/* MSG_DONTWAIT maps to a non-waiting (0) I/O lock acquisition. */
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)

/*
 * I/O serialization locks (sx), preventing interleaving of socket I/O.
 */
#define	SOCK_IO_SEND_LOCK(so, flags)					\
	soiolock((so), &(so)->so_snd_sx, (flags))
#define	SOCK_IO_SEND_UNLOCK(so)						\
	soiounlock(&(so)->so_snd_sx)
#define	SOCK_IO_SEND_OWNED(so)	sx_xlocked(&(so)->so_snd_sx)
#define	SOCK_IO_RECV_LOCK(so, flags)					\
	soiolock((so), &(so)->so_rcv_sx, (flags))
#define	SOCK_IO_RECV_UNLOCK(so)						\
	soiounlock(&(so)->so_rcv_sx)
#define	SOCK_IO_RECV_OWNED(so)	sx_xlocked(&(so)->so_rcv_sx)

/* do we have to send all at once on a socket? */
#define	sosendallatonce(so)						\
	((so)->so_proto->pr_flags & PR_ATOMIC)

/* can we read something from so? */
#define	soreadabledata(so)						\
	(sbavail(&(so)->so_rcv) >= (so)->so_rcv.sb_lowat ||		\
	    (so)->so_error || (so)->so_rerror)
#define	soreadable(so)							\
	(soreadabledata(so) || ((so)->so_rcv.sb_state & SBS_CANTRCVMORE))

/* can we write something to so? */
#define	sowriteable(so)							\
	((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat &&		\
	    (((so)->so_state&SS_ISCONNECTED) ||				\
	    ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) ||		\
	    ((so)->so_snd.sb_state & SBS_CANTSENDMORE) ||		\
	    (so)->so_error)

/*
 * soref()/sorele() ref-count the socket structure.
 * soref() may be called without owning socket lock, but in that case a
 * caller must own something that holds socket, and so_count must be not 0.
 * Note that you must still explicitly close the socket, but the last ref
 * count will free the structure.
 */
#define	soref(so)	refcount_acquire(&(so)->so_count)
#define	sorele(so) do {							\
	SOCK_UNLOCK_ASSERT(so);						\
	if (!refcount_release_if_not_last(&(so)->so_count)) {		\
		SOCK_LOCK(so);						\
		sorele_locked(so);					\
	}								\
} while (0)

/*
 * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to
 * avoid a non-atomic test-and-wakeup.  However, sowakeup is
 * responsible for releasing the lock if it is called.  We unlock only
 * if we don't call into sowakeup.  If any code is introduced that
 * directly invokes the underlying sowakeup() primitives, it must
 * maintain the same semantics.
 */
#define	sorwakeup(so) do {						\
	SOCK_RECVBUF_LOCK(so);						\
	sorwakeup_locked(so);						\
} while (0)

#define	sowwakeup(so) do {						\
	SOCK_SENDBUF_LOCK(so);						\
	sowwakeup_locked(so);						\
} while (0)

/*
 * Registration record for an accept_filter(9) module.
 */
struct accept_filter {
	char	accf_name[16];
	int	(*accf_callback)
		(struct socket *so, void *arg, int waitflag);
	void	*(*accf_create)
		(struct socket *so, char *arg);
	void	(*accf_destroy)
		(struct socket *so);
	SLIST_ENTRY(accept_filter) accf_next;
};

/* Boilerplate declaring and registering an accept filter kernel module. */
#define	ACCEPT_FILTER_DEFINE(modname, filtname, cb, create, destroy, ver) \
	static struct accept_filter modname##_filter = {		\
		.accf_name = filtname,					\
		.accf_callback = cb,					\
		.accf_create = create,					\
		.accf_destroy = destroy,				\
	};								\
	static moduledata_t modname##_mod = {				\
		.name = __XSTRING(modname),				\
		.evhand = accept_filt_generic_mod_event,		\
		.priv = &modname##_filter,				\
	};								\
	DECLARE_MODULE(modname, modname##_mod, SI_SUB_DRIVERS,		\
	    SI_ORDER_MIDDLE);						\
	MODULE_VERSION(modname, ver)

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_ACCF);
MALLOC_DECLARE(M_PCB);
MALLOC_DECLARE(M_SONAME);
#endif

/*
 * Socket specific helper hook point identifiers
 * Do not leave holes in the sequence, hook registration is a loop.
 */
#define	HHOOK_SOCKET_OPT		0
#define	HHOOK_SOCKET_CREATE		1
#define	HHOOK_SOCKET_RCV		2
#define	HHOOK_SOCKET_SND		3
#define	HHOOK_FILT_SOREAD		4
#define	HHOOK_FILT_SOWRITE		5
#define	HHOOK_SOCKET_CLOSE		6
#define	HHOOK_SOCKET_LAST		HHOOK_SOCKET_CLOSE

/* Argument structure passed to socket helper hooks. */
struct socket_hhook_data {
	struct socket	*so;
	struct mbuf	*m;
	void		*hctx;		/* hook point specific data */
	int		status;
};

extern int	maxsockets;
extern u_long	sb_max;
extern so_gen_t so_gencnt;

struct file;
struct filecaps;
struct filedesc;
struct mbuf;
struct sockaddr;
struct ucred;
struct uio;

/* Return values for socket upcalls. */
#define	SU_OK		0
#define	SU_ISCONNECTED	1

/*
 * From uipc_socket and friends
 */
int	getsockaddr(struct sockaddr **namp, const struct sockaddr *uaddr,
	    size_t len);
int	getsock_cap(struct thread *td, int fd, cap_rights_t *rightsp,
	    struct file **fpp, struct filecaps *havecaps);
int	getsock(struct thread *td, int fd, cap_rights_t *rightsp,
	    struct file **fpp);
void	soabort(struct socket *so);
int	soaccept(struct socket *so, struct sockaddr **nam);
void	soaio_enqueue(struct task *task);
void	soaio_rcv(void *context, int pending);
void	soaio_snd(void *context, int pending);
int	socheckuid(struct socket *so, uid_t uid);
int	sobind(struct socket *so, struct sockaddr *nam, struct thread *td);
int	sobindat(int fd, struct socket *so, struct sockaddr *nam,
	    struct thread *td);
int	soclose(struct socket *so);
int	soconnect(struct socket *so, struct sockaddr *nam, struct thread *td);
int	soconnectat(int fd, struct socket *so, struct sockaddr *nam,
	    struct thread *td);
int	soconnect2(struct socket *so1, struct socket *so2);
int	socreate(int dom, struct socket **aso, int type, int proto,
	    struct ucred *cred, struct thread *td);
int	sodisconnect(struct socket *so);
void	sodtor_set(struct socket *, so_dtor_t *);
struct	sockaddr *sodupsockaddr(const struct sockaddr *sa, int mflags);
void	sohasoutofband(struct socket *so);
int	solisten(struct socket *so, int backlog, struct thread *td);
void	solisten_proto(struct socket *so, int backlog);
void	solisten_proto_abort(struct socket *so);
int	solisten_proto_check(struct socket *so);
bool	solisten_enqueue(struct socket *, int);
int	solisten_dequeue(struct socket *, struct socket **, int);
struct socket *
	solisten_clone(struct socket *);
struct socket *
	sonewconn(struct socket *head, int connstatus);
struct socket *
	sopeeloff(struct socket *);
int	sopoll(struct socket *so, int events, struct ucred *active_cred,
	    struct thread *td);
int	sopoll_generic(struct socket *so, int events,
	    struct ucred *active_cred, struct thread *td);
int	soreceive(struct socket *so, struct sockaddr **paddr, struct uio *uio,
	    struct mbuf **mp0, struct mbuf **controlp, int *flagsp);
int	soreceive_stream(struct socket *so, struct sockaddr **paddr,
	    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
	    int *flagsp);
int	soreceive_dgram(struct socket *so, struct sockaddr **paddr,
	    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
	    int *flagsp);
int	soreceive_generic(struct socket *so, struct sockaddr **paddr,
	    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
	    int *flagsp);
void	sorele_locked(struct socket *so);
void	sodealloc(struct socket *);
int	soreserve(struct socket *so, u_long sndcc, u_long rcvcc);
void	sorflush(struct socket *so);
int	sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
	    struct mbuf *top, struct mbuf *control, int flags,
	    struct thread *td);
int	sousrsend(struct socket *so, struct sockaddr *addr, struct uio *uio,
	    struct mbuf *control, int flags, struct proc *);
int	sosend_dgram(struct socket *so, struct sockaddr *addr,
	    struct uio *uio, struct mbuf *top, struct mbuf *control,
	    int flags, struct thread *td);
int	sosend_generic(struct socket *so, struct sockaddr *addr,
	    struct uio *uio, struct mbuf *top, struct mbuf *control,
	    int flags, struct thread *td);
int	soshutdown(struct socket *so, int how);
void	soupcall_clear(struct socket *, sb_which);
void	soupcall_set(struct socket *, sb_which, so_upcall_t, void *);
void	solisten_upcall_set(struct socket *, so_upcall_t, void *);
void	sorwakeup_locked(struct socket *);
void	sowwakeup_locked(struct socket *);
void	sowakeup_aio(struct socket *, sb_which);
void	solisten_wakeup(struct socket *);
int	selsocket(struct socket *so, int events, struct timeval *tv,
	    struct thread *td);
void	soisconnected(struct socket *so);
void	soisconnecting(struct socket *so);
void	soisdisconnected(struct socket *so);
void	soisdisconnecting(struct socket *so);
void	socantrcvmore(struct socket *so);
void	socantrcvmore_locked(struct socket *so);
void	socantsendmore(struct socket *so);
void	socantsendmore_locked(struct socket *so);
void	soroverflow(struct socket *so);
void	soroverflow_locked(struct socket *so);
int	soiolock(struct socket *so, struct sx *sx, int flags);
void	soiounlock(struct sx *sx);

/*
 * Accept filter functions (duh).
 */
int	accept_filt_add(struct accept_filter *filt);
int	accept_filt_del(char *name);
struct	accept_filter *accept_filt_get(char *name);
#ifdef ACCEPT_FILTER_MOD
#ifdef SYSCTL_DECL
SYSCTL_DECL(_net_inet_accf);
#endif
int	accept_filt_generic_mod_event(module_t mod, int event, void *data);
#endif

#endif	/* _KERNEL */

/*
 * Structure to export socket from kernel to utilities, via sysctl(3).
 */
struct xsocket {
	ksize_t		xso_len;	/* length of this structure */
	kvaddr_t	xso_so;		/* kernel address of struct socket */
	kvaddr_t	so_pcb;		/* kernel address of struct inpcb */
	uint64_t	so_oobmark;
	int64_t		so_spare64[8];
	int32_t		xso_protocol;
	int32_t		xso_family;
	uint32_t	so_qlen;
	uint32_t	so_incqlen;
	uint32_t	so_qlimit;
	pid_t		so_pgid;
	uid_t		so_uid;
	int32_t		so_spare32[8];
	int16_t		so_type;
	int16_t		so_options;
	int16_t		so_linger;
	int16_t		so_state;
	int16_t		so_timeo;
	uint16_t	so_error;
	/* Exported snapshot of a socket buffer's counters and limits. */
	struct xsockbuf {
		uint32_t	sb_cc;
		uint32_t	sb_hiwat;
		uint32_t	sb_mbcnt;
		uint32_t	sb_spare0;	/* was sb_mcnt */
		uint32_t	sb_spare1;	/* was sb_ccnt */
		uint32_t	sb_mbmax;
		int32_t		sb_lowat;
		int32_t		sb_timeo;
		int16_t		sb_flags;
	} so_rcv, so_snd;
};

#ifdef _KERNEL
/* Fill the exported representations from live kernel structures. */
void	sotoxsocket(struct socket *so, struct xsocket *xso);
void	sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb);
#endif

/*
 * Socket buffer state bits.  Exported via libprocstat(3).
 */
#define	SBS_CANTSENDMORE	0x0010	/* can't send more data to peer */
#define	SBS_CANTRCVMORE		0x0020	/* can't receive more data from peer */
#define	SBS_RCVATMARK		0x0040	/* at mark on input */

#endif	/* !_SYS_SOCKETVAR_H_ */