xref: /freebsd/sys/sys/socketvar.h (revision 8c0d1eca)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1990, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	@(#)socketvar.h	8.3 (Berkeley) 2/19/95
32  *
33  * $FreeBSD$
34  */
35 
36 #ifndef _SYS_SOCKETVAR_H_
37 #define _SYS_SOCKETVAR_H_
38 
39 /*
40  * Socket generation count type.  Also used in xinpcb, xtcpcb, xunpcb.
41  */
42 typedef uint64_t so_gen_t;
43 
44 #if defined(_KERNEL) || defined(_WANT_SOCKET)
45 #include <sys/queue.h>			/* for TAILQ macros */
46 #include <sys/selinfo.h>		/* for struct selinfo */
47 #include <sys/_lock.h>
48 #include <sys/_mutex.h>
49 #include <sys/osd.h>
50 #include <sys/_sx.h>
51 #include <sys/sockbuf.h>
52 #ifdef _KERNEL
53 #include <sys/caprights.h>
54 #include <sys/sockopt.h>
55 #endif
56 
57 struct vnet;
58 
59 /*
60  * Kernel structure per socket.
61  * Contains send and receive buffer queues,
62  * handle on protocol and pointer to protocol
63  * private data and error information.
64  */
65 typedef	int so_upcall_t(struct socket *, void *, int);
66 typedef	void so_dtor_t(struct socket *);
67 
68 struct socket;
69 
/*
 * Position of a socket on its listening socket's accept queues.  The
 * non-zero values intentionally match the 0x0800/0x1000 bits skipped by
 * the SS_* so_state flags below, where this state historically lived.
 */
enum socket_qstate {
	SQ_NONE = 0,
	SQ_INCOMP = 0x0800,	/* on sol_incomp */
	SQ_COMP = 0x1000,	/* on sol_comp */
};
75 
76 /*-
77  * Locking key to struct socket:
78  * (a) constant after allocation, no locking required.
79  * (b) locked by SOCK_LOCK(so).
80  * (cr) locked by SOCK_RECVBUF_LOCK(so)
81  * (cs) locked by SOCK_SENDBUF_LOCK(so)
82  * (e) locked by SOLISTEN_LOCK() of corresponding listening socket.
83  * (f) not locked since integer reads/writes are atomic.
84  * (g) used only as a sleep/wakeup address, no value.
85  * (h) locked by global mutex so_global_mtx.
86  * (k) locked by KTLS workqueue mutex
87  */
TAILQ_HEAD(accept_queue, socket);
struct socket {
	struct mtx	so_lock;	/* protects (b) fields; see SOCK_LOCK() */
	volatile u_int	so_count;	/* (b / refcount) */
	struct selinfo	so_rdsel;	/* (b/cr) for so_rcv/so_comp */
	struct selinfo	so_wrsel;	/* (b/cs) for so_snd */
	int	so_options;		/* (b) from socket call, see socket.h */
	short	so_type;		/* (a) generic type, see socket.h */
	short	so_state;		/* (b) internal state flags SS_* */
	void	*so_pcb;		/* protocol control block */
	struct	vnet *so_vnet;		/* (a) network stack instance */
	struct	protosw *so_proto;	/* (a) protocol handle */
	short	so_linger;		/* time to linger close(2) */
	short	so_timeo;		/* (g) connection timeout */
	u_short	so_error;		/* (f) error affecting connection */
	u_short so_rerror;		/* (f) error affecting connection */
	struct	sigio *so_sigio;	/* [sg] information for async I/O or
					   out of band data (SIGURG) */
	struct	ucred *so_cred;		/* (a) user credentials */
	struct	label *so_label;	/* (b) MAC label for socket */
	/* NB: generation count must not be first. */
	so_gen_t so_gencnt;		/* (h) generation count */
	void	*so_emuldata;		/* (b) private data for emulators */
	so_dtor_t *so_dtor;		/* (b) optional destructor */
	struct	osd	osd;		/* Object Specific extensions */
	/*
	 * so_fibnum, so_user_cookie and friends can be used to attach
	 * some user-specified metadata to a socket, which then can be
	 * used by the kernel for various actions.
	 * so_user_cookie is used by ipfw/dummynet.
	 */
	int so_fibnum;		/* routing domain for this socket */
	uint32_t so_user_cookie;

	int so_ts_clock;	/* type of the clock used for timestamps */
	uint32_t so_max_pacing_rate;	/* (f) TX rate limit in bytes/s */

	/*
	 * Mutexes to prevent interleaving of socket I/O.  These have to be
	 * outside of the socket buffers in order to interlock with listen(2).
	 */
	struct sx so_snd_sx __aligned(CACHE_LINE_SIZE);
	struct mtx so_snd_mtx;		/* backs SOCK_SENDBUF_LOCK() */

	struct sx so_rcv_sx __aligned(CACHE_LINE_SIZE);
	struct mtx so_rcv_mtx;		/* backs SOCK_RECVBUF_LOCK() */

	/*
	 * A socket is either a regular dataflow socket (first arm) or a
	 * listening socket (second arm), never both at once, so the two
	 * states share storage.  SOLISTENING() discriminates the arms.
	 */
	union {
		/* Regular (data flow) socket. */
		struct {
			/* (cr, cs) Receive and send buffers. */
			struct sockbuf		so_rcv, so_snd;

			/* (e) Our place on accept queue. */
			TAILQ_ENTRY(socket)	so_list;
			struct socket		*so_listen;	/* (b) */
			enum socket_qstate so_qstate;		/* (b) */
			/* (b) cached MAC label for peer */
			struct	label		*so_peerlabel;
			u_long	so_oobmark;	/* chars to oob mark */

			/* (k) Our place on KTLS RX work queue. */
			STAILQ_ENTRY(socket)	so_ktls_rx_list;
		};
		/*
		 * Listening socket, where accepts occur, is so_listen in all
		 * subsidiary sockets.  If so_listen is NULL, socket is not
		 * related to an accept.  For a listening socket itself
		 * sol_incomp queues partially completed connections, while
		 * sol_comp is a queue of connections ready to be accepted.
		 * If a connection is aborted and it has so_listen set, then
		 * it has to be pulled out of either sol_incomp or sol_comp.
		 * We allow connections to queue up based on current queue
		 * lengths and limit on number of queued connections for this
		 * socket.
		 */
		struct {
			/* (e) queue of partial unaccepted connections */
			struct accept_queue	sol_incomp;
			/* (e) queue of complete unaccepted connections */
			struct accept_queue	sol_comp;
			u_int	sol_qlen;    /* (e) sol_comp length */
			u_int	sol_incqlen; /* (e) sol_incomp length */
			u_int	sol_qlimit;  /* (e) queue limit */

			/* accept_filter(9) optional data */
			struct	accept_filter	*sol_accept_filter;
			void	*sol_accept_filter_arg;	/* saved filter args */
			char	*sol_accept_filter_str;	/* saved user args */

			/* Optional upcall, for kernel socket. */
			so_upcall_t	*sol_upcall;	/* (e) */
			void		*sol_upcallarg;	/* (e) */

			/* Socket buffer parameters, to be copied to
			 * dataflow sockets, accepted from this one. */
			int		sol_sbrcv_lowat;
			int		sol_sbsnd_lowat;
			u_int		sol_sbrcv_hiwat;
			u_int		sol_sbsnd_hiwat;
			short		sol_sbrcv_flags;
			short		sol_sbsnd_flags;
			sbintime_t	sol_sbrcv_timeo;
			sbintime_t	sol_sbsnd_timeo;

			/* Information tracking listen queue overflows. */
			struct timeval	sol_lastover;	/* (e) */
			int		sol_overcount;	/* (e) */
		};
	};
};
199 #endif	/* defined(_KERNEL) || defined(_WANT_SOCKET) */
200 
/*
 * Socket state bits.
 *
 * Historically, these bits were all kept in the so_state field.
 * They are now split into separate, lock-specific fields.
 * so_state maintains basic socket state protected by the socket lock.
 * so_qstate holds information about the socket accept queues.
 * Each socket buffer also has a state field holding information
 * relevant to that socket buffer (can't send, rcv).
 * Many fields will be read without locks to improve performance and avoid
 * lock order issues.  However, this approach must be used with caution.
 */
#define	SS_NOFDREF		0x0001	/* no file table ref any more */
#define	SS_ISCONNECTED		0x0002	/* socket connected to a peer */
#define	SS_ISCONNECTING		0x0004	/* in process of connecting to peer */
#define	SS_ISDISCONNECTING	0x0008	/* in process of disconnecting */
#define	SS_NBIO			0x0100	/* non-blocking ops */
#define	SS_ASYNC		0x0200	/* async i/o notify */
#define	SS_ISCONFIRMING		0x0400	/* deciding to accept connection req */
/*
 * 0x0800 and 0x1000 are deliberately skipped: they correspond to
 * SQ_INCOMP/SQ_COMP in enum socket_qstate, which historically lived
 * in so_state.
 */
#define	SS_ISDISCONNECTED	0x2000	/* socket disconnected from peer */

/*
 * Protocols can mark a socket as SS_PROTOREF to indicate that, following
 * pru_detach, they still want the socket to persist, and will free it
 * themselves when they are done.  Protocols should only ever call sofree()
 * following setting this flag in pru_detach(), and never otherwise, as
 * sofree() bypasses socket reference counting.
 */
#define	SS_PROTOREF		0x4000	/* strong protocol reference */
230 
231 #ifdef _KERNEL
232 
/* Per-socket mutex accessors; so_lock protects the (b) fields. */
#define	SOCK_MTX(so)		(&(so)->so_lock)
#define	SOCK_LOCK(so)		mtx_lock(&(so)->so_lock)
#define	SOCK_OWNED(so)		mtx_owned(&(so)->so_lock)
#define	SOCK_UNLOCK(so)		mtx_unlock(&(so)->so_lock)
#define	SOCK_LOCK_ASSERT(so)	mtx_assert(&(so)->so_lock, MA_OWNED)
#define	SOCK_UNLOCK_ASSERT(so)	mtx_assert(&(so)->so_lock, MA_NOTOWNED)

/*
 * Listening-socket variants of the lock macros.  They take the same
 * so_lock, but additionally assert that SO_ACCEPTCONN is set, i.e. that
 * the listening arm of the struct socket union is the valid one.
 */
#define	SOLISTENING(sol)	(((sol)->so_options & SO_ACCEPTCONN) != 0)
#define	SOLISTEN_LOCK(sol)	do {					\
	mtx_lock(&(sol)->so_lock);					\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)
#define	SOLISTEN_TRYLOCK(sol)	mtx_trylock(&(sol)->so_lock)
#define	SOLISTEN_UNLOCK(sol)	do {					\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
	mtx_unlock(&(sol)->so_lock);					\
} while (0)
#define	SOLISTEN_LOCK_ASSERT(sol)	do {				\
	mtx_assert(&(sol)->so_lock, MA_OWNED);				\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)
257 
/*
 * Socket buffer locks.  These are strongly preferred over SOCKBUF_LOCK(sb)
 * macros, as we are moving towards protocol specific socket buffers.
 */
#define	SOCK_RECVBUF_MTX(so)						\
	(&(so)->so_rcv_mtx)
#define	SOCK_RECVBUF_LOCK(so)						\
	mtx_lock(SOCK_RECVBUF_MTX(so))
#define	SOCK_RECVBUF_UNLOCK(so)						\
	mtx_unlock(SOCK_RECVBUF_MTX(so))
#define	SOCK_RECVBUF_LOCK_ASSERT(so)					\
	mtx_assert(SOCK_RECVBUF_MTX(so), MA_OWNED)
#define	SOCK_RECVBUF_UNLOCK_ASSERT(so)					\
	mtx_assert(SOCK_RECVBUF_MTX(so), MA_NOTOWNED)

#define	SOCK_SENDBUF_MTX(so)						\
	(&(so)->so_snd_mtx)
#define	SOCK_SENDBUF_LOCK(so)						\
	mtx_lock(SOCK_SENDBUF_MTX(so))
#define	SOCK_SENDBUF_UNLOCK(so)						\
	mtx_unlock(SOCK_SENDBUF_MTX(so))
#define	SOCK_SENDBUF_LOCK_ASSERT(so)					\
	mtx_assert(SOCK_SENDBUF_MTX(so), MA_OWNED)
#define	SOCK_SENDBUF_UNLOCK_ASSERT(so)					\
	mtx_assert(SOCK_SENDBUF_MTX(so), MA_NOTOWNED)

/*
 * Generic forms taking an sb_which selector; they dispatch to the
 * receive or send buffer mutex via soeventmtx().
 */
#define	SOCK_BUF_LOCK(so, which)					\
	mtx_lock(soeventmtx(so, which))
#define	SOCK_BUF_UNLOCK(so, which)					\
	mtx_unlock(soeventmtx(so, which))
#define	SOCK_BUF_LOCK_ASSERT(so, which)					\
	mtx_assert(soeventmtx(so, which), MA_OWNED)
#define	SOCK_BUF_UNLOCK_ASSERT(so, which)				\
	mtx_assert(soeventmtx(so, which), MA_NOTOWNED)
292 
293 static inline struct sockbuf *
294 sobuf(struct socket *so, const sb_which which)
295 {
296 	return (which == SO_RCV ? &so->so_rcv : &so->so_snd);
297 }
298 
299 static inline struct mtx *
300 soeventmtx(struct socket *so, const sb_which which)
301 {
302 	return (which == SO_RCV ? SOCK_RECVBUF_MTX(so) : SOCK_SENDBUF_MTX(so));
303 }
304 
305 /*
306  * Macros for sockets and socket buffering.
307  */
308 
/*
 * Flags to soiolock().
 */
#define	SBL_WAIT	0x00000001	/* Wait if not immediately available. */
#define	SBL_NOINTR	0x00000002	/* Force non-interruptible sleep. */
#define	SBL_VALID	(SBL_WAIT | SBL_NOINTR)

/* MSG_DONTWAIT requests translate into a non-sleeping lock attempt. */
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)

/*
 * I/O serialization: sleepable sx locks (so_snd_sx/so_rcv_sx) that keep
 * concurrent send and receive operations on the same socket from
 * interleaving; see the comment on these fields in struct socket.
 */
#define	SOCK_IO_SEND_LOCK(so, flags)					\
	soiolock((so), &(so)->so_snd_sx, (flags))
#define	SOCK_IO_SEND_UNLOCK(so)						\
	soiounlock(&(so)->so_snd_sx)
#define	SOCK_IO_SEND_OWNED(so)	sx_xlocked(&(so)->so_snd_sx)
#define	SOCK_IO_RECV_LOCK(so, flags)					\
	soiolock((so), &(so)->so_rcv_sx, (flags))
#define	SOCK_IO_RECV_UNLOCK(so)						\
	soiounlock(&(so)->so_rcv_sx)
#define	SOCK_IO_RECV_OWNED(so)	sx_xlocked(&(so)->so_rcv_sx)
328 
/* do we have to send all at once on a socket? */
#define	sosendallatonce(so) \
    ((so)->so_proto->pr_flags & PR_ATOMIC)

/*
 * Can we read something from so?  True when buffered data reaches the
 * receive low watermark or an error (so_error/so_rerror) is pending;
 * soreadable() is additionally true once the peer can send no more.
 */
#define	soreadabledata(so) \
	(sbavail(&(so)->so_rcv) >= (so)->so_rcv.sb_lowat || \
	(so)->so_error || (so)->so_rerror)
#define	soreadable(so) \
	(soreadabledata(so) || ((so)->so_rcv.sb_state & SBS_CANTRCVMORE))

/*
 * Can we write something to so?  True when send space reaches the low
 * watermark while connected (or the protocol needs no connection), or
 * when a can't-send condition or pending error would make the attempt
 * return immediately anyway.
 */
#define	sowriteable(so) \
    ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \
	(((so)->so_state&SS_ISCONNECTED) || \
	  ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) || \
     ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \
     (so)->so_error)
347 
/*
 * soref()/sorele() ref-count the socket structure.
 * soref() may be called without owning socket lock, but in that case a
 * caller must own something that holds socket, and so_count must be not 0.
 * Note that you must still explicitly close the socket, but the last ref
 * count will free the structure.
 */
#define	soref(so)	refcount_acquire(&(so)->so_count)
/*
 * sorele() tries a lock-free release first; only a potentially-last
 * reference takes the socket lock and goes through sorele_locked(),
 * which may free the socket.
 */
#define	sorele(so) do {							\
	SOCK_UNLOCK_ASSERT(so);						\
	if (!refcount_release_if_not_last(&(so)->so_count)) {		\
		SOCK_LOCK(so);						\
		sorele_locked(so);					\
	}								\
} while (0)
363 
/*
 * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to
 * avoid a non-atomic test-and-wakeup.  However, sowakeup is
 * responsible for releasing the lock if it is called.  We unlock only
 * if we don't call into sowakeup.  If any code is introduced that
 * directly invokes the underlying sowakeup() primitives, it must
 * maintain the same semantics.
 */
#define	sorwakeup(so) do {						\
	SOCK_RECVBUF_LOCK(so);						\
	sorwakeup_locked(so);	/* consumes the recv buffer lock */	\
} while (0)

#define	sowwakeup(so) do {						\
	SOCK_SENDBUF_LOCK(so);						\
	sowwakeup_locked(so);	/* consumes the send buffer lock */	\
} while (0)
381 
/*
 * accept_filter(9) descriptor.  A filter defers completion of accept(2)
 * on a listening socket until its callback reports the connection ready;
 * instances live on a global list (see accept_filt_add/get/del below).
 */
struct accept_filter {
	char	accf_name[16];		/* name filters are looked up by */
	int	(*accf_callback)	/* readiness callback; presumably
					   returns SU_* values -- confirm */
		(struct socket *so, void *arg, int waitflag);
	void *	(*accf_create)		/* construct per-socket filter state */
		(struct socket *so, char *arg);
	void	(*accf_destroy)		/* release per-socket filter state */
		(struct socket *so);
	SLIST_ENTRY(accept_filter) accf_next;	/* global filter list linkage */
};
392 
/*
 * Declare an accept filter as a loadable kernel module in one statement:
 * instantiates the struct accept_filter, wraps it in a moduledata_t whose
 * event handler is accept_filt_generic_mod_event(), and registers the
 * module at SI_SUB_DRIVERS/SI_ORDER_MIDDLE with the given version.
 */
#define	ACCEPT_FILTER_DEFINE(modname, filtname, cb, create, destroy, ver) \
	static struct accept_filter modname##_filter = {		\
		.accf_name = filtname,					\
		.accf_callback = cb,					\
		.accf_create = create,					\
		.accf_destroy = destroy,				\
	};								\
	static moduledata_t modname##_mod = {				\
		.name = __XSTRING(modname),				\
		.evhand = accept_filt_generic_mod_event,		\
		.priv = &modname##_filter,				\
	};								\
	DECLARE_MODULE(modname, modname##_mod, SI_SUB_DRIVERS,		\
	    SI_ORDER_MIDDLE);						\
	MODULE_VERSION(modname, ver)
408 
409 #ifdef MALLOC_DECLARE
410 MALLOC_DECLARE(M_ACCF);
411 MALLOC_DECLARE(M_PCB);
412 MALLOC_DECLARE(M_SONAME);
413 #endif
414 
/*
 * Socket specific helper hook point identifiers
 * Do not leave holes in the sequence, hook registration is a loop.
 */
#define HHOOK_SOCKET_OPT		0
#define HHOOK_SOCKET_CREATE		1
#define HHOOK_SOCKET_RCV 		2
#define HHOOK_SOCKET_SND		3
#define HHOOK_FILT_SOREAD		4
#define HHOOK_FILT_SOWRITE		5
#define HHOOK_SOCKET_CLOSE		6
#define HHOOK_SOCKET_LAST		HHOOK_SOCKET_CLOSE

/* Argument bundle handed to helper hooks at the points above. */
struct socket_hhook_data {
	struct socket	*so;		/* socket the hook fires on */
	struct mbuf	*m;		/* associated data; presumably may be
					   NULL for non-data hooks -- confirm */
	void		*hctx;		/* hook point specific data */
	int		status;		/* result/status; semantics appear to
					   be per hook point -- confirm */
};
434 
435 extern int	maxsockets;
436 extern u_long	sb_max;
437 extern so_gen_t so_gencnt;
438 
439 struct file;
440 struct filecaps;
441 struct filedesc;
442 struct mbuf;
443 struct sockaddr;
444 struct ucred;
445 struct uio;
446 
447 /* Return values for socket upcalls. */
448 #define	SU_OK		0
449 #define	SU_ISCONNECTED	1
450 
451 /*
452  * From uipc_socket and friends
453  */
454 int	getsockaddr(struct sockaddr **namp, const struct sockaddr *uaddr,
455 	    size_t len);
456 int	getsock_cap(struct thread *td, int fd, cap_rights_t *rightsp,
457 	    struct file **fpp, u_int *fflagp, struct filecaps *havecaps);
458 void	soabort(struct socket *so);
459 int	soaccept(struct socket *so, struct sockaddr **nam);
460 void	soaio_enqueue(struct task *task);
461 void	soaio_rcv(void *context, int pending);
462 void	soaio_snd(void *context, int pending);
463 int	socheckuid(struct socket *so, uid_t uid);
464 int	sobind(struct socket *so, struct sockaddr *nam, struct thread *td);
465 int	sobindat(int fd, struct socket *so, struct sockaddr *nam,
466 	    struct thread *td);
467 int	soclose(struct socket *so);
468 int	soconnect(struct socket *so, struct sockaddr *nam, struct thread *td);
469 int	soconnectat(int fd, struct socket *so, struct sockaddr *nam,
470 	    struct thread *td);
471 int	soconnect2(struct socket *so1, struct socket *so2);
472 int	socreate(int dom, struct socket **aso, int type, int proto,
473 	    struct ucred *cred, struct thread *td);
474 int	sodisconnect(struct socket *so);
475 void	sodtor_set(struct socket *, so_dtor_t *);
476 struct	sockaddr *sodupsockaddr(const struct sockaddr *sa, int mflags);
477 void	sofree(struct socket *so);
478 void	sohasoutofband(struct socket *so);
479 int	solisten(struct socket *so, int backlog, struct thread *td);
480 void	solisten_proto(struct socket *so, int backlog);
481 void	solisten_proto_abort(struct socket *so);
482 int	solisten_proto_check(struct socket *so);
483 int	solisten_dequeue(struct socket *, struct socket **, int);
484 struct socket *
485 	sonewconn(struct socket *head, int connstatus);
486 struct socket *
487 	sopeeloff(struct socket *);
488 int	sopoll(struct socket *so, int events, struct ucred *active_cred,
489 	    struct thread *td);
490 int	sopoll_generic(struct socket *so, int events,
491 	    struct ucred *active_cred, struct thread *td);
492 int	soreceive(struct socket *so, struct sockaddr **paddr, struct uio *uio,
493 	    struct mbuf **mp0, struct mbuf **controlp, int *flagsp);
494 int	soreceive_stream(struct socket *so, struct sockaddr **paddr,
495 	    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
496 	    int *flagsp);
497 int	soreceive_dgram(struct socket *so, struct sockaddr **paddr,
498 	    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
499 	    int *flagsp);
500 int	soreceive_generic(struct socket *so, struct sockaddr **paddr,
501 	    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
502 	    int *flagsp);
503 void	sorele_locked(struct socket *so);
504 int	soreserve(struct socket *so, u_long sndcc, u_long rcvcc);
505 void	sorflush(struct socket *so);
506 int	sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
507 	    struct mbuf *top, struct mbuf *control, int flags,
508 	    struct thread *td);
509 int	sosend_dgram(struct socket *so, struct sockaddr *addr,
510 	    struct uio *uio, struct mbuf *top, struct mbuf *control,
511 	    int flags, struct thread *td);
512 int	sosend_generic(struct socket *so, struct sockaddr *addr,
513 	    struct uio *uio, struct mbuf *top, struct mbuf *control,
514 	    int flags, struct thread *td);
515 int	soshutdown(struct socket *so, int how);
516 void	soupcall_clear(struct socket *, sb_which);
517 void	soupcall_set(struct socket *, sb_which, so_upcall_t, void *);
518 void	solisten_upcall_set(struct socket *, so_upcall_t, void *);
519 void	sorwakeup_locked(struct socket *);
520 void	sowwakeup_locked(struct socket *);
521 void	sowakeup_aio(struct socket *, sb_which);
522 void	solisten_wakeup(struct socket *);
523 int	selsocket(struct socket *so, int events, struct timeval *tv,
524 	    struct thread *td);
525 void	soisconnected(struct socket *so);
526 void	soisconnecting(struct socket *so);
527 void	soisdisconnected(struct socket *so);
528 void	soisdisconnecting(struct socket *so);
529 void	socantrcvmore(struct socket *so);
530 void	socantrcvmore_locked(struct socket *so);
531 void	socantsendmore(struct socket *so);
532 void	socantsendmore_locked(struct socket *so);
533 void	soroverflow(struct socket *so);
534 void	soroverflow_locked(struct socket *so);
535 int	soiolock(struct socket *so, struct sx *sx, int flags);
536 void	soiounlock(struct sx *sx);
537 
538 /*
539  * Accept filter functions (duh).
540  */
541 int	accept_filt_add(struct accept_filter *filt);
542 int	accept_filt_del(char *name);
543 struct	accept_filter *accept_filt_get(char *name);
544 #ifdef ACCEPT_FILTER_MOD
545 #ifdef SYSCTL_DECL
546 SYSCTL_DECL(_net_inet_accf);
547 #endif
548 int	accept_filt_generic_mod_event(module_t mod, int event, void *data);
549 #endif
550 
551 #endif /* _KERNEL */
552 
/*
 * Structure to export socket from kernel to utilities, via sysctl(3).
 * Filled in by sotoxsocket()/sbtoxsockbuf(); the spare fields appear
 * reserved so the layout can grow without breaking consumers.
 */
struct xsocket {
	ksize_t		xso_len;	/* length of this structure */
	kvaddr_t	xso_so;		/* kernel address of struct socket */
	kvaddr_t	so_pcb;		/* kernel address of struct inpcb */
	uint64_t	so_oobmark;	/* mirrors socket so_oobmark */
	int64_t		so_spare64[8];	/* reserved for future use */
	int32_t		xso_protocol;
	int32_t		xso_family;
	uint32_t	so_qlen;	/* mirrors sol_qlen */
	uint32_t	so_incqlen;	/* mirrors sol_incqlen */
	uint32_t	so_qlimit;	/* mirrors sol_qlimit */
	pid_t		so_pgid;
	uid_t		so_uid;
	int32_t		so_spare32[8];	/* reserved for future use */
	int16_t		so_type;	/* mirrors socket so_type */
	int16_t		so_options;	/* mirrors socket so_options */
	int16_t		so_linger;	/* mirrors socket so_linger */
	int16_t		so_state;	/* mirrors socket so_state */
	int16_t		so_timeo;	/* mirrors socket so_timeo */
	uint16_t	so_error;	/* mirrors socket so_error */
	/* Per-buffer snapshot, one each for so_rcv and so_snd. */
	struct xsockbuf {
		uint32_t	sb_cc;
		uint32_t	sb_hiwat;
		uint32_t	sb_mbcnt;
		uint32_t	sb_spare0;	/* was sb_mcnt */
		uint32_t	sb_spare1;	/* was sb_ccnt */
		uint32_t	sb_mbmax;
		int32_t		sb_lowat;
		int32_t		sb_timeo;
		int16_t		sb_flags;
	} so_rcv, so_snd;
};
588 
589 #ifdef _KERNEL
590 void	sotoxsocket(struct socket *so, struct xsocket *xso);
591 void	sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb);
592 #endif
593 
594 /*
595  * Socket buffer state bits.  Exported via libprocstat(3).
596  */
597 #define	SBS_CANTSENDMORE	0x0010	/* can't send more data to peer */
598 #define	SBS_CANTRCVMORE		0x0020	/* can't receive more data from peer */
599 #define	SBS_RCVATMARK		0x0040	/* at mark on input */
600 
601 #endif /* !_SYS_SOCKETVAR_H_ */
602