/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California. All Rights Reserved.
 * Copyright (c) 2004-2009 Robert N. M. Watson All Rights Reserved.
 * Copyright (c) 2018 Matthew Macy
 * Copyright (c) 2022 Gleb Smirnoff <glebius@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * UNIX Domain (Local) Sockets
 *
 * This is an implementation of UNIX (local) domain sockets.  Each socket has
 * an associated struct unpcb (UNIX protocol control block).  Stream sockets
 * may be connected to 0 or 1 other socket.  Datagram sockets may be
 * connected to 0, 1, or many other sockets.  Sockets may be created and
 * connected in pairs (socketpair(2)), or bound/connected to using the file
 * system name space.  For most purposes, only the receive socket buffer is
 * used, as sending on one socket delivers directly to the receive socket
 * buffer of a second socket.
 *
 * The implementation is substantially complicated by the fact that
 * "ancillary data", such as file descriptors or credentials, may be passed
 * across UNIX domain sockets.  The potential for passing UNIX domain sockets
 * over other UNIX domain sockets requires the implementation of a simple
 * garbage collector to find and tear down cycles of disconnected sockets.
 *
 * TODO:
 *	RDM
 *	rethink name space problems
 *	need a proper out-of-band
 */
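
/*
 * A minimal userland sketch of the two features described above: creating a
 * connected pair with socketpair(2) and passing a file descriptor as
 * SCM_RIGHTS ancillary data.  Illustrative only (hence guarded by #if 0, as
 * this is a kernel source file); error handling is omitted.
 */
#if 0
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

static void
send_fd(int sock, int fd)
{
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof(int))];
	} cmsgbuf;
	struct msghdr msg;
	struct cmsghdr *cmsg;
	struct iovec iov;
	char c = 0;

	/* Send one byte of real data alongside the control message. */
	iov.iov_base = &c;
	iov.iov_len = 1;
	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cmsgbuf.buf;
	msg.msg_controllen = sizeof(cmsgbuf.buf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
	(void)sendmsg(sock, &msg, 0);
}

int
main(void)
{
	int pair[2];

	(void)socketpair(AF_LOCAL, SOCK_STREAM, 0, pair);
	/* Passing one end over the other is the situation the garbage
	 * collector described above exists for. */
	send_fd(pair[0], pair[1]);
	return (0);
}
#endif
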
58
59 #include <sys/cdefs.h>
60 #include "opt_ddb.h"
61
62 #include <sys/param.h>
63 #include <sys/capsicum.h>
64 #include <sys/domain.h>
65 #include <sys/eventhandler.h>
66 #include <sys/fcntl.h>
67 #include <sys/file.h>
68 #include <sys/filedesc.h>
69 #include <sys/kernel.h>
70 #include <sys/lock.h>
71 #include <sys/malloc.h>
72 #include <sys/mbuf.h>
73 #include <sys/mount.h>
74 #include <sys/mutex.h>
75 #include <sys/namei.h>
76 #include <sys/proc.h>
77 #include <sys/protosw.h>
78 #include <sys/queue.h>
79 #include <sys/resourcevar.h>
80 #include <sys/rwlock.h>
81 #include <sys/socket.h>
82 #include <sys/socketvar.h>
83 #include <sys/signalvar.h>
84 #include <sys/stat.h>
85 #include <sys/sx.h>
86 #include <sys/sysctl.h>
87 #include <sys/systm.h>
88 #include <sys/taskqueue.h>
89 #include <sys/un.h>
90 #include <sys/unpcb.h>
91 #include <sys/vnode.h>
92
93 #include <net/vnet.h>
94
95 #ifdef DDB
96 #include <ddb/ddb.h>
97 #endif
98
99 #include <security/mac/mac_framework.h>
100
101 #include <vm/uma.h>
102
103 MALLOC_DECLARE(M_FILECAPS);
104
105 static struct domain localdomain;
106
107 static uma_zone_t unp_zone;
108 static unp_gen_t unp_gencnt; /* (l) */
109 static u_int unp_count; /* (l) Count of local sockets. */
110 static ino_t unp_ino; /* Prototype for fake inode numbers. */
111 static int unp_rights; /* (g) File descriptors in flight. */
112 static struct unp_head unp_shead; /* (l) List of stream sockets. */
113 static struct unp_head unp_dhead; /* (l) List of datagram sockets. */
114 static struct unp_head unp_sphead; /* (l) List of seqpacket sockets. */
115
116 struct unp_defer {
117 SLIST_ENTRY(unp_defer) ud_link;
118 struct file *ud_fp;
119 };
120 static SLIST_HEAD(, unp_defer) unp_defers;
121 static int unp_defers_count;
122
123 static const struct sockaddr sun_noname = {
124 .sa_len = sizeof(sun_noname),
125 .sa_family = AF_LOCAL,
126 };
127
128 /*
129 * Garbage collection of cyclic file descriptor/socket references occurs
130 * asynchronously in a taskqueue context in order to avoid recursion and
131 * reentrance in the UNIX domain socket, file descriptor, and socket layer
132 * code. See unp_gc() for a full description.
133 */
134 static struct timeout_task unp_gc_task;
135
/*
 * The close of UNIX domain sockets attached as SCM_RIGHTS is postponed to
 * the taskqueue, to avoid arbitrary recursion depth.  The attached sockets
 * might themselves have other sockets attached.
 */
static struct task	unp_defer_task;

/*
 * For stream sockets, both the send and receive buffers are allocated PIPSIZ
 * bytes of buffering, although since data is delivered directly into the
 * peer's receive buffer, the effective total for a sender/receiver pair is
 * only PIPSIZ.
 *
 * Datagram sockets use the sendspace only as the maximum datagram size, and
 * do not really want to reserve the sendspace.  Their recvspace should be
 * large enough for at least one max-size datagram plus address.
 */
#ifndef PIPSIZ
#define	PIPSIZ	8192
#endif
static u_long	unpst_sendspace = PIPSIZ;
static u_long	unpst_recvspace = PIPSIZ;
static u_long	unpdg_maxdgram = 8*1024;	/* support 8KB syslog msgs */
static u_long	unpdg_recvspace = 16*1024;
static u_long	unpsp_sendspace = PIPSIZ;	/* really max datagram size */
static u_long	unpsp_recvspace = PIPSIZ;

static SYSCTL_NODE(_net, PF_LOCAL, local, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Local domain");
static SYSCTL_NODE(_net_local, SOCK_STREAM, stream,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "SOCK_STREAM");
static SYSCTL_NODE(_net_local, SOCK_DGRAM, dgram,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "SOCK_DGRAM");
static SYSCTL_NODE(_net_local, SOCK_SEQPACKET, seqpacket,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "SOCK_SEQPACKET");

SYSCTL_ULONG(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
    &unpst_sendspace, 0, "Default stream send space.");
SYSCTL_ULONG(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
    &unpst_recvspace, 0, "Default stream receive space.");
SYSCTL_ULONG(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
    &unpdg_maxdgram, 0, "Maximum datagram size.");
SYSCTL_ULONG(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
    &unpdg_recvspace, 0, "Default datagram receive space.");
SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, maxseqpacket, CTLFLAG_RW,
    &unpsp_sendspace, 0, "Default seqpacket send space.");
SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, recvspace, CTLFLAG_RW,
    &unpsp_recvspace, 0, "Default seqpacket receive space.");
SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0,
    "File descriptors in flight.");
SYSCTL_INT(_net_local, OID_AUTO, deferred, CTLFLAG_RD,
    &unp_defers_count, 0,
    "File descriptors deferred to taskqueue for close.");
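
/*
 * The knobs above appear in the net.local sysctl tree.  For example (an
 * illustrative invocation, not a tuning recommendation):
 *
 *	sysctl net.local.dgram.maxdgram
 *	sysctl net.local.stream.sendspace=16384
 */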

/*
 * Locking and synchronization:
 *
 * Several types of locks exist in the local domain socket implementation:
 * - a global linkage lock
 * - a global connection list lock
 * - the mtxpool lock
 * - per-unpcb mutexes
 *
 * The linkage lock protects the global socket lists, the generation number
 * counter and garbage collector state.
 *
 * The connection list lock protects the list of referring sockets in a
 * datagram socket PCB.  This lock is also overloaded to protect a global list
 * of sockets whose buffers contain socket references in the form of
 * SCM_RIGHTS messages.  To avoid recursion, such references are released by
 * a dedicated thread.
 *
 * The mtxpool lock protects the vnode from being modified while referenced.
 * Lock ordering rules require that it be acquired before any PCB locks.
 *
 * The unpcb lock (unp_mtx) protects the most commonly referenced fields in
 * the unpcb.  This includes the unp_conn field, which either links two
 * connected PCBs together (for connected socket types) or points at the
 * destination socket (for connectionless socket types).  The operations of
 * creating or destroying a connection therefore involve locking multiple
 * PCBs.  To avoid lock order reversals, in some cases this involves dropping
 * a PCB lock and using a reference counter to maintain liveness.
 *
 * UNIX domain sockets each have an unpcb hung off of their so_pcb pointer,
 * allocated in pr_attach() and freed in pr_detach().  The validity of that
 * pointer is an invariant, so no lock is required to dereference the so_pcb
 * pointer if a valid socket reference is held by the caller.  In practice,
 * this is always true during operations performed on a socket.  Each unpcb
 * has a back-pointer to its socket, unp_socket, which will be stable under
 * the same circumstances.
 *
 * This pointer may only be safely dereferenced as long as a valid reference
 * to the unpcb is held.  Typically, this reference will be from the socket,
 * or from another unpcb when the referring unpcb's lock is held (in order
 * that the reference not be invalidated during use).  For example, to follow
 * unp->unp_conn->unp_socket, you need to hold a lock on unp_conn to guarantee
 * that detach is not run clearing unp_socket.
 *
 * Blocking with UNIX domain sockets is a tricky issue: unlike most network
 * protocols, bind() is a non-atomic operation, and connect() requires
 * potential sleeping in the protocol, due to potentially waiting on local or
 * distributed file systems.  We try to separate "lookup" operations, which
 * may sleep, and the IPC operations themselves, which typically can occur
 * with relative atomicity as locks can be held over the entire operation.
 *
 * Another tricky issue is simultaneous multi-threaded or multi-process
 * access to a single UNIX domain socket.  These are handled by the flags
 * UNP_CONNECTING and UNP_BINDING, which prevent concurrent connecting or
 * binding, both of which involve dropping UNIX domain socket locks in order
 * to perform namei() and other file system operations.
 */
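
/*
 * A minimal sketch of the locking pattern most methods below follow when
 * they must operate on a connected pair (see uipc_disconnect() for a real
 * instance); note that unp_disconnect() consumes both PCB locks:
 *
 *	UNP_PCB_LOCK(unp);
 *	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
 *		unp_disconnect(unp, unp2);
 *	else
 *		UNP_PCB_UNLOCK(unp);
 */
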
static struct rwlock	unp_link_rwlock;
static struct mtx	unp_defers_lock;

#define	UNP_LINK_LOCK_INIT()		rw_init(&unp_link_rwlock,	\
					    "unp_link_rwlock")

#define	UNP_LINK_LOCK_ASSERT()		rw_assert(&unp_link_rwlock,	\
					    RA_LOCKED)
#define	UNP_LINK_UNLOCK_ASSERT()	rw_assert(&unp_link_rwlock,	\
					    RA_UNLOCKED)

#define	UNP_LINK_RLOCK()		rw_rlock(&unp_link_rwlock)
#define	UNP_LINK_RUNLOCK()		rw_runlock(&unp_link_rwlock)
#define	UNP_LINK_WLOCK()		rw_wlock(&unp_link_rwlock)
#define	UNP_LINK_WUNLOCK()		rw_wunlock(&unp_link_rwlock)
#define	UNP_LINK_WLOCK_ASSERT()		rw_assert(&unp_link_rwlock,	\
					    RA_WLOCKED)
#define	UNP_LINK_WOWNED()		rw_wowned(&unp_link_rwlock)

#define	UNP_DEFERRED_LOCK_INIT()	mtx_init(&unp_defers_lock,	\
					    "unp_defer", NULL, MTX_DEF)
#define	UNP_DEFERRED_LOCK()		mtx_lock(&unp_defers_lock)
#define	UNP_DEFERRED_UNLOCK()		mtx_unlock(&unp_defers_lock)

#define	UNP_REF_LIST_LOCK()		UNP_DEFERRED_LOCK();
#define	UNP_REF_LIST_UNLOCK()		UNP_DEFERRED_UNLOCK();

#define	UNP_PCB_LOCK_INIT(unp)		mtx_init(&(unp)->unp_mtx,	\
					    "unp", "unp",		\
					    MTX_DUPOK|MTX_DEF)
#define	UNP_PCB_LOCK_DESTROY(unp)	mtx_destroy(&(unp)->unp_mtx)
#define	UNP_PCB_LOCKPTR(unp)		(&(unp)->unp_mtx)
#define	UNP_PCB_LOCK(unp)		mtx_lock(&(unp)->unp_mtx)
#define	UNP_PCB_TRYLOCK(unp)		mtx_trylock(&(unp)->unp_mtx)
#define	UNP_PCB_UNLOCK(unp)		mtx_unlock(&(unp)->unp_mtx)
#define	UNP_PCB_OWNED(unp)		mtx_owned(&(unp)->unp_mtx)
#define	UNP_PCB_LOCK_ASSERT(unp)	mtx_assert(&(unp)->unp_mtx, MA_OWNED)
#define	UNP_PCB_UNLOCK_ASSERT(unp)	mtx_assert(&(unp)->unp_mtx, MA_NOTOWNED)

static int	uipc_connect2(struct socket *, struct socket *);
static int	uipc_ctloutput(struct socket *, struct sockopt *);
static int	unp_connect(struct socket *, struct sockaddr *,
		    struct thread *);
static int	unp_connectat(int, struct socket *, struct sockaddr *,
		    struct thread *, bool);
static void	unp_connect2(struct socket *so, struct socket *so2);
static void	unp_disconnect(struct unpcb *unp, struct unpcb *unp2);
static void	unp_dispose(struct socket *so);
static void	unp_shutdown(struct unpcb *);
static void	unp_drop(struct unpcb *);
static void	unp_gc(__unused void *, int);
static void	unp_scan(struct mbuf *, void (*)(struct filedescent **, int));
static void	unp_discard(struct file *);
static void	unp_freerights(struct filedescent **, int);
static int	unp_internalize(struct mbuf **, struct thread *,
		    struct mbuf **, u_int *, u_int *);
static void	unp_internalize_fp(struct file *);
static int	unp_externalize(struct mbuf *, struct mbuf **, int);
static int	unp_externalize_fp(struct file *);
static struct mbuf	*unp_addsockcred(struct thread *, struct mbuf *,
		    int, struct mbuf **, u_int *, u_int *);
static void	unp_process_defers(void * __unused, int);

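/*
 * unpcb reference counting.  unp_pcb_rele() drops a reference and, if it was
 * the last one, destroys the PCB lock and frees the PCB;
 * unp_pcb_rele_notlast() asserts that the reference it releases is not the
 * last one.
 */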
static void
unp_pcb_hold(struct unpcb *unp)
{
	u_int old __unused;

	old = refcount_acquire(&unp->unp_refcount);
	KASSERT(old > 0, ("%s: unpcb %p has no references", __func__, unp));
}

static __result_use_check bool
unp_pcb_rele(struct unpcb *unp)
{
	bool ret;

	UNP_PCB_LOCK_ASSERT(unp);

	if ((ret = refcount_release(&unp->unp_refcount))) {
		UNP_PCB_UNLOCK(unp);
		UNP_PCB_LOCK_DESTROY(unp);
		uma_zfree(unp_zone, unp);
	}
	return (ret);
}

static void
unp_pcb_rele_notlast(struct unpcb *unp)
{
	bool ret __unused;

	ret = refcount_release(&unp->unp_refcount);
	KASSERT(!ret, ("%s: unpcb %p has no references", __func__, unp));
}

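/*
 * Lock a pair of unpcbs in a consistent (address) order, so that two threads
 * locking the same pair cannot deadlock against each other.  The PCB mutexes
 * are initialized with MTX_DUPOK, which permits one thread to hold two of
 * them at once.
 */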
static void
unp_pcb_lock_pair(struct unpcb *unp, struct unpcb *unp2)
{
	UNP_PCB_UNLOCK_ASSERT(unp);
	UNP_PCB_UNLOCK_ASSERT(unp2);

	if (unp == unp2) {
		UNP_PCB_LOCK(unp);
	} else if ((uintptr_t)unp2 > (uintptr_t)unp) {
		UNP_PCB_LOCK(unp);
		UNP_PCB_LOCK(unp2);
	} else {
		UNP_PCB_LOCK(unp2);
		UNP_PCB_LOCK(unp);
	}
}

static void
unp_pcb_unlock_pair(struct unpcb *unp, struct unpcb *unp2)
{
	UNP_PCB_UNLOCK(unp);
	if (unp != unp2)
		UNP_PCB_UNLOCK(unp2);
}

/*
 * Try to lock the connected peer of an already locked socket.  In some cases
 * this requires that we unlock the current socket.  The pairbusy counter is
 * used to block concurrent connection attempts while the lock is dropped.
 * The caller must be careful to revalidate PCB state.
 */
static struct unpcb *
unp_pcb_lock_peer(struct unpcb *unp)
{
	struct unpcb *unp2;

	UNP_PCB_LOCK_ASSERT(unp);
	unp2 = unp->unp_conn;
	if (unp2 == NULL)
		return (NULL);
	if (__predict_false(unp == unp2))
		return (unp);

	UNP_PCB_UNLOCK_ASSERT(unp2);

	if (__predict_true(UNP_PCB_TRYLOCK(unp2)))
		return (unp2);
	if ((uintptr_t)unp2 > (uintptr_t)unp) {
		UNP_PCB_LOCK(unp2);
		return (unp2);
	}
	unp->unp_pairbusy++;
	unp_pcb_hold(unp2);
	UNP_PCB_UNLOCK(unp);

	UNP_PCB_LOCK(unp2);
	UNP_PCB_LOCK(unp);
	KASSERT(unp->unp_conn == unp2 || unp->unp_conn == NULL,
	    ("%s: socket %p was reconnected", __func__, unp));
	if (--unp->unp_pairbusy == 0 && (unp->unp_flags & UNP_WAITING) != 0) {
		unp->unp_flags &= ~UNP_WAITING;
		wakeup(unp);
	}
	if (unp_pcb_rele(unp2)) {
		/* unp2 is unlocked. */
		return (NULL);
	}
	if (unp->unp_conn == NULL) {
		UNP_PCB_UNLOCK(unp2);
		return (NULL);
	}
	return (unp2);
}

static void
uipc_abort(struct socket *so)
{
	struct unpcb *unp, *unp2;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_abort: unp == NULL"));
	UNP_PCB_UNLOCK_ASSERT(unp);

	UNP_PCB_LOCK(unp);
	unp2 = unp->unp_conn;
	if (unp2 != NULL) {
		unp_pcb_hold(unp2);
		UNP_PCB_UNLOCK(unp);
		unp_drop(unp2);
	} else
		UNP_PCB_UNLOCK(unp);
}

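/*
 * Allocate the unpcb for a new socket, reserve socket buffer space according
 * to the socket type, and link the PCB onto the global list for that type.
 */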
static int
uipc_attach(struct socket *so, int proto, struct thread *td)
{
	u_long sendspace, recvspace;
	struct unpcb *unp;
	int error;
	bool locked;

	KASSERT(so->so_pcb == NULL, ("uipc_attach: so_pcb != NULL"));
	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
		switch (so->so_type) {
		case SOCK_STREAM:
			sendspace = unpst_sendspace;
			recvspace = unpst_recvspace;
			break;

		case SOCK_DGRAM:
			STAILQ_INIT(&so->so_rcv.uxdg_mb);
			STAILQ_INIT(&so->so_snd.uxdg_mb);
			TAILQ_INIT(&so->so_rcv.uxdg_conns);
			/*
			 * Since the send buffer is either bypassed or is a
			 * part of the one-to-many receive buffer, we assign
			 * both space limits to unpdg_recvspace.
			 */
			sendspace = recvspace = unpdg_recvspace;
			break;

		case SOCK_SEQPACKET:
			sendspace = unpsp_sendspace;
			recvspace = unpsp_recvspace;
			break;

		default:
			panic("uipc_attach");
		}
		error = soreserve(so, sendspace, recvspace);
		if (error)
			return (error);
	}
	unp = uma_zalloc(unp_zone, M_NOWAIT | M_ZERO);
	if (unp == NULL)
		return (ENOBUFS);
	LIST_INIT(&unp->unp_refs);
	UNP_PCB_LOCK_INIT(unp);
	unp->unp_socket = so;
	so->so_pcb = unp;
	refcount_init(&unp->unp_refcount, 1);

	if ((locked = UNP_LINK_WOWNED()) == false)
		UNP_LINK_WLOCK();

	unp->unp_gencnt = ++unp_gencnt;
	unp->unp_ino = ++unp_ino;
	unp_count++;
	switch (so->so_type) {
	case SOCK_STREAM:
		LIST_INSERT_HEAD(&unp_shead, unp, unp_link);
		break;

	case SOCK_DGRAM:
		LIST_INSERT_HEAD(&unp_dhead, unp, unp_link);
		break;

	case SOCK_SEQPACKET:
		LIST_INSERT_HEAD(&unp_sphead, unp, unp_link);
		break;

	default:
		panic("uipc_attach");
	}

	if (locked == false)
		UNP_LINK_WUNLOCK();

	return (0);
}

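/*
 * Bind a name in the file system name space to the socket, creating a VSOCK
 * vnode that points back at the PCB.
 */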
static int
uipc_bindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_un *soun = (struct sockaddr_un *)nam;
	struct vattr vattr;
	int error, namelen;
	struct nameidata nd;
	struct unpcb *unp;
	struct vnode *vp;
	struct mount *mp;
	cap_rights_t rights;
	char *buf;

	if (nam->sa_family != AF_UNIX)
		return (EAFNOSUPPORT);

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_bind: unp == NULL"));

	if (soun->sun_len > sizeof(struct sockaddr_un))
		return (EINVAL);
	namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
	if (namelen <= 0)
		return (EINVAL);

	/*
	 * We don't allow simultaneous bind() calls on a single UNIX domain
	 * socket, so flag in-progress operations, and return an error if an
	 * operation is already in progress.
	 *
	 * Historically, we have not allowed a socket to be rebound, so this
	 * also returns an error.  Not allowing re-binding simplifies the
	 * implementation and avoids a great many possible failure modes.
	 */
	UNP_PCB_LOCK(unp);
	if (unp->unp_vnode != NULL) {
		UNP_PCB_UNLOCK(unp);
		return (EINVAL);
	}
	if (unp->unp_flags & UNP_BINDING) {
		UNP_PCB_UNLOCK(unp);
		return (EALREADY);
	}
	unp->unp_flags |= UNP_BINDING;
	UNP_PCB_UNLOCK(unp);

	buf = malloc(namelen + 1, M_TEMP, M_WAITOK);
	bcopy(soun->sun_path, buf, namelen);
	buf[namelen] = 0;

restart:
	NDINIT_ATRIGHTS(&nd, CREATE, NOFOLLOW | LOCKPARENT | NOCACHE,
	    UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_BINDAT));
	/* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
	error = namei(&nd);
	if (error)
		goto error;
	vp = nd.ni_vp;
	if (vp != NULL || vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
		NDFREE_PNBUF(&nd);
		if (nd.ni_dvp == vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		if (vp != NULL) {
			vrele(vp);
			error = EADDRINUSE;
			goto error;
		}
		error = vn_start_write(NULL, &mp, V_XSLEEP | V_PCATCH);
		if (error)
			goto error;
		goto restart;
	}
	VATTR_NULL(&vattr);
	vattr.va_type = VSOCK;
	vattr.va_mode = (ACCESSPERMS & ~td->td_proc->p_pd->pd_cmask);
#ifdef MAC
	error = mac_vnode_check_create(td->td_ucred, nd.ni_dvp, &nd.ni_cnd,
	    &vattr);
#endif
	if (error == 0) {
		/*
		 * The prior lookup may have left LK_SHARED in cn_lkflags,
		 * and VOP_CREATE technically only requires the new vnode to
		 * be locked shared.  Most filesystems will return the new
		 * vnode locked exclusive regardless, but we should explicitly
		 * specify that here since we require it and assert to that
		 * effect below.
		 */
		nd.ni_cnd.cn_lkflags = (nd.ni_cnd.cn_lkflags & ~LK_SHARED) |
		    LK_EXCLUSIVE;
		error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
	}
	NDFREE_PNBUF(&nd);
	if (error) {
		VOP_VPUT_PAIR(nd.ni_dvp, NULL, true);
		vn_finished_write(mp);
		if (error == ERELOOKUP)
			goto restart;
		goto error;
	}
	vp = nd.ni_vp;
	ASSERT_VOP_ELOCKED(vp, "uipc_bind");
	soun = (struct sockaddr_un *)sodupsockaddr(nam, M_WAITOK);

	UNP_PCB_LOCK(unp);
	VOP_UNP_BIND(vp, unp);
	unp->unp_vnode = vp;
	unp->unp_addr = soun;
	unp->unp_flags &= ~UNP_BINDING;
	UNP_PCB_UNLOCK(unp);
	vref(vp);
	VOP_VPUT_PAIR(nd.ni_dvp, &vp, true);
	vn_finished_write(mp);
	free(buf, M_TEMP);
	return (0);

error:
	UNP_PCB_LOCK(unp);
	unp->unp_flags &= ~UNP_BINDING;
	UNP_PCB_UNLOCK(unp);
	free(buf, M_TEMP);
	return (error);
}

static int
uipc_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{

	return (uipc_bindat(AT_FDCWD, so, nam, td));
}

static int
uipc_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	KASSERT(td == curthread, ("uipc_connect: td != curthread"));
	error = unp_connect(so, nam, td);
	return (error);
}

static int
uipc_connectat(int fd, struct socket *so, struct sockaddr *nam,
    struct thread *td)
{
	int error;

	KASSERT(td == curthread, ("uipc_connectat: td != curthread"));
	error = unp_connectat(fd, so, nam, td, false);
	return (error);
}

static void
uipc_close(struct socket *so)
{
	struct unpcb *unp, *unp2;
	struct vnode *vp = NULL;
	struct mtx *vplock;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_close: unp == NULL"));

	vplock = NULL;
	if ((vp = unp->unp_vnode) != NULL) {
		vplock = mtx_pool_find(mtxpool_sleep, vp);
		mtx_lock(vplock);
	}
	UNP_PCB_LOCK(unp);
	if (vp && unp->unp_vnode == NULL) {
		mtx_unlock(vplock);
		vp = NULL;
	}
	if (vp != NULL) {
		VOP_UNP_DETACH(vp);
		unp->unp_vnode = NULL;
	}
	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
		unp_disconnect(unp, unp2);
	else
		UNP_PCB_UNLOCK(unp);
	if (vp) {
		mtx_unlock(vplock);
		vrele(vp);
	}
}

static int
uipc_connect2(struct socket *so1, struct socket *so2)
{
	struct unpcb *unp, *unp2;

	if (so1->so_type != so2->so_type)
		return (EPROTOTYPE);

	unp = so1->so_pcb;
	KASSERT(unp != NULL, ("uipc_connect2: unp == NULL"));
	unp2 = so2->so_pcb;
	KASSERT(unp2 != NULL, ("uipc_connect2: unp2 == NULL"));
	unp_pcb_lock_pair(unp, unp2);
	unp_connect2(so1, so2);
	unp_pcb_unlock_pair(unp, unp2);

	return (0);
}

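/*
 * Tear down the PCB: unlink it from the global lists, detach its vnode (if
 * any), disconnect the peer, and drop every datagram sender that is still
 * connected to us.
 */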
static void
uipc_detach(struct socket *so)
{
	struct unpcb *unp, *unp2;
	struct mtx *vplock;
	struct vnode *vp;
	int local_unp_rights;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_detach: unp == NULL"));

	vp = NULL;
	vplock = NULL;

	if (!SOLISTENING(so))
		unp_dispose(so);

	UNP_LINK_WLOCK();
	LIST_REMOVE(unp, unp_link);
	if (unp->unp_gcflag & UNPGC_DEAD)
		LIST_REMOVE(unp, unp_dead);
	unp->unp_gencnt = ++unp_gencnt;
	--unp_count;
	UNP_LINK_WUNLOCK();

	UNP_PCB_UNLOCK_ASSERT(unp);
restart:
	if ((vp = unp->unp_vnode) != NULL) {
		vplock = mtx_pool_find(mtxpool_sleep, vp);
		mtx_lock(vplock);
	}
	UNP_PCB_LOCK(unp);
	if (unp->unp_vnode != vp && unp->unp_vnode != NULL) {
		if (vplock)
			mtx_unlock(vplock);
		UNP_PCB_UNLOCK(unp);
		goto restart;
	}
	if ((vp = unp->unp_vnode) != NULL) {
		VOP_UNP_DETACH(vp);
		unp->unp_vnode = NULL;
	}
	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
		unp_disconnect(unp, unp2);
	else
		UNP_PCB_UNLOCK(unp);

	UNP_REF_LIST_LOCK();
	while (!LIST_EMPTY(&unp->unp_refs)) {
		struct unpcb *ref = LIST_FIRST(&unp->unp_refs);

		unp_pcb_hold(ref);
		UNP_REF_LIST_UNLOCK();

		MPASS(ref != unp);
		UNP_PCB_UNLOCK_ASSERT(ref);
		unp_drop(ref);
		UNP_REF_LIST_LOCK();
	}
	UNP_REF_LIST_UNLOCK();

	UNP_PCB_LOCK(unp);
	local_unp_rights = unp_rights;
	unp->unp_socket->so_pcb = NULL;
	unp->unp_socket = NULL;
	free(unp->unp_addr, M_SONAME);
	unp->unp_addr = NULL;
	if (!unp_pcb_rele(unp))
		UNP_PCB_UNLOCK(unp);
	if (vp) {
		mtx_unlock(vplock);
		vrele(vp);
	}
	if (local_unp_rights)
		taskqueue_enqueue_timeout(taskqueue_thread, &unp_gc_task, -1);

	switch (so->so_type) {
	case SOCK_DGRAM:
		/*
		 * Everything should have been unlinked/freed by unp_dispose()
		 * and/or unp_disconnect().
		 */
		MPASS(so->so_rcv.uxdg_peeked == NULL);
		MPASS(STAILQ_EMPTY(&so->so_rcv.uxdg_mb));
		MPASS(TAILQ_EMPTY(&so->so_rcv.uxdg_conns));
		MPASS(STAILQ_EMPTY(&so->so_snd.uxdg_mb));
	}
}

static int
uipc_disconnect(struct socket *so)
{
	struct unpcb *unp, *unp2;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_disconnect: unp == NULL"));

	UNP_PCB_LOCK(unp);
	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
		unp_disconnect(unp, unp2);
	else
		UNP_PCB_UNLOCK(unp);
	return (0);
}

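/*
 * Prepare a bound stream or seqpacket socket to accept connections.  The
 * listener's credentials are captured here so that they can later be
 * reported via LOCAL_PEERCRED.
 */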
static int
uipc_listen(struct socket *so, int backlog, struct thread *td)
{
	struct unpcb *unp;
	int error;

	MPASS(so->so_type != SOCK_DGRAM);

	/*
	 * Synchronize with concurrent connection attempts.
	 */
	error = 0;
	unp = sotounpcb(so);
	UNP_PCB_LOCK(unp);
	if (unp->unp_conn != NULL || (unp->unp_flags & UNP_CONNECTING) != 0)
		error = EINVAL;
	else if (unp->unp_vnode == NULL)
		error = EDESTADDRREQ;
	if (error != 0) {
		UNP_PCB_UNLOCK(unp);
		return (error);
	}

	SOCK_LOCK(so);
	error = solisten_proto_check(so);
	if (error == 0) {
		cru2xt(td, &unp->unp_peercred);
		solisten_proto(so, backlog);
	}
	SOCK_UNLOCK(so);
	UNP_PCB_UNLOCK(unp);
	return (error);
}

static int
uipc_peeraddr(struct socket *so, struct sockaddr *ret)
{
	struct unpcb *unp, *unp2;
	const struct sockaddr *sa;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_peeraddr: unp == NULL"));

	UNP_PCB_LOCK(unp);
	unp2 = unp_pcb_lock_peer(unp);
	if (unp2 != NULL) {
		if (unp2->unp_addr != NULL)
			sa = (struct sockaddr *)unp2->unp_addr;
		else
			sa = &sun_noname;
		bcopy(sa, ret, sa->sa_len);
		unp_pcb_unlock_pair(unp, unp2);
	} else {
		UNP_PCB_UNLOCK(unp);
		sa = &sun_noname;
		bcopy(sa, ret, sa->sa_len);
	}
	return (0);
}

static int
uipc_rcvd(struct socket *so, int flags)
{
	struct unpcb *unp, *unp2;
	struct socket *so2;
	u_int mbcnt, sbcc;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("%s: unp == NULL", __func__));
	KASSERT(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET,
	    ("%s: socktype %d", __func__, so->so_type));

	/*
	 * Adjust backpressure on the sender and wake up any threads waiting
	 * to write.
	 *
	 * The unp lock is acquired to maintain the validity of the unp_conn
	 * pointer; no lock on unp2 is required as unp2->unp_socket will be
	 * static as long as we don't permit unp2 to disconnect from unp,
	 * which is prevented by the lock on unp.  We cache values from
	 * so_rcv to avoid holding the so_rcv lock over the entire
	 * transaction on the remote so_snd.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	mbcnt = so->so_rcv.sb_mbcnt;
	sbcc = sbavail(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_rcv);
	/*
	 * There is a benign race condition at this point.  If we're planning
	 * to clear SB_STOP, but uipc_send is called on the connected socket
	 * at this instant, it might add data to the sockbuf and set SB_STOP.
	 * Then we would erroneously clear SB_STOP below, even though the
	 * sockbuf is full.  The race is benign because the only ill effect is
	 * to allow the sockbuf to exceed its size limit, and the size limits
	 * are not strictly guaranteed anyway.
	 */
	UNP_PCB_LOCK(unp);
	unp2 = unp->unp_conn;
	if (unp2 == NULL) {
		UNP_PCB_UNLOCK(unp);
		return (0);
	}
	so2 = unp2->unp_socket;
	SOCKBUF_LOCK(&so2->so_snd);
	if (sbcc < so2->so_snd.sb_hiwat && mbcnt < so2->so_snd.sb_mbmax)
		so2->so_snd.sb_flags &= ~SB_STOP;
	sowwakeup_locked(so2);
	UNP_PCB_UNLOCK(unp);
	return (0);
}

static int
uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct thread *td)
{
	struct unpcb *unp, *unp2;
	struct socket *so2;
	u_int mbcnt, sbcc;
	int error;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("%s: unp == NULL", __func__));
	KASSERT(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET,
	    ("%s: socktype %d", __func__, so->so_type));

	error = 0;
	if (flags & PRUS_OOB) {
		error = EOPNOTSUPP;
		goto release;
	}
	if (control != NULL &&
	    (error = unp_internalize(&control, td, NULL, NULL, NULL)))
		goto release;

	unp2 = NULL;
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		if (nam != NULL) {
			if ((error = unp_connect(so, nam, td)) != 0)
				goto out;
		} else {
			error = ENOTCONN;
			goto out;
		}
	}

	UNP_PCB_LOCK(unp);
	if ((unp2 = unp_pcb_lock_peer(unp)) == NULL) {
		UNP_PCB_UNLOCK(unp);
		error = ENOTCONN;
		goto out;
	} else if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		unp_pcb_unlock_pair(unp, unp2);
		error = EPIPE;
		goto out;
	}
	UNP_PCB_UNLOCK(unp);
	if ((so2 = unp2->unp_socket) == NULL) {
		UNP_PCB_UNLOCK(unp2);
		error = ENOTCONN;
		goto out;
	}
	SOCKBUF_LOCK(&so2->so_rcv);
	if (unp2->unp_flags & UNP_WANTCRED_MASK) {
		/*
		 * Credentials are passed only once on SOCK_STREAM and
		 * SOCK_SEQPACKET (LOCAL_CREDS => WANTCRED_ONESHOT), or
		 * forever (LOCAL_CREDS_PERSISTENT => WANTCRED_ALWAYS).
		 */
		control = unp_addsockcred(td, control, unp2->unp_flags, NULL,
		    NULL, NULL);
		unp2->unp_flags &= ~UNP_WANTCRED_ONESHOT;
	}

	/*
	 * Send to paired receive port and wake up readers.  Don't
	 * check for space available in the receive buffer if we're
	 * attaching ancillary data; Unix domain sockets only check
	 * for space in the sending sockbuf, and that check is
	 * performed one level up the stack.  At that level we cannot
	 * precisely account for the amount of buffer space used
	 * (e.g., because control messages are not yet internalized).
	 */
	switch (so->so_type) {
	case SOCK_STREAM:
		if (control != NULL) {
			sbappendcontrol_locked(&so2->so_rcv,
			    m->m_len > 0 ? m : NULL, control, flags);
			control = NULL;
		} else
			sbappend_locked(&so2->so_rcv, m, flags);
		break;

	case SOCK_SEQPACKET:
		if (sbappendaddr_nospacecheck_locked(&so2->so_rcv,
		    &sun_noname, m, control))
			control = NULL;
		break;
	}

	mbcnt = so2->so_rcv.sb_mbcnt;
	sbcc = sbavail(&so2->so_rcv);
	if (sbcc)
		sorwakeup_locked(so2);
	else
		SOCKBUF_UNLOCK(&so2->so_rcv);

	/*
	 * The PCB lock on unp2 protects the SB_STOP flag.  Without it,
	 * it would be possible for uipc_rcvd to be called at this
	 * point, drain the receiving sockbuf, clear SB_STOP, and then
	 * we would set SB_STOP below.  That could lead to an empty
	 * sockbuf having SB_STOP set.
	 */
	SOCKBUF_LOCK(&so->so_snd);
	if (sbcc >= so->so_snd.sb_hiwat || mbcnt >= so->so_snd.sb_mbmax)
		so->so_snd.sb_flags |= SB_STOP;
	SOCKBUF_UNLOCK(&so->so_snd);
	UNP_PCB_UNLOCK(unp2);
	m = NULL;
out:
	/*
	 * PRUS_EOF is equivalent to pr_send followed by pr_shutdown.
	 */
	if (flags & PRUS_EOF) {
		UNP_PCB_LOCK(unp);
		socantsendmore(so);
		unp_shutdown(unp);
		UNP_PCB_UNLOCK(unp);
	}
	if (control != NULL && error != 0)
		unp_scan(control, unp_freerights);

release:
	if (control != NULL)
		m_freem(control);
	/*
	 * In case of PRUS_NOTREADY, uipc_ready() is responsible
	 * for freeing memory.
	 */
	if (m != NULL && (flags & PRUS_NOTREADY) == 0)
		m_freem(m);
	return (error);
}

/* PF_UNIX/SOCK_DGRAM version of sbspace() */
static inline bool
uipc_dgram_sbspace(struct sockbuf *sb, u_int cc, u_int mbcnt)
{
	u_int bleft, mleft;

	/*
	 * Negative space may happen if send(2) is followed by a
	 * setsockopt(SO_SNDBUF/SO_RCVBUF) that shrinks the maximum.
	 */
	if (__predict_false(sb->sb_hiwat < sb->uxdg_cc ||
	    sb->sb_mbmax < sb->uxdg_mbcnt))
		return (false);

	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE))
		return (false);

	bleft = sb->sb_hiwat - sb->uxdg_cc;
	mleft = sb->sb_mbmax - sb->uxdg_mbcnt;

	return (bleft >= cc && mleft >= mbcnt);
}

/*
 * PF_UNIX/SOCK_DGRAM send
 *
 * Allocate a record consisting of 3 mbufs in the sequence of
 * from -> control -> data and append it to the socket buffer.
 *
 * The first mbuf carries the sender's name and is a pkthdr that stores the
 * overall length of the datagram, its memory consumption and the control
 * length.
 */
#define	ctllen	PH_loc.thirtytwo[1]
_Static_assert(offsetof(struct pkthdr, memlen) + sizeof(u_int) <=
    offsetof(struct pkthdr, ctllen), "unix/dgram can not store ctllen");
static int
uipc_sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *m, struct mbuf *c, int flags, struct thread *td)
{
	struct unpcb *unp, *unp2;
	const struct sockaddr *from;
	struct socket *so2;
	struct sockbuf *sb;
	struct mbuf *f, *clast;
	u_int cc, ctl, mbcnt;
	u_int dcc __diagused, dctl __diagused, dmbcnt __diagused;
	int error;

	MPASS((uio != NULL && m == NULL) || (m != NULL && uio == NULL));

	error = 0;
	f = NULL;
	ctl = 0;

	if (__predict_false(flags & MSG_OOB)) {
		error = EOPNOTSUPP;
		goto out;
	}
	if (m == NULL) {
		if (__predict_false(uio->uio_resid > unpdg_maxdgram)) {
			error = EMSGSIZE;
			goto out;
		}
		m = m_uiotombuf(uio, M_WAITOK, 0, max_hdr, M_PKTHDR);
		if (__predict_false(m == NULL)) {
			error = EFAULT;
			goto out;
		}
		f = m_gethdr(M_WAITOK, MT_SONAME);
		cc = m->m_pkthdr.len;
		mbcnt = MSIZE + m->m_pkthdr.memlen;
		if (c != NULL &&
		    (error = unp_internalize(&c, td, &clast, &ctl, &mbcnt)))
			goto out;
	} else {
		/* pr_sosend() with mbuf usually is a kernel thread. */

		M_ASSERTPKTHDR(m);
		if (__predict_false(c != NULL))
			panic("%s: control from a kernel thread", __func__);

		if (__predict_false(m->m_pkthdr.len > unpdg_maxdgram)) {
			error = EMSGSIZE;
			goto out;
		}
		if ((f = m_gethdr(M_NOWAIT, MT_SONAME)) == NULL) {
			error = ENOBUFS;
			goto out;
		}
		/* Condition the foreign mbuf to our standards. */
		m_clrprotoflags(m);
		m_tag_delete_chain(m, NULL);
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.flowid = 0;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.fibnum = 0;
		m->m_pkthdr.rsstype = 0;

		cc = m->m_pkthdr.len;
		mbcnt = MSIZE;
		for (struct mbuf *mb = m; mb != NULL; mb = mb->m_next) {
			mbcnt += MSIZE;
			if (mb->m_flags & M_EXT)
				mbcnt += mb->m_ext.ext_size;
		}
	}

	unp = sotounpcb(so);
	MPASS(unp);

	/*
	 * XXXGL: it would be cool to fully remove so_snd from the equation
	 * and avoid this lock, which is not only extraneous but also dropped
	 * again later, still leaving a possibility for a race.  We can easily
	 * handle the SBS_CANTSENDMORE/SS_ISCONNECTED complement in the unpcb,
	 * but it is more difficult to invent something to handle so_error.
	 */
	error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
	if (error)
		goto out2;
	SOCK_SENDBUF_LOCK(so);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCK_SENDBUF_UNLOCK(so);
		error = EPIPE;
		goto out3;
	}
	if (so->so_error != 0) {
		error = so->so_error;
		so->so_error = 0;
		SOCK_SENDBUF_UNLOCK(so);
		goto out3;
	}
	if (((so->so_state & SS_ISCONNECTED) == 0) && addr == NULL) {
		SOCK_SENDBUF_UNLOCK(so);
		error = EDESTADDRREQ;
		goto out3;
	}
	SOCK_SENDBUF_UNLOCK(so);

	if (addr != NULL) {
		if ((error = unp_connectat(AT_FDCWD, so, addr, td, true)))
			goto out3;
		UNP_PCB_LOCK_ASSERT(unp);
		unp2 = unp->unp_conn;
		UNP_PCB_LOCK_ASSERT(unp2);
	} else {
		UNP_PCB_LOCK(unp);
		unp2 = unp_pcb_lock_peer(unp);
		if (unp2 == NULL) {
			UNP_PCB_UNLOCK(unp);
			error = ENOTCONN;
			goto out3;
		}
	}

	if (unp2->unp_flags & UNP_WANTCRED_MASK)
		c = unp_addsockcred(td, c, unp2->unp_flags, &clast, &ctl,
		    &mbcnt);
	if (unp->unp_addr != NULL)
		from = (struct sockaddr *)unp->unp_addr;
	else
		from = &sun_noname;
	f->m_len = from->sa_len;
	MPASS(from->sa_len <= MLEN);
	bcopy(from, mtod(f, void *), from->sa_len);
	ctl += f->m_len;

	/*
	 * Concatenate mbufs: from -> control -> data.
	 * Save overall cc and mbcnt in "from" mbuf.
	 */
	if (c != NULL) {
#ifdef INVARIANTS
		struct mbuf *mc;

		for (mc = c; mc->m_next != NULL; mc = mc->m_next);
		MPASS(mc == clast);
#endif
		f->m_next = c;
		clast->m_next = m;
		c = NULL;
	} else
		f->m_next = m;
	m = NULL;
#ifdef INVARIANTS
	dcc = dctl = dmbcnt = 0;
	for (struct mbuf *mb = f; mb != NULL; mb = mb->m_next) {
		if (mb->m_type == MT_DATA)
			dcc += mb->m_len;
		else
			dctl += mb->m_len;
		dmbcnt += MSIZE;
		if (mb->m_flags & M_EXT)
			dmbcnt += mb->m_ext.ext_size;
	}
	MPASS(dcc == cc);
	MPASS(dctl == ctl);
	MPASS(dmbcnt == mbcnt);
#endif
	f->m_pkthdr.len = cc + ctl;
	f->m_pkthdr.memlen = mbcnt;
	f->m_pkthdr.ctllen = ctl;

	/*
	 * Destination socket buffer selection.
	 *
	 * Unconnected sends, when !(so->so_state & SS_ISCONNECTED) and the
	 * destination address is supplied, create a temporary connection for
	 * the run time of the function (see call to unp_connectat() above and
	 * to unp_disconnect() below).  We distinguish the two cases by the
	 * condition (addr != NULL).  We intentionally avoid adding a 'bool
	 * connected' for that condition, since, again, through the run time
	 * of this code we are always connected.  For such "unconnected" sends,
	 * the destination buffer is the receive buffer of the destination
	 * socket so2.
	 *
	 * For connected sends, data lands on the send buffer of the sender's
	 * socket "so".  Then, if we just added the very first datagram
	 * on this send buffer, we need to add the send buffer on to the
	 * receiving socket's buffer list.  We put ourselves on top of the
	 * list.  Such logic gives infrequent senders priority over frequent
	 * senders.
	 *
	 * Note on byte count management.  As long as the event methods
	 * kevent(2) and select(2) are not protocol specific (yet), we need to
	 * maintain meaningful values on the receive buffer.  So, the receive
	 * buffer accumulates counters from all connected buffers, potentially
	 * having sb_ccc > sb_hiwat or sb_mbcnt > sb_mbmax.
	 */
	so2 = unp2->unp_socket;
	sb = (addr == NULL) ? &so->so_snd : &so2->so_rcv;
	SOCK_RECVBUF_LOCK(so2);
	if (uipc_dgram_sbspace(sb, cc + ctl, mbcnt)) {
		if (addr == NULL && STAILQ_EMPTY(&sb->uxdg_mb))
			TAILQ_INSERT_HEAD(&so2->so_rcv.uxdg_conns, &so->so_snd,
			    uxdg_clist);
		STAILQ_INSERT_TAIL(&sb->uxdg_mb, f, m_stailqpkt);
		sb->uxdg_cc += cc + ctl;
		sb->uxdg_ctl += ctl;
		sb->uxdg_mbcnt += mbcnt;
		so2->so_rcv.sb_acc += cc + ctl;
		so2->so_rcv.sb_ccc += cc + ctl;
		so2->so_rcv.sb_ctl += ctl;
		so2->so_rcv.sb_mbcnt += mbcnt;
		sorwakeup_locked(so2);
		f = NULL;
	} else {
		soroverflow_locked(so2);
		error = ENOBUFS;
		if (f->m_next->m_type == MT_CONTROL) {
			c = f->m_next;
			f->m_next = NULL;
		}
	}

	if (addr != NULL)
		unp_disconnect(unp, unp2);
	else
		unp_pcb_unlock_pair(unp, unp2);

	td->td_ru.ru_msgsnd++;

out3:
	SOCK_IO_SEND_UNLOCK(so);
out2:
	if (c)
		unp_scan(c, unp_freerights);
out:
	if (f)
		m_freem(f);
	if (c)
		m_freem(c);
	if (m)
		m_freem(m);

	return (error);
}

/*
 * PF_UNIX/SOCK_DGRAM receive with MSG_PEEK.
 * The mbuf has already been unlinked from the uxdg_mb list of the socket
 * buffer and needs to be linked onto the uxdg_peeked pointer of the receive
 * socket buffer.
 */
static int
uipc_peek_dgram(struct socket *so, struct mbuf *m, struct sockaddr **psa,
    struct uio *uio, struct mbuf **controlp, int *flagsp)
{
	ssize_t len = 0;
	int error;

	so->so_rcv.uxdg_peeked = m;
	so->so_rcv.uxdg_cc += m->m_pkthdr.len;
	so->so_rcv.uxdg_ctl += m->m_pkthdr.ctllen;
	so->so_rcv.uxdg_mbcnt += m->m_pkthdr.memlen;
	SOCK_RECVBUF_UNLOCK(so);

	KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type));
	if (psa != NULL)
		*psa = sodupsockaddr(mtod(m, struct sockaddr *), M_WAITOK);

	m = m->m_next;
	KASSERT(m, ("%s: no data or control after soname", __func__));

	/*
	 * With MSG_PEEK the control data isn't externalized, just copied.
	 */
	while (m != NULL && m->m_type == MT_CONTROL) {
		if (controlp != NULL) {
			*controlp = m_copym(m, 0, m->m_len, M_WAITOK);
			controlp = &(*controlp)->m_next;
		}
		m = m->m_next;
	}
	KASSERT(m == NULL || m->m_type == MT_DATA,
	    ("%s: not MT_DATA mbuf %p", __func__, m));
	while (m != NULL && uio->uio_resid > 0) {
		len = uio->uio_resid;
		if (len > m->m_len)
			len = m->m_len;
		error = uiomove(mtod(m, char *), (int)len, uio);
		if (error) {
			SOCK_IO_RECV_UNLOCK(so);
			return (error);
		}
		if (len == m->m_len)
			m = m->m_next;
	}
	SOCK_IO_RECV_UNLOCK(so);

	if (flagsp != NULL) {
		if (m != NULL) {
			if (*flagsp & MSG_TRUNC) {
				/* Report real length of the packet */
				uio->uio_resid -= m_length(m, NULL) - len;
			}
			*flagsp |= MSG_TRUNC;
		} else
			*flagsp &= ~MSG_TRUNC;
	}

	return (0);
}

/*
 * PF_UNIX/SOCK_DGRAM receive
 */
static int
uipc_soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct sockbuf *sb = NULL;
	struct mbuf *m;
	int flags, error;
	ssize_t len = 0;
	bool nonblock;

	MPASS(mp0 == NULL);

	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		*controlp = NULL;

	flags = flagsp != NULL ? *flagsp : 0;
	nonblock = (so->so_state & SS_NBIO) ||
	    (flags & (MSG_DONTWAIT | MSG_NBIO));

	error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
	if (__predict_false(error))
		return (error);

	/*
	 * Loop blocking while waiting for a datagram.  Prioritize connected
	 * peers over unconnected sends.  Set sb to the selected socket buffer
	 * containing an mbuf on exit from the wait loop.  A datagram that had
	 * already been peeked at has top priority.
	 */
	SOCK_RECVBUF_LOCK(so);
	while ((m = so->so_rcv.uxdg_peeked) == NULL &&
	    (sb = TAILQ_FIRST(&so->so_rcv.uxdg_conns)) == NULL &&
	    (m = STAILQ_FIRST(&so->so_rcv.uxdg_mb)) == NULL) {
		if (so->so_error) {
			error = so->so_error;
			if (!(flags & MSG_PEEK))
				so->so_error = 0;
			SOCK_RECVBUF_UNLOCK(so);
			SOCK_IO_RECV_UNLOCK(so);
			return (error);
		}
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE ||
		    uio->uio_resid == 0) {
			SOCK_RECVBUF_UNLOCK(so);
			SOCK_IO_RECV_UNLOCK(so);
			return (0);
		}
		if (nonblock) {
			SOCK_RECVBUF_UNLOCK(so);
			SOCK_IO_RECV_UNLOCK(so);
			return (EWOULDBLOCK);
		}
		error = sbwait(so, SO_RCV);
		if (error) {
			SOCK_RECVBUF_UNLOCK(so);
			SOCK_IO_RECV_UNLOCK(so);
			return (error);
		}
	}

	if (sb == NULL)
		sb = &so->so_rcv;
	else if (m == NULL)
		m = STAILQ_FIRST(&sb->uxdg_mb);
	else
		MPASS(m == so->so_rcv.uxdg_peeked);

	MPASS(sb->uxdg_cc > 0);
	M_ASSERTPKTHDR(m);
	KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type));

	if (uio->uio_td)
		uio->uio_td->td_ru.ru_msgrcv++;

	if (__predict_true(m != so->so_rcv.uxdg_peeked)) {
		STAILQ_REMOVE_HEAD(&sb->uxdg_mb, m_stailqpkt);
		if (STAILQ_EMPTY(&sb->uxdg_mb) && sb != &so->so_rcv)
			TAILQ_REMOVE(&so->so_rcv.uxdg_conns, sb, uxdg_clist);
	} else
		so->so_rcv.uxdg_peeked = NULL;

	sb->uxdg_cc -= m->m_pkthdr.len;
	sb->uxdg_ctl -= m->m_pkthdr.ctllen;
	sb->uxdg_mbcnt -= m->m_pkthdr.memlen;

	if (__predict_false(flags & MSG_PEEK))
		return (uipc_peek_dgram(so, m, psa, uio, controlp, flagsp));

	so->so_rcv.sb_acc -= m->m_pkthdr.len;
	so->so_rcv.sb_ccc -= m->m_pkthdr.len;
	so->so_rcv.sb_ctl -= m->m_pkthdr.ctllen;
	so->so_rcv.sb_mbcnt -= m->m_pkthdr.memlen;
	SOCK_RECVBUF_UNLOCK(so);

	if (psa != NULL)
		*psa = sodupsockaddr(mtod(m, struct sockaddr *), M_WAITOK);
	m = m_free(m);
	KASSERT(m, ("%s: no data or control after soname", __func__));

	/*
	 * The packet to copyout() is now in 'm' and it is disconnected from
	 * the queue.
	 *
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  We call into
	 * unp_externalize() to perform externalization (or freeing if
	 * controlp == NULL).  In some cases there can be only MT_CONTROL
	 * mbufs without any MT_DATA mbufs.
	 */
	while (m != NULL && m->m_type == MT_CONTROL) {
		struct mbuf *cm;

		/*
		 * XXXGL: unp_externalize() is also the dom_externalize() KBI
		 * and it frees the whole chain, so we must disconnect the
		 * mbuf.
		 */
		cm = m; m = m->m_next; cm->m_next = NULL;
		error = unp_externalize(cm, controlp, flags);
		if (error != 0) {
			SOCK_IO_RECV_UNLOCK(so);
			unp_scan(m, unp_freerights);
			m_freem(m);
			return (error);
		}
		if (controlp != NULL) {
			while (*controlp != NULL)
				controlp = &(*controlp)->m_next;
		}
	}
	KASSERT(m == NULL || m->m_type == MT_DATA,
	    ("%s: not MT_DATA mbuf %p", __func__, m));
	while (m != NULL && uio->uio_resid > 0) {
		len = uio->uio_resid;
		if (len > m->m_len)
			len = m->m_len;
		error = uiomove(mtod(m, char *), (int)len, uio);
		if (error) {
			SOCK_IO_RECV_UNLOCK(so);
			m_freem(m);
			return (error);
		}
		if (len == m->m_len)
			m = m_free(m);
		else {
			m->m_data += len;
			m->m_len -= len;
		}
	}
	SOCK_IO_RECV_UNLOCK(so);

	if (m != NULL) {
		if (flagsp != NULL) {
			if (flags & MSG_TRUNC) {
				/* Report real length of the packet */
				uio->uio_resid -= m_length(m, NULL);
			}
			*flagsp |= MSG_TRUNC;
		}
		m_freem(m);
	} else if (flagsp != NULL)
		*flagsp &= ~MSG_TRUNC;

	return (0);
}

static bool
uipc_ready_scan(struct socket *so, struct mbuf *m, int count, int *errorp)
{
	struct mbuf *mb, *n;
	struct sockbuf *sb;

	SOCK_LOCK(so);
	if (SOLISTENING(so)) {
		SOCK_UNLOCK(so);
		return (false);
	}
	mb = NULL;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	if (sb->sb_fnrdy != NULL) {
		for (mb = sb->sb_mb, n = mb->m_nextpkt; mb != NULL;) {
			if (mb == m) {
				*errorp = sbready(sb, m, count);
				break;
			}
			mb = mb->m_next;
			if (mb == NULL) {
				mb = n;
				if (mb != NULL)
					n = mb->m_nextpkt;
			}
		}
	}
	SOCKBUF_UNLOCK(sb);
	SOCK_UNLOCK(so);
	return (mb != NULL);
}

static int
uipc_ready(struct socket *so, struct mbuf *m, int count)
{
	struct unpcb *unp, *unp2;
	struct socket *so2;
	int error, i;

	unp = sotounpcb(so);

	KASSERT(so->so_type == SOCK_STREAM,
	    ("%s: unexpected socket type for %p", __func__, so));

	UNP_PCB_LOCK(unp);
	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) {
		UNP_PCB_UNLOCK(unp);
		so2 = unp2->unp_socket;
		SOCKBUF_LOCK(&so2->so_rcv);
		if ((error = sbready(&so2->so_rcv, m, count)) == 0)
			sorwakeup_locked(so2);
		else
			SOCKBUF_UNLOCK(&so2->so_rcv);
		UNP_PCB_UNLOCK(unp2);
		return (error);
	}
	UNP_PCB_UNLOCK(unp);

	/*
	 * The receiving socket has been disconnected, but may still be valid.
	 * In this case, the now-ready mbufs are still present in its socket
	 * buffer, so perform an exhaustive search before giving up and freeing
	 * the mbufs.
	 */
	UNP_LINK_RLOCK();
	LIST_FOREACH(unp, &unp_shead, unp_link) {
		if (uipc_ready_scan(unp->unp_socket, m, count, &error))
			break;
	}
	UNP_LINK_RUNLOCK();

	if (unp == NULL) {
		for (i = 0; i < count; i++)
			m = m_free(m);
		error = ECONNRESET;
	}
	return (error);
}

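/*
 * Report pseudo-stat(2) data for fstat(2) on the socket: a fake inode number
 * and the send buffer high-water mark as the block size.
 */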
static int
uipc_sense(struct socket *so, struct stat *sb)
{
	struct unpcb *unp;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_sense: unp == NULL"));

	sb->st_blksize = so->so_snd.sb_hiwat;
	sb->st_dev = NODEV;
	sb->st_ino = unp->unp_ino;
	return (0);
}

static int
uipc_shutdown(struct socket *so, enum shutdown_how how)
{
	struct unpcb *unp = sotounpcb(so);
	int error;

	SOCK_LOCK(so);
	if (SOLISTENING(so)) {
		if (how != SHUT_WR) {
			so->so_error = ECONNABORTED;
			solisten_wakeup(so);	/* unlocks so */
		} else
			SOCK_UNLOCK(so);
		return (ENOTCONN);
	} else if ((so->so_state &
	    (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
		/*
		 * POSIX mandates that we just return ENOTCONN when shutdown(2)
		 * is invoked on a datagram socket; however, historically we
		 * would actually tear the socket down.  This is known to be
		 * leveraged by some applications: one process unblocks another
		 * process waiting in recv(2) on a socket that they share.  Try
		 * to meet both the backward-compatibility and POSIX
		 * requirements by forcing ENOTCONN but still flushing buffers
		 * and performing wakeup(9).
		 *
		 * XXXGL: it remains unknown which applications expect this
		 * behavior, and whether it is isolated to unix/dgram or
		 * inet/dgram or both.  See: D10351, D3039.
		 */
		error = ENOTCONN;
		if (so->so_type != SOCK_DGRAM) {
			SOCK_UNLOCK(so);
			return (error);
		}
	} else
		error = 0;
	SOCK_UNLOCK(so);

	switch (how) {
	case SHUT_RD:
		socantrcvmore(so);
		unp_dispose(so);
		break;
	case SHUT_RDWR:
		socantrcvmore(so);
		unp_dispose(so);
		/* FALLTHROUGH */
	case SHUT_WR:
		UNP_PCB_LOCK(unp);
		socantsendmore(so);
		unp_shutdown(unp);
		UNP_PCB_UNLOCK(unp);
	}
	wakeup(&so->so_timeo);

	return (error);
}

static int
uipc_sockaddr(struct socket *so, struct sockaddr *ret)
{
	struct unpcb *unp;
	const struct sockaddr *sa;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_sockaddr: unp == NULL"));

	UNP_PCB_LOCK(unp);
	if (unp->unp_addr != NULL)
		sa = (struct sockaddr *)unp->unp_addr;
	else
		sa = &sun_noname;
	bcopy(sa, ret, sa->sa_len);
	UNP_PCB_UNLOCK(unp);
	return (0);
}

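/*
 * SOL_LOCAL socket options: LOCAL_PEERCRED queries the cached peer
 * credentials; LOCAL_CREDS and LOCAL_CREDS_PERSISTENT arm one-shot or
 * persistent credential passing, respectively, and are mutually exclusive.
 */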
1753 static int
uipc_ctloutput(struct socket * so,struct sockopt * sopt)1754 uipc_ctloutput(struct socket *so, struct sockopt *sopt)
1755 {
1756 struct unpcb *unp;
1757 struct xucred xu;
1758 int error, optval;
1759
1760 if (sopt->sopt_level != SOL_LOCAL)
1761 return (EINVAL);
1762
1763 unp = sotounpcb(so);
1764 KASSERT(unp != NULL, ("uipc_ctloutput: unp == NULL"));
1765 error = 0;
1766 switch (sopt->sopt_dir) {
1767 case SOPT_GET:
1768 switch (sopt->sopt_name) {
1769 case LOCAL_PEERCRED:
1770 UNP_PCB_LOCK(unp);
1771 if (unp->unp_flags & UNP_HAVEPC)
1772 xu = unp->unp_peercred;
1773 else {
1774 if (so->so_type == SOCK_STREAM)
1775 error = ENOTCONN;
1776 else
1777 error = EINVAL;
1778 }
1779 UNP_PCB_UNLOCK(unp);
1780 if (error == 0)
1781 error = sooptcopyout(sopt, &xu, sizeof(xu));
1782 break;
1783
1784 case LOCAL_CREDS:
1785 /* Unlocked read. */
1786 optval = unp->unp_flags & UNP_WANTCRED_ONESHOT ? 1 : 0;
1787 error = sooptcopyout(sopt, &optval, sizeof(optval));
1788 break;
1789
1790 case LOCAL_CREDS_PERSISTENT:
1791 /* Unlocked read. */
1792 optval = unp->unp_flags & UNP_WANTCRED_ALWAYS ? 1 : 0;
1793 error = sooptcopyout(sopt, &optval, sizeof(optval));
1794 break;
1795
1796 default:
1797 error = EOPNOTSUPP;
1798 break;
1799 }
1800 break;
1801
1802 case SOPT_SET:
1803 switch (sopt->sopt_name) {
1804 case LOCAL_CREDS:
1805 case LOCAL_CREDS_PERSISTENT:
1806 error = sooptcopyin(sopt, &optval, sizeof(optval),
1807 sizeof(optval));
1808 if (error)
1809 break;
1810
1811 #define OPTSET(bit, exclusive) do { \
1812 UNP_PCB_LOCK(unp); \
1813 if (optval) { \
1814 if ((unp->unp_flags & (exclusive)) != 0) { \
1815 UNP_PCB_UNLOCK(unp); \
1816 error = EINVAL; \
1817 break; \
1818 } \
1819 unp->unp_flags |= (bit); \
1820 } else \
1821 unp->unp_flags &= ~(bit); \
1822 UNP_PCB_UNLOCK(unp); \
1823 } while (0)
1824
1825 switch (sopt->sopt_name) {
1826 case LOCAL_CREDS:
1827 OPTSET(UNP_WANTCRED_ONESHOT, UNP_WANTCRED_ALWAYS);
1828 break;
1829
1830 case LOCAL_CREDS_PERSISTENT:
1831 OPTSET(UNP_WANTCRED_ALWAYS, UNP_WANTCRED_ONESHOT);
1832 break;
1833
1834 default:
1835 break;
1836 }
1837 break;
1838 #undef OPTSET
1839 default:
1840 error = ENOPROTOOPT;
1841 break;
1842 }
1843 break;
1844
1845 default:
1846 error = EOPNOTSUPP;
1847 break;
1848 }
1849 return (error);
1850 }
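
/*
 * Illustrative userland sketch of the SOPT_GET/LOCAL_PEERCRED case
 * handled above (hypothetical names, not compiled here), issued on a
 * connected stream socket s:
 *
 *	struct xucred xu;
 *	socklen_t len = sizeof(xu);
 *
 *	if (getsockopt(s, SOL_LOCAL, LOCAL_PEERCRED, &xu, &len) == 0)
 *		printf("peer euid: %u\n", (unsigned)xu.cr_uid);
 */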
1851
1852 static int
1853 unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1854 {
1855
1856 return (unp_connectat(AT_FDCWD, so, nam, td, false));
1857 }
1858
1859 static int
1860 unp_connectat(int fd, struct socket *so, struct sockaddr *nam,
1861 struct thread *td, bool return_locked)
1862 {
1863 struct mtx *vplock;
1864 struct sockaddr_un *soun;
1865 struct vnode *vp;
1866 struct socket *so2;
1867 struct unpcb *unp, *unp2, *unp3;
1868 struct nameidata nd;
1869 char buf[SOCK_MAXADDRLEN];
1870 struct sockaddr *sa;
1871 cap_rights_t rights;
1872 int error, len;
1873 bool connreq;
1874
1875 if (nam->sa_family != AF_UNIX)
1876 return (EAFNOSUPPORT);
1877 if (nam->sa_len > sizeof(struct sockaddr_un))
1878 return (EINVAL);
1879 len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
1880 if (len <= 0)
1881 return (EINVAL);
1882 soun = (struct sockaddr_un *)nam;
1883 bcopy(soun->sun_path, buf, len);
1884 buf[len] = 0;
1885
1886 error = 0;
1887 unp = sotounpcb(so);
1888 UNP_PCB_LOCK(unp);
1889 for (;;) {
1890 /*
1891 * Wait for connection state to stabilize. If a connection
1892 * already exists, give up. For datagram sockets, which permit
1893 * multiple consecutive connect(2) calls, upper layers are
1894 * responsible for disconnecting in advance of a subsequent
1895 * connect(2), but this is not synchronized with PCB connection
1896 * state.
1897 *
1898 * Also make sure that no threads are currently attempting to
1899 * lock the peer socket, to ensure that unp_conn cannot
1900 * transition between two valid sockets while locks are dropped.
1901 */
1902 if (SOLISTENING(so))
1903 error = EOPNOTSUPP;
1904 else if (unp->unp_conn != NULL)
1905 error = EISCONN;
1906 else if ((unp->unp_flags & UNP_CONNECTING) != 0) {
1907 error = EALREADY;
1908 }
1909 if (error != 0) {
1910 UNP_PCB_UNLOCK(unp);
1911 return (error);
1912 }
1913 if (unp->unp_pairbusy > 0) {
1914 unp->unp_flags |= UNP_WAITING;
1915 mtx_sleep(unp, UNP_PCB_LOCKPTR(unp), 0, "unpeer", 0);
1916 continue;
1917 }
1918 break;
1919 }
1920 unp->unp_flags |= UNP_CONNECTING;
1921 UNP_PCB_UNLOCK(unp);
1922
1923 connreq = (so->so_proto->pr_flags & PR_CONNREQUIRED) != 0;
1924 if (connreq)
1925 sa = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
1926 else
1927 sa = NULL;
1928 NDINIT_ATRIGHTS(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF,
1929 UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_CONNECTAT));
1930 error = namei(&nd);
1931 if (error)
1932 vp = NULL;
1933 else
1934 vp = nd.ni_vp;
1935 ASSERT_VOP_LOCKED(vp, "unp_connect");
1936 if (error)
1937 goto bad;
1938 NDFREE_PNBUF(&nd);
1939
1940 if (vp->v_type != VSOCK) {
1941 error = ENOTSOCK;
1942 goto bad;
1943 }
1944 #ifdef MAC
1945 error = mac_vnode_check_open(td->td_ucred, vp, VWRITE | VREAD);
1946 if (error)
1947 goto bad;
1948 #endif
1949 error = VOP_ACCESS(vp, VWRITE, td->td_ucred, td);
1950 if (error)
1951 goto bad;
1952
1953 unp = sotounpcb(so);
1954 KASSERT(unp != NULL, ("unp_connect: unp == NULL"));
1955
1956 vplock = mtx_pool_find(mtxpool_sleep, vp);
1957 mtx_lock(vplock);
1958 VOP_UNP_CONNECT(vp, &unp2);
1959 if (unp2 == NULL) {
1960 error = ECONNREFUSED;
1961 goto bad2;
1962 }
1963 so2 = unp2->unp_socket;
1964 if (so->so_type != so2->so_type) {
1965 error = EPROTOTYPE;
1966 goto bad2;
1967 }
1968 if (connreq) {
1969 if (SOLISTENING(so2)) {
1970 CURVNET_SET(so2->so_vnet);
1971 so2 = sonewconn(so2, 0);
1972 CURVNET_RESTORE();
1973 } else
1974 so2 = NULL;
1975 if (so2 == NULL) {
1976 error = ECONNREFUSED;
1977 goto bad2;
1978 }
1979 unp3 = sotounpcb(so2);
1980 unp_pcb_lock_pair(unp2, unp3);
1981 if (unp2->unp_addr != NULL) {
1982 bcopy(unp2->unp_addr, sa, unp2->unp_addr->sun_len);
1983 unp3->unp_addr = (struct sockaddr_un *) sa;
1984 sa = NULL;
1985 }
1986
1987 unp_copy_peercred(td, unp3, unp, unp2);
1988
1989 UNP_PCB_UNLOCK(unp2);
1990 unp2 = unp3;
1991
1992 /*
1993 * It is safe to block on the PCB lock here since unp2 is
1994 * nascent and cannot be connected to any other sockets.
1995 */
1996 UNP_PCB_LOCK(unp);
1997 #ifdef MAC
1998 mac_socketpeer_set_from_socket(so, so2);
1999 mac_socketpeer_set_from_socket(so2, so);
2000 #endif
2001 } else {
2002 unp_pcb_lock_pair(unp, unp2);
2003 }
2004 KASSERT(unp2 != NULL && so2 != NULL && unp2->unp_socket == so2 &&
2005 sotounpcb(so2) == unp2,
2006 ("%s: unp2 %p so2 %p", __func__, unp2, so2));
2007 unp_connect2(so, so2);
2008 KASSERT((unp->unp_flags & UNP_CONNECTING) != 0,
2009 ("%s: unp %p has UNP_CONNECTING clear", __func__, unp));
2010 unp->unp_flags &= ~UNP_CONNECTING;
2011 if (!return_locked)
2012 unp_pcb_unlock_pair(unp, unp2);
2013 bad2:
2014 mtx_unlock(vplock);
2015 bad:
2016 if (vp != NULL) {
2017 /*
2018 * If we are returning locked (called via uipc_sosend_dgram()),
2019 * we need to be sure that vput() won't sleep. This is
2020 * guaranteed by the VOP_UNP_CONNECT() call above and the unp2 lock.
2021 * SOCK_STREAM/SEQPACKET can't request return_locked (yet).
2022 */
2023 MPASS(!(return_locked && connreq));
2024 vput(vp);
2025 }
2026 free(sa, M_SONAME);
2027 if (__predict_false(error)) {
2028 UNP_PCB_LOCK(unp);
2029 KASSERT((unp->unp_flags & UNP_CONNECTING) != 0,
2030 ("%s: unp %p has UNP_CONNECTING clear", __func__, unp));
2031 unp->unp_flags &= ~UNP_CONNECTING;
2032 UNP_PCB_UNLOCK(unp);
2033 }
2034 return (error);
2035 }
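
/*
 * Illustrative userland sketch (hypothetical paths and names): the
 * fd/CAP_CONNECTAT handling above serves connectat(2), which resolves
 * a relative sun_path from a directory descriptor, e.g. inside a
 * Capsicum sandbox:
 *
 *	struct sockaddr_un sun = { .sun_len = sizeof(sun),
 *	    .sun_family = AF_UNIX };
 *
 *	strlcpy(sun.sun_path, "daemon.sock", sizeof(sun.sun_path));
 *	dfd = open("/var/run", O_DIRECTORY);
 *	error = connectat(dfd, s, (struct sockaddr *)&sun, sizeof(sun));
 */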
2036
2037 /*
2038 * Set socket peer credentials at connection time.
2039 *
2040 * The client's PCB credentials are copied from its process structure. The
2041 * server's PCB credentials are copied from the socket on which it called
2042 * listen(2). uipc_listen cached that process's credentials at the time.
2043 */
2044 void
2045 unp_copy_peercred(struct thread *td, struct unpcb *client_unp,
2046 struct unpcb *server_unp, struct unpcb *listen_unp)
2047 {
2048 cru2xt(td, &client_unp->unp_peercred);
2049 client_unp->unp_flags |= UNP_HAVEPC;
2050
2051 memcpy(&server_unp->unp_peercred, &listen_unp->unp_peercred,
2052 sizeof(server_unp->unp_peercred));
2053 server_unp->unp_flags |= UNP_HAVEPC;
2054 client_unp->unp_flags |= (listen_unp->unp_flags & UNP_WANTCRED_MASK);
2055 }
2056
2057 static void
2058 unp_connect2(struct socket *so, struct socket *so2)
2059 {
2060 struct unpcb *unp;
2061 struct unpcb *unp2;
2062
2063 MPASS(so2->so_type == so->so_type);
2064 unp = sotounpcb(so);
2065 KASSERT(unp != NULL, ("unp_connect2: unp == NULL"));
2066 unp2 = sotounpcb(so2);
2067 KASSERT(unp2 != NULL, ("unp_connect2: unp2 == NULL"));
2068
2069 UNP_PCB_LOCK_ASSERT(unp);
2070 UNP_PCB_LOCK_ASSERT(unp2);
2071 KASSERT(unp->unp_conn == NULL,
2072 ("%s: socket %p is already connected", __func__, unp));
2073
2074 unp->unp_conn = unp2;
2075 unp_pcb_hold(unp2);
2076 unp_pcb_hold(unp);
2077 switch (so->so_type) {
2078 case SOCK_DGRAM:
2079 UNP_REF_LIST_LOCK();
2080 LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
2081 UNP_REF_LIST_UNLOCK();
2082 soisconnected(so);
2083 break;
2084
2085 case SOCK_STREAM:
2086 case SOCK_SEQPACKET:
2087 KASSERT(unp2->unp_conn == NULL,
2088 ("%s: socket %p is already connected", __func__, unp2));
2089 unp2->unp_conn = unp;
2090 soisconnected(so);
2091 soisconnected(so2);
2092 break;
2093
2094 default:
2095 panic("unp_connect2");
2096 }
2097 }
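
/*
 * Illustrative note: this is also the path taken by socketpair(2),
 * whose two nascent PCBs arrive here already locked. A minimal
 * userland sketch (hypothetical names):
 *
 *	int sv[2];
 *
 *	if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == 0)
 *		;	// sv[0]/sv[1] wired together as in the
 *			// SOCK_STREAM case above
 */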
2098
2099 static void
2100 unp_disconnect(struct unpcb *unp, struct unpcb *unp2)
2101 {
2102 struct socket *so, *so2;
2103 struct mbuf *m = NULL;
2104 #ifdef INVARIANTS
2105 struct unpcb *unptmp;
2106 #endif
2107
2108 UNP_PCB_LOCK_ASSERT(unp);
2109 UNP_PCB_LOCK_ASSERT(unp2);
2110 KASSERT(unp->unp_conn == unp2,
2111 ("%s: unpcb %p is not connected to %p", __func__, unp, unp2));
2112
2113 unp->unp_conn = NULL;
2114 so = unp->unp_socket;
2115 so2 = unp2->unp_socket;
2116 switch (unp->unp_socket->so_type) {
2117 case SOCK_DGRAM:
2118 /*
2119 * Remove our send socket buffer from the peer's receive buffer.
2120 * Move the data to the receive buffer only if it is empty.
2121 * This is a protection against a scenario where a peer
2122 * connects, floods and disconnects, effectively blocking
2123 * sendto() from unconnected sockets.
2124 */
2125 SOCK_RECVBUF_LOCK(so2);
2126 if (!STAILQ_EMPTY(&so->so_snd.uxdg_mb)) {
2127 TAILQ_REMOVE(&so2->so_rcv.uxdg_conns, &so->so_snd,
2128 uxdg_clist);
2129 if (__predict_true((so2->so_rcv.sb_state &
2130 SBS_CANTRCVMORE) == 0) &&
2131 STAILQ_EMPTY(&so2->so_rcv.uxdg_mb)) {
2132 STAILQ_CONCAT(&so2->so_rcv.uxdg_mb,
2133 &so->so_snd.uxdg_mb);
2134 so2->so_rcv.uxdg_cc += so->so_snd.uxdg_cc;
2135 so2->so_rcv.uxdg_ctl += so->so_snd.uxdg_ctl;
2136 so2->so_rcv.uxdg_mbcnt += so->so_snd.uxdg_mbcnt;
2137 } else {
2138 m = STAILQ_FIRST(&so->so_snd.uxdg_mb);
2139 STAILQ_INIT(&so->so_snd.uxdg_mb);
2140 so2->so_rcv.sb_acc -= so->so_snd.uxdg_cc;
2141 so2->so_rcv.sb_ccc -= so->so_snd.uxdg_cc;
2142 so2->so_rcv.sb_ctl -= so->so_snd.uxdg_ctl;
2143 so2->so_rcv.sb_mbcnt -= so->so_snd.uxdg_mbcnt;
2144 }
2145 /* Note: so may reconnect. */
2146 so->so_snd.uxdg_cc = 0;
2147 so->so_snd.uxdg_ctl = 0;
2148 so->so_snd.uxdg_mbcnt = 0;
2149 }
2150 SOCK_RECVBUF_UNLOCK(so2);
2151 UNP_REF_LIST_LOCK();
2152 #ifdef INVARIANTS
2153 LIST_FOREACH(unptmp, &unp2->unp_refs, unp_reflink) {
2154 if (unptmp == unp)
2155 break;
2156 }
2157 KASSERT(unptmp != NULL,
2158 ("%s: %p not found in reflist of %p", __func__, unp, unp2));
2159 #endif
2160 LIST_REMOVE(unp, unp_reflink);
2161 UNP_REF_LIST_UNLOCK();
2162 if (so) {
2163 SOCK_LOCK(so);
2164 so->so_state &= ~SS_ISCONNECTED;
2165 SOCK_UNLOCK(so);
2166 }
2167 break;
2168
2169 case SOCK_STREAM:
2170 case SOCK_SEQPACKET:
2171 if (so)
2172 soisdisconnected(so);
2173 MPASS(unp2->unp_conn == unp);
2174 unp2->unp_conn = NULL;
2175 if (so2)
2176 soisdisconnected(so2);
2177 break;
2178 }
2179
2180 if (unp == unp2) {
2181 unp_pcb_rele_notlast(unp);
2182 if (!unp_pcb_rele(unp))
2183 UNP_PCB_UNLOCK(unp);
2184 } else {
2185 if (!unp_pcb_rele(unp))
2186 UNP_PCB_UNLOCK(unp);
2187 if (!unp_pcb_rele(unp2))
2188 UNP_PCB_UNLOCK(unp2);
2189 }
2190
2191 if (m != NULL) {
2192 unp_scan(m, unp_freerights);
2193 m_freemp(m);
2194 }
2195 }
2196
2197 /*
2198 * unp_pcblist() walks the global list of struct unpcb's to generate a
2199 * pointer list, bumping the refcount on each unpcb. It then copies them out
2200 * sequentially, validating the generation number on each to see if it has
2201 * been detached. All of this is necessary because copyout() may sleep on
2202 * disk I/O.
2203 */
2204 static int
2205 unp_pcblist(SYSCTL_HANDLER_ARGS)
2206 {
2207 struct unpcb *unp, **unp_list;
2208 unp_gen_t gencnt;
2209 struct xunpgen *xug;
2210 struct unp_head *head;
2211 struct xunpcb *xu;
2212 u_int i;
2213 int error, n;
2214
2215 switch ((intptr_t)arg1) {
2216 case SOCK_STREAM:
2217 head = &unp_shead;
2218 break;
2219
2220 case SOCK_DGRAM:
2221 head = &unp_dhead;
2222 break;
2223
2224 case SOCK_SEQPACKET:
2225 head = &unp_sphead;
2226 break;
2227
2228 default:
2229 panic("unp_pcblist: arg1 %d", (int)(intptr_t)arg1);
2230 }
2231
2232 /*
2233 * The process of preparing the PCB list is too time-consuming and
2234 * resource-intensive to perform twice on every request.
2235 */
2236 if (req->oldptr == NULL) {
2237 n = unp_count;
2238 req->oldidx = 2 * (sizeof *xug)
2239 + (n + n/8) * sizeof(struct xunpcb);
2240 return (0);
2241 }
2242
2243 if (req->newptr != NULL)
2244 return (EPERM);
2245
2246 /*
2247 * OK, now we're committed to doing something.
2248 */
2249 xug = malloc(sizeof(*xug), M_TEMP, M_WAITOK | M_ZERO);
2250 UNP_LINK_RLOCK();
2251 gencnt = unp_gencnt;
2252 n = unp_count;
2253 UNP_LINK_RUNLOCK();
2254
2255 xug->xug_len = sizeof *xug;
2256 xug->xug_count = n;
2257 xug->xug_gen = gencnt;
2258 xug->xug_sogen = so_gencnt;
2259 error = SYSCTL_OUT(req, xug, sizeof *xug);
2260 if (error) {
2261 free(xug, M_TEMP);
2262 return (error);
2263 }
2264
2265 unp_list = malloc(n * sizeof *unp_list, M_TEMP, M_WAITOK);
2266
2267 UNP_LINK_RLOCK();
2268 for (unp = LIST_FIRST(head), i = 0; unp && i < n;
2269 unp = LIST_NEXT(unp, unp_link)) {
2270 UNP_PCB_LOCK(unp);
2271 if (unp->unp_gencnt <= gencnt) {
2272 if (cr_cansee(req->td->td_ucred,
2273 unp->unp_socket->so_cred)) {
2274 UNP_PCB_UNLOCK(unp);
2275 continue;
2276 }
2277 unp_list[i++] = unp;
2278 unp_pcb_hold(unp);
2279 }
2280 UNP_PCB_UNLOCK(unp);
2281 }
2282 UNP_LINK_RUNLOCK();
2283 n = i; /* In case we lost some during malloc. */
2284
2285 error = 0;
2286 xu = malloc(sizeof(*xu), M_TEMP, M_WAITOK | M_ZERO);
2287 for (i = 0; i < n; i++) {
2288 unp = unp_list[i];
2289 UNP_PCB_LOCK(unp);
2290 if (unp_pcb_rele(unp))
2291 continue;
2292
2293 if (unp->unp_gencnt <= gencnt) {
2294 xu->xu_len = sizeof *xu;
2295 xu->xu_unpp = (uintptr_t)unp;
2296 /*
2297 * XXX - need more locking here to protect against
2298 * connect/disconnect races for SMP.
2299 */
2300 if (unp->unp_addr != NULL)
2301 bcopy(unp->unp_addr, &xu->xu_addr,
2302 unp->unp_addr->sun_len);
2303 else
2304 bzero(&xu->xu_addr, sizeof(xu->xu_addr));
2305 if (unp->unp_conn != NULL &&
2306 unp->unp_conn->unp_addr != NULL)
2307 bcopy(unp->unp_conn->unp_addr,
2308 &xu->xu_caddr,
2309 unp->unp_conn->unp_addr->sun_len);
2310 else
2311 bzero(&xu->xu_caddr, sizeof(xu->xu_caddr));
2312 xu->unp_vnode = (uintptr_t)unp->unp_vnode;
2313 xu->unp_conn = (uintptr_t)unp->unp_conn;
2314 xu->xu_firstref = (uintptr_t)LIST_FIRST(&unp->unp_refs);
2315 xu->xu_nextref = (uintptr_t)LIST_NEXT(unp, unp_reflink);
2316 xu->unp_gencnt = unp->unp_gencnt;
2317 sotoxsocket(unp->unp_socket, &xu->xu_socket);
2318 UNP_PCB_UNLOCK(unp);
2319 error = SYSCTL_OUT(req, xu, sizeof *xu);
2320 } else {
2321 UNP_PCB_UNLOCK(unp);
2322 }
2323 }
2324 free(xu, M_TEMP);
2325 if (!error) {
2326 /*
2327 * Give the user an updated idea of our state. If the
2328 * generation differs from what we told her before, she knows
2329 * that something happened while we were processing this
2330 * request, and it might be necessary to retry.
2331 */
2332 xug->xug_gen = unp_gencnt;
2333 xug->xug_sogen = so_gencnt;
2334 xug->xug_count = unp_count;
2335 error = SYSCTL_OUT(req, xug, sizeof *xug);
2336 }
2337 free(unp_list, M_TEMP);
2338 free(xug, M_TEMP);
2339 return (error);
2340 }
2341
2342 SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist,
2343 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
2344 (void *)(intptr_t)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb",
2345 "List of active local datagram sockets");
2346 SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist,
2347 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
2348 (void *)(intptr_t)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb",
2349 "List of active local stream sockets");
2350 SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist,
2351 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
2352 (void *)(intptr_t)SOCK_SEQPACKET, 0, unp_pcblist, "S,xunpcb",
2353 "List of active local seqpacket sockets");
2354
2355 static void
2356 unp_shutdown(struct unpcb *unp)
2357 {
2358 struct unpcb *unp2;
2359 struct socket *so;
2360
2361 UNP_PCB_LOCK_ASSERT(unp);
2362
2363 unp2 = unp->unp_conn;
2364 if ((unp->unp_socket->so_type == SOCK_STREAM ||
2365 (unp->unp_socket->so_type == SOCK_SEQPACKET)) && unp2 != NULL) {
2366 so = unp2->unp_socket;
2367 if (so != NULL)
2368 socantrcvmore(so);
2369 }
2370 }
2371
2372 static void
2373 unp_drop(struct unpcb *unp)
2374 {
2375 struct socket *so;
2376 struct unpcb *unp2;
2377
2378 /*
2379 * Regardless of whether the socket's peer dropped the connection
2380 * with this socket by aborting or disconnecting, POSIX requires
2381 * that ECONNRESET is returned.
2382 */
2383
2384 UNP_PCB_LOCK(unp);
2385 so = unp->unp_socket;
2386 if (so)
2387 so->so_error = ECONNRESET;
2388 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) {
2389 /* Last reference dropped in unp_disconnect(). */
2390 unp_pcb_rele_notlast(unp);
2391 unp_disconnect(unp, unp2);
2392 } else if (!unp_pcb_rele(unp)) {
2393 UNP_PCB_UNLOCK(unp);
2394 }
2395 }
2396
2397 static void
2398 unp_freerights(struct filedescent **fdep, int fdcount)
2399 {
2400 struct file *fp;
2401 int i;
2402
2403 KASSERT(fdcount > 0, ("%s: fdcount %d", __func__, fdcount));
2404
2405 for (i = 0; i < fdcount; i++) {
2406 fp = fdep[i]->fde_file;
2407 filecaps_free(&fdep[i]->fde_caps);
2408 unp_discard(fp);
2409 }
2410 free(fdep[0], M_FILECAPS);
2411 }
2412
2413 static int
2414 unp_externalize(struct mbuf *control, struct mbuf **controlp, int flags)
2415 {
2416 struct thread *td = curthread; /* XXX */
2417 struct cmsghdr *cm = mtod(control, struct cmsghdr *);
2418 int i;
2419 int *fdp;
2420 struct filedesc *fdesc = td->td_proc->p_fd;
2421 struct filedescent **fdep;
2422 void *data;
2423 socklen_t clen = control->m_len, datalen;
2424 int error, newfds;
2425 u_int newlen;
2426
2427 UNP_LINK_UNLOCK_ASSERT();
2428
2429 error = 0;
2430 if (controlp != NULL) /* controlp == NULL => free control messages */
2431 *controlp = NULL;
2432 while (cm != NULL) {
2433 MPASS(clen >= sizeof(*cm) && clen >= cm->cmsg_len);
2434
2435 data = CMSG_DATA(cm);
2436 datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data;
2437 if (cm->cmsg_level == SOL_SOCKET
2438 && cm->cmsg_type == SCM_RIGHTS) {
2439 newfds = datalen / sizeof(*fdep);
2440 if (newfds == 0)
2441 goto next;
2442 fdep = data;
2443
2444 /* If we're not outputting the descriptors, free them. */
2445 if (error || controlp == NULL) {
2446 unp_freerights(fdep, newfds);
2447 goto next;
2448 }
2449 FILEDESC_XLOCK(fdesc);
2450
2451 /*
2452 * Now change each pointer to an fd in the global
2453 * table to an integer that is the index to the local
2454 * fd table entry that we set up to point to the
2455 * global one we are transferring.
2456 */
2457 newlen = newfds * sizeof(int);
2458 *controlp = sbcreatecontrol(NULL, newlen,
2459 SCM_RIGHTS, SOL_SOCKET, M_WAITOK);
2460
2461 fdp = (int *)
2462 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2463 if ((error = fdallocn(td, 0, fdp, newfds))) {
2464 FILEDESC_XUNLOCK(fdesc);
2465 unp_freerights(fdep, newfds);
2466 m_freem(*controlp);
2467 *controlp = NULL;
2468 goto next;
2469 }
2470 for (i = 0; i < newfds; i++, fdp++) {
2471 _finstall(fdesc, fdep[i]->fde_file, *fdp,
2472 (flags & MSG_CMSG_CLOEXEC) != 0 ? O_CLOEXEC : 0,
2473 &fdep[i]->fde_caps);
2474 unp_externalize_fp(fdep[i]->fde_file);
2475 }
2476
2477 /*
2478 * The new type indicates that the mbuf data refers to
2479 * kernel resources that may need to be released before
2480 * the mbuf is freed.
2481 */
2482 m_chtype(*controlp, MT_EXTCONTROL);
2483 FILEDESC_XUNLOCK(fdesc);
2484 free(fdep[0], M_FILECAPS);
2485 } else {
2486 /* We can just copy anything else across. */
2487 if (error || controlp == NULL)
2488 goto next;
2489 *controlp = sbcreatecontrol(NULL, datalen,
2490 cm->cmsg_type, cm->cmsg_level, M_WAITOK);
2491 bcopy(data,
2492 CMSG_DATA(mtod(*controlp, struct cmsghdr *)),
2493 datalen);
2494 }
2495 controlp = &(*controlp)->m_next;
2496
2497 next:
2498 if (CMSG_SPACE(datalen) < clen) {
2499 clen -= CMSG_SPACE(datalen);
2500 cm = (struct cmsghdr *)
2501 ((caddr_t)cm + CMSG_SPACE(datalen));
2502 } else {
2503 clen = 0;
2504 cm = NULL;
2505 }
2506 }
2507
2508 m_freem(control);
2509 return (error);
2510 }
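
/*
 * Illustrative userland sketch (hypothetical names) of the receive
 * side of the SCM_RIGHTS conversion performed above; passing
 * MSG_CMSG_CLOEXEC makes _finstall() set O_CLOEXEC on the new fds:
 *
 *	union { struct cmsghdr hdr;
 *	    char buf[CMSG_SPACE(sizeof(int))]; } cm;
 *	struct iovec iov = { .iov_base = &b, .iov_len = sizeof(b) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *	    .msg_control = cm.buf, .msg_controllen = sizeof(cm.buf) };
 *
 *	if (recvmsg(s, &msg, MSG_CMSG_CLOEXEC) != -1 &&
 *	    cm.hdr.cmsg_type == SCM_RIGHTS)
 *		memcpy(&newfd, CMSG_DATA(&cm.hdr), sizeof(newfd));
 */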
2511
2512 static void
2513 unp_zone_change(void *tag)
2514 {
2515
2516 uma_zone_set_max(unp_zone, maxsockets);
2517 }
2518
2519 #ifdef INVARIANTS
2520 static void
2521 unp_zdtor(void *mem, int size __unused, void *arg __unused)
2522 {
2523 struct unpcb *unp;
2524
2525 unp = mem;
2526
2527 KASSERT(LIST_EMPTY(&unp->unp_refs),
2528 ("%s: unpcb %p has lingering refs", __func__, unp));
2529 KASSERT(unp->unp_socket == NULL,
2530 ("%s: unpcb %p has socket backpointer", __func__, unp));
2531 KASSERT(unp->unp_vnode == NULL,
2532 ("%s: unpcb %p has vnode references", __func__, unp));
2533 KASSERT(unp->unp_conn == NULL,
2534 ("%s: unpcb %p is still connected", __func__, unp));
2535 KASSERT(unp->unp_addr == NULL,
2536 ("%s: unpcb %p has leaked addr", __func__, unp));
2537 }
2538 #endif
2539
2540 static void
2541 unp_init(void *arg __unused)
2542 {
2543 uma_dtor dtor;
2544
2545 #ifdef INVARIANTS
2546 dtor = unp_zdtor;
2547 #else
2548 dtor = NULL;
2549 #endif
2550 unp_zone = uma_zcreate("unpcb", sizeof(struct unpcb), NULL, dtor,
2551 NULL, NULL, UMA_ALIGN_CACHE, 0);
2552 uma_zone_set_max(unp_zone, maxsockets);
2553 uma_zone_set_warning(unp_zone, "kern.ipc.maxsockets limit reached");
2554 EVENTHANDLER_REGISTER(maxsockets_change, unp_zone_change,
2555 NULL, EVENTHANDLER_PRI_ANY);
2556 LIST_INIT(&unp_dhead);
2557 LIST_INIT(&unp_shead);
2558 LIST_INIT(&unp_sphead);
2559 SLIST_INIT(&unp_defers);
2560 TIMEOUT_TASK_INIT(taskqueue_thread, &unp_gc_task, 0, unp_gc, NULL);
2561 TASK_INIT(&unp_defer_task, 0, unp_process_defers, NULL);
2562 UNP_LINK_LOCK_INIT();
2563 UNP_DEFERRED_LOCK_INIT();
2564 }
2565 SYSINIT(unp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_SECOND, unp_init, NULL);
2566
2567 static void
2568 unp_internalize_cleanup_rights(struct mbuf *control)
2569 {
2570 struct cmsghdr *cp;
2571 struct mbuf *m;
2572 void *data;
2573 socklen_t datalen;
2574
2575 for (m = control; m != NULL; m = m->m_next) {
2576 cp = mtod(m, struct cmsghdr *);
2577 if (cp->cmsg_level != SOL_SOCKET ||
2578 cp->cmsg_type != SCM_RIGHTS)
2579 continue;
2580 data = CMSG_DATA(cp);
2581 datalen = (caddr_t)cp + cp->cmsg_len - (caddr_t)data;
2582 unp_freerights(data, datalen / sizeof(struct filedesc *));
2583 }
2584 }
2585
2586 static int
2587 unp_internalize(struct mbuf **controlp, struct thread *td,
2588 struct mbuf **clast, u_int *space, u_int *mbcnt)
2589 {
2590 struct mbuf *control, **initial_controlp;
2591 struct proc *p;
2592 struct filedesc *fdesc;
2593 struct bintime *bt;
2594 struct cmsghdr *cm;
2595 struct cmsgcred *cmcred;
2596 struct filedescent *fde, **fdep, *fdev;
2597 struct file *fp;
2598 struct timeval *tv;
2599 struct timespec *ts;
2600 void *data;
2601 socklen_t clen, datalen;
2602 int i, j, error, *fdp, oldfds;
2603 u_int newlen;
2604
2605 MPASS((*controlp)->m_next == NULL); /* COMPAT_OLDSOCK may violate */
2606 UNP_LINK_UNLOCK_ASSERT();
2607
2608 p = td->td_proc;
2609 fdesc = p->p_fd;
2610 error = 0;
2611 control = *controlp;
2612 *controlp = NULL;
2613 initial_controlp = controlp;
2614 for (clen = control->m_len, cm = mtod(control, struct cmsghdr *),
2615 data = CMSG_DATA(cm);
2616
2617 clen >= sizeof(*cm) && cm->cmsg_level == SOL_SOCKET &&
2618 clen >= cm->cmsg_len && cm->cmsg_len >= sizeof(*cm) &&
2619 (char *)cm + cm->cmsg_len >= (char *)data;
2620
2621 clen -= min(CMSG_SPACE(datalen), clen),
2622 cm = (struct cmsghdr *) ((char *)cm + CMSG_SPACE(datalen)),
2623 data = CMSG_DATA(cm)) {
2624 datalen = (char *)cm + cm->cmsg_len - (char *)data;
2625 switch (cm->cmsg_type) {
2626 case SCM_CREDS:
2627 *controlp = sbcreatecontrol(NULL, sizeof(*cmcred),
2628 SCM_CREDS, SOL_SOCKET, M_WAITOK);
2629 cmcred = (struct cmsgcred *)
2630 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2631 cmcred->cmcred_pid = p->p_pid;
2632 cmcred->cmcred_uid = td->td_ucred->cr_ruid;
2633 cmcred->cmcred_gid = td->td_ucred->cr_rgid;
2634 cmcred->cmcred_euid = td->td_ucred->cr_uid;
2635 cmcred->cmcred_ngroups = MIN(td->td_ucred->cr_ngroups,
2636 CMGROUP_MAX);
2637 for (i = 0; i < cmcred->cmcred_ngroups; i++)
2638 cmcred->cmcred_groups[i] =
2639 td->td_ucred->cr_groups[i];
2640 break;
2641
2642 case SCM_RIGHTS:
2643 oldfds = datalen / sizeof (int);
2644 if (oldfds == 0)
2645 continue;
2646 /*
2647 * On some machines sizeof(pointer) is bigger than sizeof(int),
2648 * so we need to check that the data fits into a single mbuf.
2649 * We could allocate several mbufs, and unp_externalize()
2650 * should even properly handle that. But it is not worth
2651 * complicating the code for the insane scenario of passing
2652 * over 200 file descriptors at once.
2653 */
2654 newlen = oldfds * sizeof(fdep[0]);
2655 if (CMSG_SPACE(newlen) > MCLBYTES) {
2656 error = EMSGSIZE;
2657 goto out;
2658 }
2659 /*
2660 * Check that all the FDs passed in refer to legal
2661 * files. If not, reject the entire operation.
2662 */
2663 fdp = data;
2664 FILEDESC_SLOCK(fdesc);
2665 for (i = 0; i < oldfds; i++, fdp++) {
2666 fp = fget_noref(fdesc, *fdp);
2667 if (fp == NULL) {
2668 FILEDESC_SUNLOCK(fdesc);
2669 error = EBADF;
2670 goto out;
2671 }
2672 if (!(fp->f_ops->fo_flags & DFLAG_PASSABLE)) {
2673 FILEDESC_SUNLOCK(fdesc);
2674 error = EOPNOTSUPP;
2675 goto out;
2676 }
2677 }
2678
2679 /*
2680 * Now replace the integer FDs with pointers to the
2681 * file structure and capability rights.
2682 */
2683 *controlp = sbcreatecontrol(NULL, newlen,
2684 SCM_RIGHTS, SOL_SOCKET, M_WAITOK);
2685 fdp = data;
2686 for (i = 0; i < oldfds; i++, fdp++) {
2687 if (!fhold(fdesc->fd_ofiles[*fdp].fde_file)) {
2688 fdp = data;
2689 for (j = 0; j < i; j++, fdp++) {
2690 fdrop(fdesc->fd_ofiles[*fdp].
2691 fde_file, td);
2692 }
2693 FILEDESC_SUNLOCK(fdesc);
2694 error = EBADF;
2695 goto out;
2696 }
2697 }
2698 fdp = data;
2699 fdep = (struct filedescent **)
2700 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2701 fdev = malloc(sizeof(*fdev) * oldfds, M_FILECAPS,
2702 M_WAITOK);
2703 for (i = 0; i < oldfds; i++, fdev++, fdp++) {
2704 fde = &fdesc->fd_ofiles[*fdp];
2705 fdep[i] = fdev;
2706 fdep[i]->fde_file = fde->fde_file;
2707 filecaps_copy(&fde->fde_caps,
2708 &fdep[i]->fde_caps, true);
2709 unp_internalize_fp(fdep[i]->fde_file);
2710 }
2711 FILEDESC_SUNLOCK(fdesc);
2712 break;
2713
2714 case SCM_TIMESTAMP:
2715 *controlp = sbcreatecontrol(NULL, sizeof(*tv),
2716 SCM_TIMESTAMP, SOL_SOCKET, M_WAITOK);
2717 tv = (struct timeval *)
2718 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2719 microtime(tv);
2720 break;
2721
2722 case SCM_BINTIME:
2723 *controlp = sbcreatecontrol(NULL, sizeof(*bt),
2724 SCM_BINTIME, SOL_SOCKET, M_WAITOK);
2725 bt = (struct bintime *)
2726 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2727 bintime(bt);
2728 break;
2729
2730 case SCM_REALTIME:
2731 *controlp = sbcreatecontrol(NULL, sizeof(*ts),
2732 SCM_REALTIME, SOL_SOCKET, M_WAITOK);
2733 ts = (struct timespec *)
2734 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2735 nanotime(ts);
2736 break;
2737
2738 case SCM_MONOTONIC:
2739 *controlp = sbcreatecontrol(NULL, sizeof(*ts),
2740 SCM_MONOTONIC, SOL_SOCKET, M_WAITOK);
2741 ts = (struct timespec *)
2742 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2743 nanouptime(ts);
2744 break;
2745
2746 default:
2747 error = EINVAL;
2748 goto out;
2749 }
2750
2751 if (space != NULL) {
2752 *space += (*controlp)->m_len;
2753 *mbcnt += MSIZE;
2754 if ((*controlp)->m_flags & M_EXT)
2755 *mbcnt += (*controlp)->m_ext.ext_size;
2756 *clast = *controlp;
2757 }
2758 controlp = &(*controlp)->m_next;
2759 }
2760 if (clen > 0)
2761 error = EINVAL;
2762
2763 out:
2764 if (error != 0 && initial_controlp != NULL)
2765 unp_internalize_cleanup_rights(*initial_controlp);
2766 m_freem(control);
2767 return (error);
2768 }
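
/*
 * Illustrative userland sketch (hypothetical names) of the send side
 * that produces the SCM_RIGHTS message internalized above, where each
 * int fd is replaced by a struct filedescent pointer while in flight:
 *
 *	union { struct cmsghdr hdr;
 *	    char buf[CMSG_SPACE(sizeof(int))]; } cm = {
 *		.hdr.cmsg_len = CMSG_LEN(sizeof(int)),
 *		.hdr.cmsg_level = SOL_SOCKET,
 *		.hdr.cmsg_type = SCM_RIGHTS,
 *	};
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *	    .msg_control = cm.buf, .msg_controllen = sizeof(cm.buf) };
 *
 *	memcpy(CMSG_DATA(&cm.hdr), &fd_to_pass, sizeof(int));
 *	sendmsg(s, &msg, 0);
 */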
2769
2770 static struct mbuf *
2771 unp_addsockcred(struct thread *td, struct mbuf *control, int mode,
2772 struct mbuf **clast, u_int *space, u_int *mbcnt)
2773 {
2774 struct mbuf *m, *n, *n_prev;
2775 const struct cmsghdr *cm;
2776 int ngroups, i, cmsgtype;
2777 size_t ctrlsz;
2778
2779 ngroups = MIN(td->td_ucred->cr_ngroups, CMGROUP_MAX);
2780 if (mode & UNP_WANTCRED_ALWAYS) {
2781 ctrlsz = SOCKCRED2SIZE(ngroups);
2782 cmsgtype = SCM_CREDS2;
2783 } else {
2784 ctrlsz = SOCKCREDSIZE(ngroups);
2785 cmsgtype = SCM_CREDS;
2786 }
2787
2788 m = sbcreatecontrol(NULL, ctrlsz, cmsgtype, SOL_SOCKET, M_NOWAIT);
2789 if (m == NULL)
2790 return (control);
2791 MPASS((m->m_flags & M_EXT) == 0 && m->m_next == NULL);
2792
2793 if (mode & UNP_WANTCRED_ALWAYS) {
2794 struct sockcred2 *sc;
2795
2796 sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *));
2797 sc->sc_version = 0;
2798 sc->sc_pid = td->td_proc->p_pid;
2799 sc->sc_uid = td->td_ucred->cr_ruid;
2800 sc->sc_euid = td->td_ucred->cr_uid;
2801 sc->sc_gid = td->td_ucred->cr_rgid;
2802 sc->sc_egid = td->td_ucred->cr_gid;
2803 sc->sc_ngroups = ngroups;
2804 for (i = 0; i < sc->sc_ngroups; i++)
2805 sc->sc_groups[i] = td->td_ucred->cr_groups[i];
2806 } else {
2807 struct sockcred *sc;
2808
2809 sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *));
2810 sc->sc_uid = td->td_ucred->cr_ruid;
2811 sc->sc_euid = td->td_ucred->cr_uid;
2812 sc->sc_gid = td->td_ucred->cr_rgid;
2813 sc->sc_egid = td->td_ucred->cr_gid;
2814 sc->sc_ngroups = ngroups;
2815 for (i = 0; i < sc->sc_ngroups; i++)
2816 sc->sc_groups[i] = td->td_ucred->cr_groups[i];
2817 }
2818
2819 /*
2820 * Unlink SCM_CREDS control messages (struct cmsgcred), since the
2821 * just-created SCM_CREDS control message (struct sockcred) has a
2822 * different format.
2823 */
2824 if (control != NULL && cmsgtype == SCM_CREDS)
2825 for (n = control, n_prev = NULL; n != NULL;) {
2826 cm = mtod(n, struct cmsghdr *);
2827 if (cm->cmsg_level == SOL_SOCKET &&
2828 cm->cmsg_type == SCM_CREDS) {
2829 if (n_prev == NULL)
2830 control = n->m_next;
2831 else
2832 n_prev->m_next = n->m_next;
2833 if (space != NULL) {
2834 MPASS(*space >= n->m_len);
2835 *space -= n->m_len;
2836 MPASS(*mbcnt >= MSIZE);
2837 *mbcnt -= MSIZE;
2838 if (n->m_flags & M_EXT) {
2839 MPASS(*mbcnt >=
2840 n->m_ext.ext_size);
2841 *mbcnt -= n->m_ext.ext_size;
2842 }
2843 MPASS(clast);
2844 if (*clast == n) {
2845 MPASS(n->m_next == NULL);
2846 if (n_prev == NULL)
2847 *clast = m;
2848 else
2849 *clast = n_prev;
2850 }
2851 }
2852 n = m_free(n);
2853 } else {
2854 n_prev = n;
2855 n = n->m_next;
2856 }
2857 }
2858
2859 /* Prepend it to the head. */
2860 m->m_next = control;
2861 if (space != NULL) {
2862 *space += m->m_len;
2863 *mbcnt += MSIZE;
2864 if (control == NULL)
2865 *clast = m;
2866 }
2867 return (m);
2868 }
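
/*
 * Illustrative userland sketch (hypothetical names): a receiver opts
 * in with LOCAL_CREDS_PERSISTENT, after which every recvmsg(2) on the
 * socket carries an SCM_CREDS2 control message holding the struct
 * sockcred2 filled in above:
 *
 *	int on = 1;
 *
 *	setsockopt(s, SOL_LOCAL, LOCAL_CREDS_PERSISTENT, &on, sizeof(on));
 *	// subsequent recvmsg(2) calls yield SOL_SOCKET/SCM_CREDS2 cmsgs
 */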
2869
2870 static struct unpcb *
2871 fptounp(struct file *fp)
2872 {
2873 struct socket *so;
2874
2875 if (fp->f_type != DTYPE_SOCKET)
2876 return (NULL);
2877 if ((so = fp->f_data) == NULL)
2878 return (NULL);
2879 if (so->so_proto->pr_domain != &localdomain)
2880 return (NULL);
2881 return sotounpcb(so);
2882 }
2883
2884 static void
2885 unp_discard(struct file *fp)
2886 {
2887 struct unp_defer *dr;
2888
2889 if (unp_externalize_fp(fp)) {
2890 dr = malloc(sizeof(*dr), M_TEMP, M_WAITOK);
2891 dr->ud_fp = fp;
2892 UNP_DEFERRED_LOCK();
2893 SLIST_INSERT_HEAD(&unp_defers, dr, ud_link);
2894 UNP_DEFERRED_UNLOCK();
2895 atomic_add_int(&unp_defers_count, 1);
2896 taskqueue_enqueue(taskqueue_thread, &unp_defer_task);
2897 } else
2898 closef_nothread(fp);
2899 }
2900
2901 static void
2902 unp_process_defers(void *arg __unused, int pending)
2903 {
2904 struct unp_defer *dr;
2905 SLIST_HEAD(, unp_defer) drl;
2906 int count;
2907
2908 SLIST_INIT(&drl);
2909 for (;;) {
2910 UNP_DEFERRED_LOCK();
2911 if (SLIST_FIRST(&unp_defers) == NULL) {
2912 UNP_DEFERRED_UNLOCK();
2913 break;
2914 }
2915 SLIST_SWAP(&unp_defers, &drl, unp_defer);
2916 UNP_DEFERRED_UNLOCK();
2917 count = 0;
2918 while ((dr = SLIST_FIRST(&drl)) != NULL) {
2919 SLIST_REMOVE_HEAD(&drl, ud_link);
2920 closef_nothread(dr->ud_fp);
2921 free(dr, M_TEMP);
2922 count++;
2923 }
2924 atomic_add_int(&unp_defers_count, -count);
2925 }
2926 }
2927
2928 static void
2929 unp_internalize_fp(struct file *fp)
2930 {
2931 struct unpcb *unp;
2932
2933 UNP_LINK_WLOCK();
2934 if ((unp = fptounp(fp)) != NULL) {
2935 unp->unp_file = fp;
2936 unp->unp_msgcount++;
2937 }
2938 unp_rights++;
2939 UNP_LINK_WUNLOCK();
2940 }
2941
2942 static int
2943 unp_externalize_fp(struct file *fp)
2944 {
2945 struct unpcb *unp;
2946 int ret;
2947
2948 UNP_LINK_WLOCK();
2949 if ((unp = fptounp(fp)) != NULL) {
2950 unp->unp_msgcount--;
2951 ret = 1;
2952 } else
2953 ret = 0;
2954 unp_rights--;
2955 UNP_LINK_WUNLOCK();
2956 return (ret);
2957 }
2958
2959 /*
2960 * unp_marked indicates whether additional work has been deferred for a
2961 * future pass through unp_gc(). It is local to the gc task and does not
2962 * require explicit synchronization.
2963 */
2964 static int unp_marked;
2965
2966 static void
2967 unp_remove_dead_ref(struct filedescent **fdep, int fdcount)
2968 {
2969 struct unpcb *unp;
2970 struct file *fp;
2971 int i;
2972
2973 /*
2974 * This function can only be called from the gc task.
2975 */
2976 KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0,
2977 ("%s: not on gc callout", __func__));
2978 UNP_LINK_LOCK_ASSERT();
2979
2980 for (i = 0; i < fdcount; i++) {
2981 fp = fdep[i]->fde_file;
2982 if ((unp = fptounp(fp)) == NULL)
2983 continue;
2984 if ((unp->unp_gcflag & UNPGC_DEAD) == 0)
2985 continue;
2986 unp->unp_gcrefs--;
2987 }
2988 }
2989
2990 static void
2991 unp_restore_undead_ref(struct filedescent **fdep, int fdcount)
2992 {
2993 struct unpcb *unp;
2994 struct file *fp;
2995 int i;
2996
2997 /*
2998 * This function can only be called from the gc task.
2999 */
3000 KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0,
3001 ("%s: not on gc callout", __func__));
3002 UNP_LINK_LOCK_ASSERT();
3003
3004 for (i = 0; i < fdcount; i++) {
3005 fp = fdep[i]->fde_file;
3006 if ((unp = fptounp(fp)) == NULL)
3007 continue;
3008 if ((unp->unp_gcflag & UNPGC_DEAD) == 0)
3009 continue;
3010 unp->unp_gcrefs++;
3011 unp_marked++;
3012 }
3013 }
3014
3015 static void
3016 unp_scan_socket(struct socket *so, void (*op)(struct filedescent **, int))
3017 {
3018 struct sockbuf *sb;
3019
3020 SOCK_LOCK_ASSERT(so);
3021
3022 if (sotounpcb(so)->unp_gcflag & UNPGC_IGNORE_RIGHTS)
3023 return;
3024
3025 SOCK_RECVBUF_LOCK(so);
3026 switch (so->so_type) {
3027 case SOCK_DGRAM:
3028 unp_scan(STAILQ_FIRST(&so->so_rcv.uxdg_mb), op);
3029 unp_scan(so->so_rcv.uxdg_peeked, op);
3030 TAILQ_FOREACH(sb, &so->so_rcv.uxdg_conns, uxdg_clist)
3031 unp_scan(STAILQ_FIRST(&sb->uxdg_mb), op);
3032 break;
3033 case SOCK_STREAM:
3034 case SOCK_SEQPACKET:
3035 unp_scan(so->so_rcv.sb_mb, op);
3036 break;
3037 }
3038 SOCK_RECVBUF_UNLOCK(so);
3039 }
3040
3041 static void
3042 unp_gc_scan(struct unpcb *unp, void (*op)(struct filedescent **, int))
3043 {
3044 struct socket *so, *soa;
3045
3046 so = unp->unp_socket;
3047 SOCK_LOCK(so);
3048 if (SOLISTENING(so)) {
3049 /*
3050 * Mark all sockets in our accept queue.
3051 */
3052 TAILQ_FOREACH(soa, &so->sol_comp, so_list)
3053 unp_scan_socket(soa, op);
3054 } else {
3055 /*
3056 * Mark all sockets we reference with RIGHTS.
3057 */
3058 unp_scan_socket(so, op);
3059 }
3060 SOCK_UNLOCK(so);
3061 }
3062
3063 static int unp_recycled;
3064 SYSCTL_INT(_net_local, OID_AUTO, recycled, CTLFLAG_RD, &unp_recycled, 0,
3065 "Number of unreachable sockets claimed by the garbage collector.");
3066
3067 static int unp_taskcount;
3068 SYSCTL_INT(_net_local, OID_AUTO, taskcount, CTLFLAG_RD, &unp_taskcount, 0,
3069 "Number of times the garbage collector has run.");
3070
3071 SYSCTL_UINT(_net_local, OID_AUTO, sockcount, CTLFLAG_RD, &unp_count, 0,
3072 "Number of active local sockets.");
3073
3074 static void
3075 unp_gc(__unused void *arg, int pending)
3076 {
3077 struct unp_head *heads[] = { &unp_dhead, &unp_shead, &unp_sphead,
3078 NULL };
3079 struct unp_head **head;
3080 struct unp_head unp_deadhead; /* List of potentially-dead sockets. */
3081 struct file *f, **unref;
3082 struct unpcb *unp, *unptmp;
3083 int i, total, unp_unreachable;
3084
3085 LIST_INIT(&unp_deadhead);
3086 unp_taskcount++;
3087 UNP_LINK_RLOCK();
3088 /*
3089 * First determine which sockets may be in cycles.
3090 */
3091 unp_unreachable = 0;
3092
3093 for (head = heads; *head != NULL; head++)
3094 LIST_FOREACH(unp, *head, unp_link) {
3095 KASSERT((unp->unp_gcflag & ~UNPGC_IGNORE_RIGHTS) == 0,
3096 ("%s: unp %p has unexpected gc flags 0x%x",
3097 __func__, unp, (unsigned int)unp->unp_gcflag));
3098
3099 f = unp->unp_file;
3100
3101 /*
3102 * Check for an unreachable socket potentially in a
3103 * cycle. It must be in a queue as indicated by
3104 * msgcount, and this must equal the file reference
3105 * count. Note that when msgcount is 0 the file is
3106 * NULL.
3107 */
3108 if (f != NULL && unp->unp_msgcount != 0 &&
3109 refcount_load(&f->f_count) == unp->unp_msgcount) {
3110 LIST_INSERT_HEAD(&unp_deadhead, unp, unp_dead);
3111 unp->unp_gcflag |= UNPGC_DEAD;
3112 unp->unp_gcrefs = unp->unp_msgcount;
3113 unp_unreachable++;
3114 }
3115 }
3116
3117 /*
3118 * Scan all sockets previously marked as potentially being in a cycle
3119 * and remove the references each socket holds on any UNPGC_DEAD
3120 * sockets in its queue. After this step, all remaining references on
3121 * sockets marked UNPGC_DEAD should not be part of any cycle.
3122 */
3123 LIST_FOREACH(unp, &unp_deadhead, unp_dead)
3124 unp_gc_scan(unp, unp_remove_dead_ref);
3125
3126 /*
3127 * If a socket still has a positive refcount, it cannot be in a cycle.
3128 * In this case, increment the refcounts of its children iteratively.
3129 * Stop the scan once we do a complete loop without discovering
3130 * a new reachable socket.
3131 */
3132 do {
3133 unp_marked = 0;
3134 LIST_FOREACH_SAFE(unp, &unp_deadhead, unp_dead, unptmp)
3135 if (unp->unp_gcrefs > 0) {
3136 unp->unp_gcflag &= ~UNPGC_DEAD;
3137 LIST_REMOVE(unp, unp_dead);
3138 KASSERT(unp_unreachable > 0,
3139 ("%s: unp_unreachable underflow.",
3140 __func__));
3141 unp_unreachable--;
3142 unp_gc_scan(unp, unp_restore_undead_ref);
3143 }
3144 } while (unp_marked);
3145
3146 UNP_LINK_RUNLOCK();
3147
3148 if (unp_unreachable == 0)
3149 return;
3150
3151 /*
3152 * Allocate space for a local array of dead unpcbs.
3153 * TODO: can this path be simplified by instead using the local
3154 * dead list at unp_deadhead, after taking out references
3155 * on the file object and/or unpcb and dropping the link lock?
3156 */
3157 unref = malloc(unp_unreachable * sizeof(struct file *),
3158 M_TEMP, M_WAITOK);
3159
3160 /*
3161 * Iterate looking for sockets which have been specifically marked
3162 * as unreachable and store them locally.
3163 */
3164 UNP_LINK_RLOCK();
3165 total = 0;
3166 LIST_FOREACH(unp, &unp_deadhead, unp_dead) {
3167 KASSERT((unp->unp_gcflag & UNPGC_DEAD) != 0,
3168 ("%s: unp %p not marked UNPGC_DEAD", __func__, unp));
3169 unp->unp_gcflag &= ~UNPGC_DEAD;
3170 f = unp->unp_file;
3171 if (unp->unp_msgcount == 0 || f == NULL ||
3172 refcount_load(&f->f_count) != unp->unp_msgcount ||
3173 !fhold(f))
3174 continue;
3175 unref[total++] = f;
3176 KASSERT(total <= unp_unreachable,
3177 ("%s: incorrect unreachable count.", __func__));
3178 }
3179 UNP_LINK_RUNLOCK();
3180
3181 /*
3182 * Now flush all sockets, freeing rights. This will free the
3183 * struct files associated with these sockets but leave each socket
3184 * with one remaining ref.
3185 */
3186 for (i = 0; i < total; i++) {
3187 struct socket *so;
3188
3189 so = unref[i]->f_data;
3190 CURVNET_SET(so->so_vnet);
3191 socantrcvmore(so);
3192 unp_dispose(so);
3193 CURVNET_RESTORE();
3194 }
3195
3196 /*
3197 * And finally release the sockets so they can be reclaimed.
3198 */
3199 for (i = 0; i < total; i++)
3200 fdrop(unref[i], NULL);
3201 unp_recycled += total;
3202 free(unref, M_TEMP);
3203 }
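
/*
 * Illustrative note on how a collectable cycle forms (hypothetical
 * userland sketch): a socket sent across itself and then closed
 * leaves f_count equal to unp_msgcount, the exact condition tested
 * above.
 *
 *	int sv[2];
 *
 *	socketpair(PF_UNIX, SOCK_STREAM, 0, sv);
 *	// sendmsg() sv[0] over sv[0] as SCM_RIGHTS, then:
 *	close(sv[0]);
 *	close(sv[1]);
 *	// only the in-flight reference remains; unp_gc() reclaims it
 */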
3204
3205 /*
3206 * Synchronize against unp_gc, which can trip over data as we are freeing it.
3207 */
3208 static void
3209 unp_dispose(struct socket *so)
3210 {
3211 struct sockbuf *sb;
3212 struct unpcb *unp;
3213 struct mbuf *m;
3214 int error __diagused;
3215
3216 MPASS(!SOLISTENING(so));
3217
3218 unp = sotounpcb(so);
3219 UNP_LINK_WLOCK();
3220 unp->unp_gcflag |= UNPGC_IGNORE_RIGHTS;
3221 UNP_LINK_WUNLOCK();
3222
3223 /*
3224 * Grab our special mbufs before calling sbrelease().
3225 */
3226 error = SOCK_IO_RECV_LOCK(so, SBL_WAIT | SBL_NOINTR);
3227 MPASS(!error);
3228 SOCK_RECVBUF_LOCK(so);
3229 switch (so->so_type) {
3230 case SOCK_DGRAM:
3231 while ((sb = TAILQ_FIRST(&so->so_rcv.uxdg_conns)) != NULL) {
3232 STAILQ_CONCAT(&so->so_rcv.uxdg_mb, &sb->uxdg_mb);
3233 TAILQ_REMOVE(&so->so_rcv.uxdg_conns, sb, uxdg_clist);
3234 /* Note: socket of sb may reconnect. */
3235 sb->uxdg_cc = sb->uxdg_ctl = sb->uxdg_mbcnt = 0;
3236 }
3237 sb = &so->so_rcv;
3238 if (sb->uxdg_peeked != NULL) {
3239 STAILQ_INSERT_HEAD(&sb->uxdg_mb, sb->uxdg_peeked,
3240 m_stailqpkt);
3241 sb->uxdg_peeked = NULL;
3242 }
3243 m = STAILQ_FIRST(&sb->uxdg_mb);
3244 STAILQ_INIT(&sb->uxdg_mb);
3245 /* XXX: our shortened sbrelease() */
3246 (void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
3247 RLIM_INFINITY);
3248 /*
3249 * XXXGL: Mark sb with SBS_CANTRCVMORE. This is needed to
3250 * prevent uipc_sosend_dgram() or unp_disconnect() from adding
3251 * more data to the socket.
3252 * We came here either through shutdown(2) or from the final
3253 * sofree(). The sofree() case is simple as it guarantees
3254 * that no more sends will happen, however we can race with
3255 * unp_disconnect() from our peer. The shutdown(2) case is
3256 * more exotic. It would call into unp_dispose() only if the
3257 * socket is SS_ISCONNECTED. This is possible if we did
3258 * connect(2) on this socket and also had it bound with
3259 * bind(2), receiving connections from other sockets.
3260 * Because uipc_shutdown() violates POSIX (see the comment
3261 * there) we will end up here shutting down our receive side.
3262 * Of course this affects not only the peer we connect(2)ed
3263 * to, but also all of the peers who have connect(2)ed to us.
3264 * Their sends would end up with ENOBUFS.
3265 */
3266 sb->sb_state |= SBS_CANTRCVMORE;
3267 break;
3268 case SOCK_STREAM:
3269 case SOCK_SEQPACKET:
3270 sb = &so->so_rcv;
3271 m = sbcut_locked(sb, sb->sb_ccc);
3272 KASSERT(sb->sb_ccc == 0 && sb->sb_mb == NULL && sb->sb_mbcnt == 0,
3273 ("%s: ccc %u mb %p mbcnt %u", __func__,
3274 sb->sb_ccc, (void *)sb->sb_mb, sb->sb_mbcnt));
3275 sbrelease_locked(so, SO_RCV);
3276 break;
3277 }
3278 SOCK_RECVBUF_UNLOCK(so);
3279 SOCK_IO_RECV_UNLOCK(so);
3280
3281 if (m != NULL) {
3282 unp_scan(m, unp_freerights);
3283 m_freemp(m);
3284 }
3285 }
3286
3287 static void
3288 unp_scan(struct mbuf *m0, void (*op)(struct filedescent **, int))
3289 {
3290 struct mbuf *m;
3291 struct cmsghdr *cm;
3292 void *data;
3293 socklen_t clen, datalen;
3294
3295 while (m0 != NULL) {
3296 for (m = m0; m; m = m->m_next) {
3297 if (m->m_type != MT_CONTROL)
3298 continue;
3299
3300 cm = mtod(m, struct cmsghdr *);
3301 clen = m->m_len;
3302
3303 while (cm != NULL) {
3304 if (sizeof(*cm) > clen || cm->cmsg_len > clen)
3305 break;
3306
3307 data = CMSG_DATA(cm);
3308 datalen = (caddr_t)cm + cm->cmsg_len
3309 - (caddr_t)data;
3310
3311 if (cm->cmsg_level == SOL_SOCKET &&
3312 cm->cmsg_type == SCM_RIGHTS) {
3313 (*op)(data, datalen /
3314 sizeof(struct filedescent *));
3315 }
3316
3317 if (CMSG_SPACE(datalen) < clen) {
3318 clen -= CMSG_SPACE(datalen);
3319 cm = (struct cmsghdr *)
3320 ((caddr_t)cm + CMSG_SPACE(datalen));
3321 } else {
3322 clen = 0;
3323 cm = NULL;
3324 }
3325 }
3326 }
3327 m0 = m0->m_nextpkt;
3328 }
3329 }
3330
3331 /*
3332 * Definitions of protocols supported in the LOCAL domain.
3333 */
3334 static struct protosw streamproto = {
3335 .pr_type = SOCK_STREAM,
3336 .pr_flags = PR_CONNREQUIRED | PR_WANTRCVD | PR_CAPATTACH,
3337 .pr_ctloutput = &uipc_ctloutput,
3338 .pr_abort = uipc_abort,
3339 .pr_accept = uipc_peeraddr,
3340 .pr_attach = uipc_attach,
3341 .pr_bind = uipc_bind,
3342 .pr_bindat = uipc_bindat,
3343 .pr_connect = uipc_connect,
3344 .pr_connectat = uipc_connectat,
3345 .pr_connect2 = uipc_connect2,
3346 .pr_detach = uipc_detach,
3347 .pr_disconnect = uipc_disconnect,
3348 .pr_listen = uipc_listen,
3349 .pr_peeraddr = uipc_peeraddr,
3350 .pr_rcvd = uipc_rcvd,
3351 .pr_send = uipc_send,
3352 .pr_ready = uipc_ready,
3353 .pr_sense = uipc_sense,
3354 .pr_shutdown = uipc_shutdown,
3355 .pr_sockaddr = uipc_sockaddr,
3356 .pr_soreceive = soreceive_generic,
3357 .pr_close = uipc_close,
3358 };
3359
3360 static struct protosw dgramproto = {
3361 .pr_type = SOCK_DGRAM,
3362 .pr_flags = PR_ATOMIC | PR_ADDR | PR_CAPATTACH | PR_SOCKBUF,
3363 .pr_ctloutput = &uipc_ctloutput,
3364 .pr_abort = uipc_abort,
3365 .pr_accept = uipc_peeraddr,
3366 .pr_attach = uipc_attach,
3367 .pr_bind = uipc_bind,
3368 .pr_bindat = uipc_bindat,
3369 .pr_connect = uipc_connect,
3370 .pr_connectat = uipc_connectat,
3371 .pr_connect2 = uipc_connect2,
3372 .pr_detach = uipc_detach,
3373 .pr_disconnect = uipc_disconnect,
3374 .pr_peeraddr = uipc_peeraddr,
3375 .pr_sosend = uipc_sosend_dgram,
3376 .pr_sense = uipc_sense,
3377 .pr_shutdown = uipc_shutdown,
3378 .pr_sockaddr = uipc_sockaddr,
3379 .pr_soreceive = uipc_soreceive_dgram,
3380 .pr_close = uipc_close,
3381 };
3382
3383 static struct protosw seqpacketproto = {
3384 .pr_type = SOCK_SEQPACKET,
3385 /*
3386 * XXXRW: For now, PR_ADDR because soreceive will bump into them
3387 * due to our use of sbappendaddr. A new sbappend variant is needed
3388 * that supports both atomic record writes and control data.
3389 */
3390 .pr_flags = PR_ADDR | PR_ATOMIC | PR_CONNREQUIRED |
3391 PR_WANTRCVD | PR_CAPATTACH,
3392 .pr_ctloutput = &uipc_ctloutput,
3393 .pr_abort = uipc_abort,
3394 .pr_accept = uipc_peeraddr,
3395 .pr_attach = uipc_attach,
3396 .pr_bind = uipc_bind,
3397 .pr_bindat = uipc_bindat,
3398 .pr_connect = uipc_connect,
3399 .pr_connectat = uipc_connectat,
3400 .pr_connect2 = uipc_connect2,
3401 .pr_detach = uipc_detach,
3402 .pr_disconnect = uipc_disconnect,
3403 .pr_listen = uipc_listen,
3404 .pr_peeraddr = uipc_peeraddr,
3405 .pr_rcvd = uipc_rcvd,
3406 .pr_send = uipc_send,
3407 .pr_sense = uipc_sense,
3408 .pr_shutdown = uipc_shutdown,
3409 .pr_sockaddr = uipc_sockaddr,
3410 .pr_soreceive = soreceive_generic, /* XXX: or...? */
3411 .pr_close = uipc_close,
3412 };
3413
3414 static struct domain localdomain = {
3415 .dom_family = AF_LOCAL,
3416 .dom_name = "local",
3417 .dom_externalize = unp_externalize,
3418 .dom_nprotosw = 3,
3419 .dom_protosw = {
3420 &streamproto,
3421 &dgramproto,
3422 &seqpacketproto,
3423 }
3424 };
3425 DOMAIN_SET(local);
3426
3427 /*
3428 * A helper function called by VFS before socket-type vnode reclamation.
3429 * For an active vnode it clears the unp_vnode pointer and decrements
3430 * the vnode's use count.
3431 */
3432 void
3433 vfs_unp_reclaim(struct vnode *vp)
3434 {
3435 struct unpcb *unp;
3436 int active;
3437 struct mtx *vplock;
3438
3439 ASSERT_VOP_ELOCKED(vp, "vfs_unp_reclaim");
3440 KASSERT(vp->v_type == VSOCK,
3441 ("vfs_unp_reclaim: vp->v_type != VSOCK"));
3442
3443 active = 0;
3444 vplock = mtx_pool_find(mtxpool_sleep, vp);
3445 mtx_lock(vplock);
3446 VOP_UNP_CONNECT(vp, &unp);
3447 if (unp == NULL)
3448 goto done;
3449 UNP_PCB_LOCK(unp);
3450 if (unp->unp_vnode == vp) {
3451 VOP_UNP_DETACH(vp);
3452 unp->unp_vnode = NULL;
3453 active = 1;
3454 }
3455 UNP_PCB_UNLOCK(unp);
3456 done:
3457 mtx_unlock(vplock);
3458 if (active)
3459 vunref(vp);
3460 }
3461
3462 #ifdef DDB
3463 static void
3464 db_print_indent(int indent)
3465 {
3466 int i;
3467
3468 for (i = 0; i < indent; i++)
3469 db_printf(" ");
3470 }
3471
3472 static void
3473 db_print_unpflags(int unp_flags)
3474 {
3475 int comma;
3476
3477 comma = 0;
3478 if (unp_flags & UNP_HAVEPC) {
3479 db_printf("%sUNP_HAVEPC", comma ? ", " : "");
3480 comma = 1;
3481 }
3482 if (unp_flags & UNP_WANTCRED_ALWAYS) {
3483 db_printf("%sUNP_WANTCRED_ALWAYS", comma ? ", " : "");
3484 comma = 1;
3485 }
3486 if (unp_flags & UNP_WANTCRED_ONESHOT) {
3487 db_printf("%sUNP_WANTCRED_ONESHOT", comma ? ", " : "");
3488 comma = 1;
3489 }
3490 if (unp_flags & UNP_CONNECTING) {
3491 db_printf("%sUNP_CONNECTING", comma ? ", " : "");
3492 comma = 1;
3493 }
3494 if (unp_flags & UNP_BINDING) {
3495 db_printf("%sUNP_BINDING", comma ? ", " : "");
3496 comma = 1;
3497 }
3498 }
3499
3500 static void
3501 db_print_xucred(int indent, struct xucred *xu)
3502 {
3503 int comma, i;
3504
3505 db_print_indent(indent);
3506 db_printf("cr_version: %u cr_uid: %u cr_pid: %d cr_ngroups: %d\n",
3507 xu->cr_version, xu->cr_uid, xu->cr_pid, xu->cr_ngroups);
3508 db_print_indent(indent);
3509 db_printf("cr_groups: ");
3510 comma = 0;
3511 for (i = 0; i < xu->cr_ngroups; i++) {
3512 db_printf("%s%u", comma ? ", " : "", xu->cr_groups[i]);
3513 comma = 1;
3514 }
3515 db_printf("\n");
3516 }
3517
3518 static void
3519 db_print_unprefs(int indent, struct unp_head *uh)
3520 {
3521 struct unpcb *unp;
3522 int counter;
3523
3524 counter = 0;
3525 LIST_FOREACH(unp, uh, unp_reflink) {
3526 if (counter % 4 == 0)
3527 db_print_indent(indent);
3528 db_printf("%p ", unp);
3529 if (counter % 4 == 3)
3530 db_printf("\n");
3531 counter++;
3532 }
3533 if (counter != 0 && counter % 4 != 0)
3534 db_printf("\n");
3535 }
3536
3537 DB_SHOW_COMMAND(unpcb, db_show_unpcb)
3538 {
3539 struct unpcb *unp;
3540
3541 if (!have_addr) {
3542 db_printf("usage: show unpcb <addr>\n");
3543 return;
3544 }
3545 unp = (struct unpcb *)addr;
3546
3547 db_printf("unp_socket: %p unp_vnode: %p\n", unp->unp_socket,
3548 unp->unp_vnode);
3549
3550 db_printf("unp_ino: %ju unp_conn: %p\n", (uintmax_t)unp->unp_ino,
3551 unp->unp_conn);
3552
3553 db_printf("unp_refs:\n");
3554 db_print_unprefs(2, &unp->unp_refs);
3555
3556 /* XXXRW: Would be nice to print the full address, if any. */
3557 db_printf("unp_addr: %p\n", unp->unp_addr);
3558
3559 db_printf("unp_gencnt: %llu\n",
3560 (unsigned long long)unp->unp_gencnt);
3561
3562 db_printf("unp_flags: %x (", unp->unp_flags);
3563 db_print_unpflags(unp->unp_flags);
3564 db_printf(")\n");
3565
3566 db_printf("unp_peercred:\n");
3567 db_print_xucred(2, &unp->unp_peercred);
3568
3569 db_printf("unp_refcount: %u\n", unp->unp_refcount);
3570 }
3571 #endif
3572