xref: /linux/net/unix/af_unix.c (revision 9841991a)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * NET4:	Implementation of BSD Unix domain sockets.
4  *
5  * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
6  *
7  * Fixes:
8  *		Linus Torvalds	:	Assorted bug cures.
9  *		Niibe Yutaka	:	async I/O support.
10  *		Carsten Paeth	:	PF_UNIX check, address fixes.
11  *		Alan Cox	:	Limit size of allocated blocks.
12  *		Alan Cox	:	Fixed the stupid socketpair bug.
13  *		Alan Cox	:	BSD compatibility fine tuning.
14  *		Alan Cox	:	Fixed a bug in connect when interrupted.
15  *		Alan Cox	:	Sorted out a proper draft version of
16  *					file descriptor passing hacked up from
17  *					Mike Shaver's work.
18  *		Marty Leisner	:	Fixes to fd passing
19  *		Nick Nevin	:	recvmsg bugfix.
20  *		Alan Cox	:	Started proper garbage collector
21  *		Heiko EiBfeldt	:	Missing verify_area check
22  *		Alan Cox	:	Started POSIXisms
23  *		Andreas Schwab	:	Replace inode by dentry for proper
24  *					reference counting
25  *		Kirk Petersen	:	Made this a module
26  *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
27  *					Lots of bug fixes.
28  *	     Alexey Kuznetosv	:	Repaired (I hope) bugs introduces
29  *					by above two patches.
30  *	     Andrea Arcangeli	:	If possible we block in connect(2)
31  *					if the max backlog of the listen socket
32  *					is been reached. This won't break
33  *					old apps and it will avoid huge amount
34  *					of socks hashed (this for unix_gc()
35  *					performances reasons).
36  *					Security fix that limits the max
37  *					number of socks to 2*max_files and
38  *					the number of skb queueable in the
39  *					dgram receiver.
40  *		Artur Skawina   :	Hash function optimizations
41  *	     Alexey Kuznetsov   :	Full scale SMP. Lot of bugs are introduced 8)
42  *	      Malcolm Beattie   :	Set peercred for socketpair
43  *	     Michal Ostrowski   :       Module initialization cleanup.
44  *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
45  *	     				the core infrastructure is doing that
46  *	     				for all net proto families now (2.5.69+)
47  *
48  * Known differences from reference BSD that was tested:
49  *
50  *	[TO FIX]
51  *	ECONNREFUSED is not returned from one end of a connected() socket to the
52  *		other the moment one end closes.
53  *	fstat() doesn't return st_dev=0, and give the blksize as high water mark
54  *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
55  *	[NOT TO FIX]
56  *	accept() returns a path name even if the connecting socket has closed
57  *		in the meantime (BSD loses the path and gives up).
58  *	accept() returns 0 length path for an unbound connector. BSD returns 16
59  *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
60  *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
61  *	BSD af_unix apparently has connect forgetting to block properly.
62  *		(need to check this with the POSIX spec in detail)
63  *
64  * Differences from 2.0.0-11-... (ANK)
65  *	Bug fixes and improvements.
66  *		- client shutdown killed server socket.
67  *		- removed all useless cli/sti pairs.
68  *
69  *	Semantic changes/extensions.
70  *		- generic control message passing.
71  *		- SCM_CREDENTIALS control message.
72  *		- "Abstract" (not FS based) socket bindings.
73  *		  Abstract names are sequences of bytes (not zero terminated)
74  *		  started by 0, so that this name space does not intersect
75  *		  with BSD names.
76  */
77 
78 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
79 
80 #include <linux/module.h>
81 #include <linux/kernel.h>
82 #include <linux/signal.h>
83 #include <linux/sched/signal.h>
84 #include <linux/errno.h>
85 #include <linux/string.h>
86 #include <linux/stat.h>
87 #include <linux/dcache.h>
88 #include <linux/namei.h>
89 #include <linux/socket.h>
90 #include <linux/un.h>
91 #include <linux/fcntl.h>
92 #include <linux/filter.h>
93 #include <linux/termios.h>
94 #include <linux/sockios.h>
95 #include <linux/net.h>
96 #include <linux/in.h>
97 #include <linux/fs.h>
98 #include <linux/slab.h>
99 #include <linux/uaccess.h>
100 #include <linux/skbuff.h>
101 #include <linux/netdevice.h>
102 #include <net/net_namespace.h>
103 #include <net/sock.h>
104 #include <net/tcp_states.h>
105 #include <net/af_unix.h>
106 #include <linux/proc_fs.h>
107 #include <linux/seq_file.h>
108 #include <net/scm.h>
109 #include <linux/init.h>
110 #include <linux/poll.h>
111 #include <linux/rtnetlink.h>
112 #include <linux/mount.h>
113 #include <net/checksum.h>
114 #include <linux/security.h>
115 #include <linux/splice.h>
116 #include <linux/freezer.h>
117 #include <linux/file.h>
118 #include <linux/btf_ids.h>
119 #include <linux/bpf-cgroup.h>
120 
121 static atomic_long_t unix_nr_socks;
122 static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
123 static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
124 
125 /* SMP locking strategy:
126  *    hash table is protected with spinlock.
127  *    each socket state is protected by separate spinlock.
128  */
129 
130 static unsigned int unix_unbound_hash(struct sock *sk)
131 {
132 	unsigned long hash = (unsigned long)sk;
133 
134 	hash ^= hash >> 16;
135 	hash ^= hash >> 8;
136 	hash ^= sk->sk_type;
137 
138 	return hash & UNIX_HASH_MOD;
139 }
140 
141 static unsigned int unix_bsd_hash(struct inode *i)
142 {
143 	return i->i_ino & UNIX_HASH_MOD;
144 }
145 
146 static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
147 				       int addr_len, int type)
148 {
149 	__wsum csum = csum_partial(sunaddr, addr_len, 0);
150 	unsigned int hash;
151 
152 	hash = (__force unsigned int)csum_fold(csum);
153 	hash ^= hash >> 8;
154 	hash ^= type;
155 
156 	return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
157 }
158 
159 static void unix_table_double_lock(struct net *net,
160 				   unsigned int hash1, unsigned int hash2)
161 {
162 	if (hash1 == hash2) {
163 		spin_lock(&net->unx.table.locks[hash1]);
164 		return;
165 	}
166 
167 	if (hash1 > hash2)
168 		swap(hash1, hash2);
169 
170 	spin_lock(&net->unx.table.locks[hash1]);
171 	spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
172 }
173 
174 static void unix_table_double_unlock(struct net *net,
175 				     unsigned int hash1, unsigned int hash2)
176 {
177 	if (hash1 == hash2) {
178 		spin_unlock(&net->unx.table.locks[hash1]);
179 		return;
180 	}
181 
182 	spin_unlock(&net->unx.table.locks[hash1]);
183 	spin_unlock(&net->unx.table.locks[hash2]);
184 }
185 
186 #ifdef CONFIG_SECURITY_NETWORK
187 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
188 {
189 	UNIXCB(skb).secid = scm->secid;
190 }
191 
192 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
193 {
194 	scm->secid = UNIXCB(skb).secid;
195 }
196 
197 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
198 {
199 	return (scm->secid == UNIXCB(skb).secid);
200 }
201 #else
202 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
203 { }
204 
205 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
206 { }
207 
208 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
209 {
210 	return true;
211 }
212 #endif /* CONFIG_SECURITY_NETWORK */
213 
214 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
215 {
216 	return unix_peer(osk) == sk;
217 }
218 
219 static inline int unix_may_send(struct sock *sk, struct sock *osk)
220 {
221 	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
222 }
223 
224 static inline int unix_recvq_full(const struct sock *sk)
225 {
226 	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
227 }
228 
229 static inline int unix_recvq_full_lockless(const struct sock *sk)
230 {
231 	return skb_queue_len_lockless(&sk->sk_receive_queue) >
232 		READ_ONCE(sk->sk_max_ack_backlog);
233 }
234 
235 struct sock *unix_peer_get(struct sock *s)
236 {
237 	struct sock *peer;
238 
239 	unix_state_lock(s);
240 	peer = unix_peer(s);
241 	if (peer)
242 		sock_hold(peer);
243 	unix_state_unlock(s);
244 	return peer;
245 }
246 EXPORT_SYMBOL_GPL(unix_peer_get);
247 
248 static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
249 					     int addr_len)
250 {
251 	struct unix_address *addr;
252 
253 	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
254 	if (!addr)
255 		return NULL;
256 
257 	refcount_set(&addr->refcnt, 1);
258 	addr->len = addr_len;
259 	memcpy(addr->name, sunaddr, addr_len);
260 
261 	return addr;
262 }
263 
264 static inline void unix_release_addr(struct unix_address *addr)
265 {
266 	if (refcount_dec_and_test(&addr->refcnt))
267 		kfree(addr);
268 }
269 
270 /*
271  *	Check unix socket name:
272  *		- should be not zero length.
273  *	        - if started by not zero, should be NULL terminated (FS object)
274  *		- if started by zero, it is abstract name.
275  */
276 
277 static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
278 {
279 	if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
280 	    addr_len > sizeof(*sunaddr))
281 		return -EINVAL;
282 
283 	if (sunaddr->sun_family != AF_UNIX)
284 		return -EINVAL;
285 
286 	return 0;
287 }
288 
289 static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
290 {
291 	struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
292 	short offset = offsetof(struct sockaddr_storage, __data);
293 
294 	BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));
295 
296 	/* This may look like an off by one error but it is a bit more
297 	 * subtle.  108 is the longest valid AF_UNIX path for a binding.
298 	 * sun_path[108] doesn't as such exist.  However in kernel space
299 	 * we are guaranteed that it is a valid memory location in our
300 	 * kernel address buffer because syscall functions always pass
301 	 * a pointer of struct sockaddr_storage which has a bigger buffer
302 	 * than 108.  Also, we must terminate sun_path for strlen() in
303 	 * getname_kernel().
304 	 */
305 	addr->__data[addr_len - offset] = 0;
306 
307 	/* Don't pass sunaddr->sun_path to strlen().  Otherwise, 108 will
308 	 * cause panic if CONFIG_FORTIFY_SOURCE=y.  Let __fortify_strlen()
309 	 * know the actual buffer.
310 	 */
311 	return strlen(addr->__data) + offset + 1;
312 }
313 
314 static void __unix_remove_socket(struct sock *sk)
315 {
316 	sk_del_node_init(sk);
317 }
318 
319 static void __unix_insert_socket(struct net *net, struct sock *sk)
320 {
321 	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
322 	sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
323 }
324 
325 static void __unix_set_addr_hash(struct net *net, struct sock *sk,
326 				 struct unix_address *addr, unsigned int hash)
327 {
328 	__unix_remove_socket(sk);
329 	smp_store_release(&unix_sk(sk)->addr, addr);
330 
331 	sk->sk_hash = hash;
332 	__unix_insert_socket(net, sk);
333 }
334 
335 static void unix_remove_socket(struct net *net, struct sock *sk)
336 {
337 	spin_lock(&net->unx.table.locks[sk->sk_hash]);
338 	__unix_remove_socket(sk);
339 	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
340 }
341 
342 static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
343 {
344 	spin_lock(&net->unx.table.locks[sk->sk_hash]);
345 	__unix_insert_socket(net, sk);
346 	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
347 }
348 
349 static void unix_insert_bsd_socket(struct sock *sk)
350 {
351 	spin_lock(&bsd_socket_locks[sk->sk_hash]);
352 	sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
353 	spin_unlock(&bsd_socket_locks[sk->sk_hash]);
354 }
355 
356 static void unix_remove_bsd_socket(struct sock *sk)
357 {
358 	if (!hlist_unhashed(&sk->sk_bind_node)) {
359 		spin_lock(&bsd_socket_locks[sk->sk_hash]);
360 		__sk_del_bind_node(sk);
361 		spin_unlock(&bsd_socket_locks[sk->sk_hash]);
362 
363 		sk_node_init(&sk->sk_bind_node);
364 	}
365 }
366 
367 static struct sock *__unix_find_socket_byname(struct net *net,
368 					      struct sockaddr_un *sunname,
369 					      int len, unsigned int hash)
370 {
371 	struct sock *s;
372 
373 	sk_for_each(s, &net->unx.table.buckets[hash]) {
374 		struct unix_sock *u = unix_sk(s);
375 
376 		if (u->addr->len == len &&
377 		    !memcmp(u->addr->name, sunname, len))
378 			return s;
379 	}
380 	return NULL;
381 }
382 
383 static inline struct sock *unix_find_socket_byname(struct net *net,
384 						   struct sockaddr_un *sunname,
385 						   int len, unsigned int hash)
386 {
387 	struct sock *s;
388 
389 	spin_lock(&net->unx.table.locks[hash]);
390 	s = __unix_find_socket_byname(net, sunname, len, hash);
391 	if (s)
392 		sock_hold(s);
393 	spin_unlock(&net->unx.table.locks[hash]);
394 	return s;
395 }
396 
397 static struct sock *unix_find_socket_byinode(struct inode *i)
398 {
399 	unsigned int hash = unix_bsd_hash(i);
400 	struct sock *s;
401 
402 	spin_lock(&bsd_socket_locks[hash]);
403 	sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
404 		struct dentry *dentry = unix_sk(s)->path.dentry;
405 
406 		if (dentry && d_backing_inode(dentry) == i) {
407 			sock_hold(s);
408 			spin_unlock(&bsd_socket_locks[hash]);
409 			return s;
410 		}
411 	}
412 	spin_unlock(&bsd_socket_locks[hash]);
413 	return NULL;
414 }
415 
416 /* Support code for asymmetrically connected dgram sockets
417  *
418  * If a datagram socket is connected to a socket not itself connected
419  * to the first socket (eg, /dev/log), clients may only enqueue more
420  * messages if the present receive queue of the server socket is not
421  * "too large". This means there's a second writeability condition
422  * poll and sendmsg need to test. The dgram recv code will do a wake
423  * up on the peer_wait wait queue of a socket upon reception of a
424  * datagram which needs to be propagated to sleeping would-be writers
425  * since these might not have sent anything so far. This can't be
426  * accomplished via poll_wait because the lifetime of the server
427  * socket might be less than that of its clients if these break their
428  * association with it or if the server socket is closed while clients
429  * are still connected to it and there's no way to inform "a polling
430  * implementation" that it should let go of a certain wait queue
431  *
432  * In order to propagate a wake up, a wait_queue_entry_t of the client
433  * socket is enqueued on the peer_wait queue of the server socket
434  * whose wake function does a wake_up on the ordinary client socket
435  * wait queue. This connection is established whenever a write (or
436  * poll for write) hit the flow control condition and broken when the
437  * association to the server socket is dissolved or after a wake up
438  * was relayed.
439  */
440 
441 static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
442 				      void *key)
443 {
444 	struct unix_sock *u;
445 	wait_queue_head_t *u_sleep;
446 
447 	u = container_of(q, struct unix_sock, peer_wake);
448 
449 	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
450 			    q);
451 	u->peer_wake.private = NULL;
452 
453 	/* relaying can only happen while the wq still exists */
454 	u_sleep = sk_sleep(&u->sk);
455 	if (u_sleep)
456 		wake_up_interruptible_poll(u_sleep, key_to_poll(key));
457 
458 	return 0;
459 }
460 
461 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
462 {
463 	struct unix_sock *u, *u_other;
464 	int rc;
465 
466 	u = unix_sk(sk);
467 	u_other = unix_sk(other);
468 	rc = 0;
469 	spin_lock(&u_other->peer_wait.lock);
470 
471 	if (!u->peer_wake.private) {
472 		u->peer_wake.private = other;
473 		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
474 
475 		rc = 1;
476 	}
477 
478 	spin_unlock(&u_other->peer_wait.lock);
479 	return rc;
480 }
481 
482 static void unix_dgram_peer_wake_disconnect(struct sock *sk,
483 					    struct sock *other)
484 {
485 	struct unix_sock *u, *u_other;
486 
487 	u = unix_sk(sk);
488 	u_other = unix_sk(other);
489 	spin_lock(&u_other->peer_wait.lock);
490 
491 	if (u->peer_wake.private == other) {
492 		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
493 		u->peer_wake.private = NULL;
494 	}
495 
496 	spin_unlock(&u_other->peer_wait.lock);
497 }
498 
499 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
500 						   struct sock *other)
501 {
502 	unix_dgram_peer_wake_disconnect(sk, other);
503 	wake_up_interruptible_poll(sk_sleep(sk),
504 				   EPOLLOUT |
505 				   EPOLLWRNORM |
506 				   EPOLLWRBAND);
507 }
508 
509 /* preconditions:
510  *	- unix_peer(sk) == other
511  *	- association is stable
512  */
513 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
514 {
515 	int connected;
516 
517 	connected = unix_dgram_peer_wake_connect(sk, other);
518 
519 	/* If other is SOCK_DEAD, we want to make sure we signal
520 	 * POLLOUT, such that a subsequent write() can get a
521 	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
522 	 * to other and its full, we will hang waiting for POLLOUT.
523 	 */
524 	if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
525 		return 1;
526 
527 	if (connected)
528 		unix_dgram_peer_wake_disconnect(sk, other);
529 
530 	return 0;
531 }
532 
533 static int unix_writable(const struct sock *sk)
534 {
535 	return sk->sk_state != TCP_LISTEN &&
536 	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
537 }
538 
539 static void unix_write_space(struct sock *sk)
540 {
541 	struct socket_wq *wq;
542 
543 	rcu_read_lock();
544 	if (unix_writable(sk)) {
545 		wq = rcu_dereference(sk->sk_wq);
546 		if (skwq_has_sleeper(wq))
547 			wake_up_interruptible_sync_poll(&wq->wait,
548 				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
549 		sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
550 	}
551 	rcu_read_unlock();
552 }
553 
554 /* When dgram socket disconnects (or changes its peer), we clear its receive
555  * queue of packets arrived from previous peer. First, it allows to do
556  * flow control based only on wmem_alloc; second, sk connected to peer
557  * may receive messages only from that peer. */
558 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
559 {
560 	if (!skb_queue_empty(&sk->sk_receive_queue)) {
561 		skb_queue_purge(&sk->sk_receive_queue);
562 		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
563 
564 		/* If one link of bidirectional dgram pipe is disconnected,
565 		 * we signal error. Messages are lost. Do not make this,
566 		 * when peer was not connected to us.
567 		 */
568 		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
569 			WRITE_ONCE(other->sk_err, ECONNRESET);
570 			sk_error_report(other);
571 		}
572 	}
573 	other->sk_state = TCP_CLOSE;
574 }
575 
576 static void unix_sock_destructor(struct sock *sk)
577 {
578 	struct unix_sock *u = unix_sk(sk);
579 
580 	skb_queue_purge(&sk->sk_receive_queue);
581 
582 	DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
583 	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
584 	DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
585 	if (!sock_flag(sk, SOCK_DEAD)) {
586 		pr_info("Attempt to release alive unix socket: %p\n", sk);
587 		return;
588 	}
589 
590 	if (u->addr)
591 		unix_release_addr(u->addr);
592 
593 	atomic_long_dec(&unix_nr_socks);
594 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
595 #ifdef UNIX_REFCNT_DEBUG
596 	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
597 		atomic_long_read(&unix_nr_socks));
598 #endif
599 }
600 
601 static void unix_release_sock(struct sock *sk, int embrion)
602 {
603 	struct unix_sock *u = unix_sk(sk);
604 	struct sock *skpair;
605 	struct sk_buff *skb;
606 	struct path path;
607 	int state;
608 
609 	unix_remove_socket(sock_net(sk), sk);
610 	unix_remove_bsd_socket(sk);
611 
612 	/* Clear state */
613 	unix_state_lock(sk);
614 	sock_orphan(sk);
615 	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
616 	path	     = u->path;
617 	u->path.dentry = NULL;
618 	u->path.mnt = NULL;
619 	state = sk->sk_state;
620 	sk->sk_state = TCP_CLOSE;
621 
622 	skpair = unix_peer(sk);
623 	unix_peer(sk) = NULL;
624 
625 	unix_state_unlock(sk);
626 
627 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
628 	if (u->oob_skb) {
629 		kfree_skb(u->oob_skb);
630 		u->oob_skb = NULL;
631 	}
632 #endif
633 
634 	wake_up_interruptible_all(&u->peer_wait);
635 
636 	if (skpair != NULL) {
637 		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
638 			unix_state_lock(skpair);
639 			/* No more writes */
640 			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
641 			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
642 				WRITE_ONCE(skpair->sk_err, ECONNRESET);
643 			unix_state_unlock(skpair);
644 			skpair->sk_state_change(skpair);
645 			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
646 		}
647 
648 		unix_dgram_peer_wake_disconnect(sk, skpair);
649 		sock_put(skpair); /* It may now die */
650 	}
651 
652 	/* Try to flush out this socket. Throw out buffers at least */
653 
654 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
655 		if (state == TCP_LISTEN)
656 			unix_release_sock(skb->sk, 1);
657 		/* passed fds are erased in the kfree_skb hook	      */
658 		UNIXCB(skb).consumed = skb->len;
659 		kfree_skb(skb);
660 	}
661 
662 	if (path.dentry)
663 		path_put(&path);
664 
665 	sock_put(sk);
666 
667 	/* ---- Socket is dead now and most probably destroyed ---- */
668 
669 	/*
670 	 * Fixme: BSD difference: In BSD all sockets connected to us get
671 	 *	  ECONNRESET and we die on the spot. In Linux we behave
672 	 *	  like files and pipes do and wait for the last
673 	 *	  dereference.
674 	 *
675 	 * Can't we simply set sock->err?
676 	 *
677 	 *	  What the above comment does talk about? --ANK(980817)
678 	 */
679 
680 	if (READ_ONCE(unix_tot_inflight))
681 		unix_gc();		/* Garbage collect fds */
682 }
683 
684 static void init_peercred(struct sock *sk)
685 {
686 	const struct cred *old_cred;
687 	struct pid *old_pid;
688 
689 	spin_lock(&sk->sk_peer_lock);
690 	old_pid = sk->sk_peer_pid;
691 	old_cred = sk->sk_peer_cred;
692 	sk->sk_peer_pid  = get_pid(task_tgid(current));
693 	sk->sk_peer_cred = get_current_cred();
694 	spin_unlock(&sk->sk_peer_lock);
695 
696 	put_pid(old_pid);
697 	put_cred(old_cred);
698 }
699 
700 static void copy_peercred(struct sock *sk, struct sock *peersk)
701 {
702 	const struct cred *old_cred;
703 	struct pid *old_pid;
704 
705 	if (sk < peersk) {
706 		spin_lock(&sk->sk_peer_lock);
707 		spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
708 	} else {
709 		spin_lock(&peersk->sk_peer_lock);
710 		spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
711 	}
712 	old_pid = sk->sk_peer_pid;
713 	old_cred = sk->sk_peer_cred;
714 	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
715 	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
716 
717 	spin_unlock(&sk->sk_peer_lock);
718 	spin_unlock(&peersk->sk_peer_lock);
719 
720 	put_pid(old_pid);
721 	put_cred(old_cred);
722 }
723 
724 static int unix_listen(struct socket *sock, int backlog)
725 {
726 	int err;
727 	struct sock *sk = sock->sk;
728 	struct unix_sock *u = unix_sk(sk);
729 
730 	err = -EOPNOTSUPP;
731 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
732 		goto out;	/* Only stream/seqpacket sockets accept */
733 	err = -EINVAL;
734 	if (!u->addr)
735 		goto out;	/* No listens on an unbound socket */
736 	unix_state_lock(sk);
737 	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
738 		goto out_unlock;
739 	if (backlog > sk->sk_max_ack_backlog)
740 		wake_up_interruptible_all(&u->peer_wait);
741 	sk->sk_max_ack_backlog	= backlog;
742 	sk->sk_state		= TCP_LISTEN;
743 	/* set credentials so connect can copy them */
744 	init_peercred(sk);
745 	err = 0;
746 
747 out_unlock:
748 	unix_state_unlock(sk);
749 out:
750 	return err;
751 }
752 
753 static int unix_release(struct socket *);
754 static int unix_bind(struct socket *, struct sockaddr *, int);
755 static int unix_stream_connect(struct socket *, struct sockaddr *,
756 			       int addr_len, int flags);
757 static int unix_socketpair(struct socket *, struct socket *);
758 static int unix_accept(struct socket *, struct socket *, int, bool);
759 static int unix_getname(struct socket *, struct sockaddr *, int);
760 static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
761 static __poll_t unix_dgram_poll(struct file *, struct socket *,
762 				    poll_table *);
763 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
764 #ifdef CONFIG_COMPAT
765 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
766 #endif
767 static int unix_shutdown(struct socket *, int);
768 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
769 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
770 static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
771 				       struct pipe_inode_info *, size_t size,
772 				       unsigned int flags);
773 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
774 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
775 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
776 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
777 static int unix_dgram_connect(struct socket *, struct sockaddr *,
778 			      int, int);
779 static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
780 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
781 				  int);
782 
783 #ifdef CONFIG_PROC_FS
784 static int unix_count_nr_fds(struct sock *sk)
785 {
786 	struct sk_buff *skb;
787 	struct unix_sock *u;
788 	int nr_fds = 0;
789 
790 	spin_lock(&sk->sk_receive_queue.lock);
791 	skb = skb_peek(&sk->sk_receive_queue);
792 	while (skb) {
793 		u = unix_sk(skb->sk);
794 		nr_fds += atomic_read(&u->scm_stat.nr_fds);
795 		skb = skb_peek_next(skb, &sk->sk_receive_queue);
796 	}
797 	spin_unlock(&sk->sk_receive_queue.lock);
798 
799 	return nr_fds;
800 }
801 
802 static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
803 {
804 	struct sock *sk = sock->sk;
805 	unsigned char s_state;
806 	struct unix_sock *u;
807 	int nr_fds = 0;
808 
809 	if (sk) {
810 		s_state = READ_ONCE(sk->sk_state);
811 		u = unix_sk(sk);
812 
813 		/* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
814 		 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
815 		 * SOCK_DGRAM is ordinary. So, no lock is needed.
816 		 */
817 		if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
818 			nr_fds = atomic_read(&u->scm_stat.nr_fds);
819 		else if (s_state == TCP_LISTEN)
820 			nr_fds = unix_count_nr_fds(sk);
821 
822 		seq_printf(m, "scm_fds: %u\n", nr_fds);
823 	}
824 }
825 #else
826 #define unix_show_fdinfo NULL
827 #endif
828 
829 static const struct proto_ops unix_stream_ops = {
830 	.family =	PF_UNIX,
831 	.owner =	THIS_MODULE,
832 	.release =	unix_release,
833 	.bind =		unix_bind,
834 	.connect =	unix_stream_connect,
835 	.socketpair =	unix_socketpair,
836 	.accept =	unix_accept,
837 	.getname =	unix_getname,
838 	.poll =		unix_poll,
839 	.ioctl =	unix_ioctl,
840 #ifdef CONFIG_COMPAT
841 	.compat_ioctl =	unix_compat_ioctl,
842 #endif
843 	.listen =	unix_listen,
844 	.shutdown =	unix_shutdown,
845 	.sendmsg =	unix_stream_sendmsg,
846 	.recvmsg =	unix_stream_recvmsg,
847 	.read_skb =	unix_stream_read_skb,
848 	.mmap =		sock_no_mmap,
849 	.splice_read =	unix_stream_splice_read,
850 	.set_peek_off =	sk_set_peek_off,
851 	.show_fdinfo =	unix_show_fdinfo,
852 };
853 
854 static const struct proto_ops unix_dgram_ops = {
855 	.family =	PF_UNIX,
856 	.owner =	THIS_MODULE,
857 	.release =	unix_release,
858 	.bind =		unix_bind,
859 	.connect =	unix_dgram_connect,
860 	.socketpair =	unix_socketpair,
861 	.accept =	sock_no_accept,
862 	.getname =	unix_getname,
863 	.poll =		unix_dgram_poll,
864 	.ioctl =	unix_ioctl,
865 #ifdef CONFIG_COMPAT
866 	.compat_ioctl =	unix_compat_ioctl,
867 #endif
868 	.listen =	sock_no_listen,
869 	.shutdown =	unix_shutdown,
870 	.sendmsg =	unix_dgram_sendmsg,
871 	.read_skb =	unix_read_skb,
872 	.recvmsg =	unix_dgram_recvmsg,
873 	.mmap =		sock_no_mmap,
874 	.set_peek_off =	sk_set_peek_off,
875 	.show_fdinfo =	unix_show_fdinfo,
876 };
877 
878 static const struct proto_ops unix_seqpacket_ops = {
879 	.family =	PF_UNIX,
880 	.owner =	THIS_MODULE,
881 	.release =	unix_release,
882 	.bind =		unix_bind,
883 	.connect =	unix_stream_connect,
884 	.socketpair =	unix_socketpair,
885 	.accept =	unix_accept,
886 	.getname =	unix_getname,
887 	.poll =		unix_dgram_poll,
888 	.ioctl =	unix_ioctl,
889 #ifdef CONFIG_COMPAT
890 	.compat_ioctl =	unix_compat_ioctl,
891 #endif
892 	.listen =	unix_listen,
893 	.shutdown =	unix_shutdown,
894 	.sendmsg =	unix_seqpacket_sendmsg,
895 	.recvmsg =	unix_seqpacket_recvmsg,
896 	.mmap =		sock_no_mmap,
897 	.set_peek_off =	sk_set_peek_off,
898 	.show_fdinfo =	unix_show_fdinfo,
899 };
900 
901 static void unix_close(struct sock *sk, long timeout)
902 {
903 	/* Nothing to do here, unix socket does not need a ->close().
904 	 * This is merely for sockmap.
905 	 */
906 }
907 
908 static void unix_unhash(struct sock *sk)
909 {
910 	/* Nothing to do here, unix socket does not need a ->unhash().
911 	 * This is merely for sockmap.
912 	 */
913 }
914 
915 static bool unix_bpf_bypass_getsockopt(int level, int optname)
916 {
917 	if (level == SOL_SOCKET) {
918 		switch (optname) {
919 		case SO_PEERPIDFD:
920 			return true;
921 		default:
922 			return false;
923 		}
924 	}
925 
926 	return false;
927 }
928 
929 struct proto unix_dgram_proto = {
930 	.name			= "UNIX",
931 	.owner			= THIS_MODULE,
932 	.obj_size		= sizeof(struct unix_sock),
933 	.close			= unix_close,
934 	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
935 #ifdef CONFIG_BPF_SYSCALL
936 	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
937 #endif
938 };
939 
940 struct proto unix_stream_proto = {
941 	.name			= "UNIX-STREAM",
942 	.owner			= THIS_MODULE,
943 	.obj_size		= sizeof(struct unix_sock),
944 	.close			= unix_close,
945 	.unhash			= unix_unhash,
946 	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
947 #ifdef CONFIG_BPF_SYSCALL
948 	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
949 #endif
950 };
951 
952 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
953 {
954 	struct unix_sock *u;
955 	struct sock *sk;
956 	int err;
957 
958 	atomic_long_inc(&unix_nr_socks);
959 	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
960 		err = -ENFILE;
961 		goto err;
962 	}
963 
964 	if (type == SOCK_STREAM)
965 		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
966 	else /*dgram and  seqpacket */
967 		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
968 
969 	if (!sk) {
970 		err = -ENOMEM;
971 		goto err;
972 	}
973 
974 	sock_init_data(sock, sk);
975 
976 	sk->sk_hash		= unix_unbound_hash(sk);
977 	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
978 	sk->sk_write_space	= unix_write_space;
979 	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
980 	sk->sk_destruct		= unix_sock_destructor;
981 	u = unix_sk(sk);
982 	u->listener = NULL;
983 	u->vertex = NULL;
984 	u->path.dentry = NULL;
985 	u->path.mnt = NULL;
986 	spin_lock_init(&u->lock);
987 	mutex_init(&u->iolock); /* single task reading lock */
988 	mutex_init(&u->bindlock); /* single task binding lock */
989 	init_waitqueue_head(&u->peer_wait);
990 	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
991 	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
992 	unix_insert_unbound_socket(net, sk);
993 
994 	sock_prot_inuse_add(net, sk->sk_prot, 1);
995 
996 	return sk;
997 
998 err:
999 	atomic_long_dec(&unix_nr_socks);
1000 	return ERR_PTR(err);
1001 }
1002 
1003 static int unix_create(struct net *net, struct socket *sock, int protocol,
1004 		       int kern)
1005 {
1006 	struct sock *sk;
1007 
1008 	if (protocol && protocol != PF_UNIX)
1009 		return -EPROTONOSUPPORT;
1010 
1011 	sock->state = SS_UNCONNECTED;
1012 
1013 	switch (sock->type) {
1014 	case SOCK_STREAM:
1015 		sock->ops = &unix_stream_ops;
1016 		break;
1017 		/*
1018 		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
1019 		 *	nothing uses it.
1020 		 */
1021 	case SOCK_RAW:
1022 		sock->type = SOCK_DGRAM;
1023 		fallthrough;
1024 	case SOCK_DGRAM:
1025 		sock->ops = &unix_dgram_ops;
1026 		break;
1027 	case SOCK_SEQPACKET:
1028 		sock->ops = &unix_seqpacket_ops;
1029 		break;
1030 	default:
1031 		return -ESOCKTNOSUPPORT;
1032 	}
1033 
1034 	sk = unix_create1(net, sock, kern, sock->type);
1035 	if (IS_ERR(sk))
1036 		return PTR_ERR(sk);
1037 
1038 	return 0;
1039 }
1040 
1041 static int unix_release(struct socket *sock)
1042 {
1043 	struct sock *sk = sock->sk;
1044 
1045 	if (!sk)
1046 		return 0;
1047 
1048 	sk->sk_prot->close(sk, 0);
1049 	unix_release_sock(sk, 0);
1050 	sock->sk = NULL;
1051 
1052 	return 0;
1053 }
1054 
1055 static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
1056 				  int type)
1057 {
1058 	struct inode *inode;
1059 	struct path path;
1060 	struct sock *sk;
1061 	int err;
1062 
1063 	unix_mkname_bsd(sunaddr, addr_len);
1064 	err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
1065 	if (err)
1066 		goto fail;
1067 
1068 	err = path_permission(&path, MAY_WRITE);
1069 	if (err)
1070 		goto path_put;
1071 
1072 	err = -ECONNREFUSED;
1073 	inode = d_backing_inode(path.dentry);
1074 	if (!S_ISSOCK(inode->i_mode))
1075 		goto path_put;
1076 
1077 	sk = unix_find_socket_byinode(inode);
1078 	if (!sk)
1079 		goto path_put;
1080 
1081 	err = -EPROTOTYPE;
1082 	if (sk->sk_type == type)
1083 		touch_atime(&path);
1084 	else
1085 		goto sock_put;
1086 
1087 	path_put(&path);
1088 
1089 	return sk;
1090 
1091 sock_put:
1092 	sock_put(sk);
1093 path_put:
1094 	path_put(&path);
1095 fail:
1096 	return ERR_PTR(err);
1097 }
1098 
1099 static struct sock *unix_find_abstract(struct net *net,
1100 				       struct sockaddr_un *sunaddr,
1101 				       int addr_len, int type)
1102 {
1103 	unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
1104 	struct dentry *dentry;
1105 	struct sock *sk;
1106 
1107 	sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
1108 	if (!sk)
1109 		return ERR_PTR(-ECONNREFUSED);
1110 
1111 	dentry = unix_sk(sk)->path.dentry;
1112 	if (dentry)
1113 		touch_atime(&unix_sk(sk)->path);
1114 
1115 	return sk;
1116 }
1117 
1118 static struct sock *unix_find_other(struct net *net,
1119 				    struct sockaddr_un *sunaddr,
1120 				    int addr_len, int type)
1121 {
1122 	struct sock *sk;
1123 
1124 	if (sunaddr->sun_path[0])
1125 		sk = unix_find_bsd(sunaddr, addr_len, type);
1126 	else
1127 		sk = unix_find_abstract(net, sunaddr, addr_len, type);
1128 
1129 	return sk;
1130 }
1131 
1132 static int unix_autobind(struct sock *sk)
1133 {
1134 	unsigned int new_hash, old_hash = sk->sk_hash;
1135 	struct unix_sock *u = unix_sk(sk);
1136 	struct net *net = sock_net(sk);
1137 	struct unix_address *addr;
1138 	u32 lastnum, ordernum;
1139 	int err;
1140 
1141 	err = mutex_lock_interruptible(&u->bindlock);
1142 	if (err)
1143 		return err;
1144 
1145 	if (u->addr)
1146 		goto out;
1147 
1148 	err = -ENOMEM;
1149 	addr = kzalloc(sizeof(*addr) +
1150 		       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
1151 	if (!addr)
1152 		goto out;
1153 
1154 	addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
1155 	addr->name->sun_family = AF_UNIX;
1156 	refcount_set(&addr->refcnt, 1);
1157 
1158 	ordernum = get_random_u32();
1159 	lastnum = ordernum & 0xFFFFF;
1160 retry:
1161 	ordernum = (ordernum + 1) & 0xFFFFF;
1162 	sprintf(addr->name->sun_path + 1, "%05x", ordernum);
1163 
1164 	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1165 	unix_table_double_lock(net, old_hash, new_hash);
1166 
1167 	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
1168 		unix_table_double_unlock(net, old_hash, new_hash);
1169 
1170 		/* __unix_find_socket_byname() may take long time if many names
1171 		 * are already in use.
1172 		 */
1173 		cond_resched();
1174 
1175 		if (ordernum == lastnum) {
1176 			/* Give up if all names seems to be in use. */
1177 			err = -ENOSPC;
1178 			unix_release_addr(addr);
1179 			goto out;
1180 		}
1181 
1182 		goto retry;
1183 	}
1184 
1185 	__unix_set_addr_hash(net, sk, addr, new_hash);
1186 	unix_table_double_unlock(net, old_hash, new_hash);
1187 	err = 0;
1188 
1189 out:	mutex_unlock(&u->bindlock);
1190 	return err;
1191 }
1192 
1193 static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
1194 			 int addr_len)
1195 {
1196 	umode_t mode = S_IFSOCK |
1197 	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
1198 	unsigned int new_hash, old_hash = sk->sk_hash;
1199 	struct unix_sock *u = unix_sk(sk);
1200 	struct net *net = sock_net(sk);
1201 	struct mnt_idmap *idmap;
1202 	struct unix_address *addr;
1203 	struct dentry *dentry;
1204 	struct path parent;
1205 	int err;
1206 
1207 	addr_len = unix_mkname_bsd(sunaddr, addr_len);
1208 	addr = unix_create_addr(sunaddr, addr_len);
1209 	if (!addr)
1210 		return -ENOMEM;
1211 
1212 	/*
1213 	 * Get the parent directory, calculate the hash for last
1214 	 * component.
1215 	 */
1216 	dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
1217 	if (IS_ERR(dentry)) {
1218 		err = PTR_ERR(dentry);
1219 		goto out;
1220 	}
1221 
1222 	/*
1223 	 * All right, let's create it.
1224 	 */
1225 	idmap = mnt_idmap(parent.mnt);
1226 	err = security_path_mknod(&parent, dentry, mode, 0);
1227 	if (!err)
1228 		err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
1229 	if (err)
1230 		goto out_path;
1231 	err = mutex_lock_interruptible(&u->bindlock);
1232 	if (err)
1233 		goto out_unlink;
1234 	if (u->addr)
1235 		goto out_unlock;
1236 
1237 	new_hash = unix_bsd_hash(d_backing_inode(dentry));
1238 	unix_table_double_lock(net, old_hash, new_hash);
1239 	u->path.mnt = mntget(parent.mnt);
1240 	u->path.dentry = dget(dentry);
1241 	__unix_set_addr_hash(net, sk, addr, new_hash);
1242 	unix_table_double_unlock(net, old_hash, new_hash);
1243 	unix_insert_bsd_socket(sk);
1244 	mutex_unlock(&u->bindlock);
1245 	done_path_create(&parent, dentry);
1246 	return 0;
1247 
1248 out_unlock:
1249 	mutex_unlock(&u->bindlock);
1250 	err = -EINVAL;
1251 out_unlink:
1252 	/* failed after successful mknod?  unlink what we'd created... */
1253 	vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
1254 out_path:
1255 	done_path_create(&parent, dentry);
1256 out:
1257 	unix_release_addr(addr);
1258 	return err == -EEXIST ? -EADDRINUSE : err;
1259 }
1260 
1261 static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
1262 			      int addr_len)
1263 {
1264 	unsigned int new_hash, old_hash = sk->sk_hash;
1265 	struct unix_sock *u = unix_sk(sk);
1266 	struct net *net = sock_net(sk);
1267 	struct unix_address *addr;
1268 	int err;
1269 
1270 	addr = unix_create_addr(sunaddr, addr_len);
1271 	if (!addr)
1272 		return -ENOMEM;
1273 
1274 	err = mutex_lock_interruptible(&u->bindlock);
1275 	if (err)
1276 		goto out;
1277 
1278 	if (u->addr) {
1279 		err = -EINVAL;
1280 		goto out_mutex;
1281 	}
1282 
1283 	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1284 	unix_table_double_lock(net, old_hash, new_hash);
1285 
1286 	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
1287 		goto out_spin;
1288 
1289 	__unix_set_addr_hash(net, sk, addr, new_hash);
1290 	unix_table_double_unlock(net, old_hash, new_hash);
1291 	mutex_unlock(&u->bindlock);
1292 	return 0;
1293 
1294 out_spin:
1295 	unix_table_double_unlock(net, old_hash, new_hash);
1296 	err = -EADDRINUSE;
1297 out_mutex:
1298 	mutex_unlock(&u->bindlock);
1299 out:
1300 	unix_release_addr(addr);
1301 	return err;
1302 }
1303 
1304 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1305 {
1306 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1307 	struct sock *sk = sock->sk;
1308 	int err;
1309 
1310 	if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
1311 	    sunaddr->sun_family == AF_UNIX)
1312 		return unix_autobind(sk);
1313 
1314 	err = unix_validate_addr(sunaddr, addr_len);
1315 	if (err)
1316 		return err;
1317 
1318 	if (sunaddr->sun_path[0])
1319 		err = unix_bind_bsd(sk, sunaddr, addr_len);
1320 	else
1321 		err = unix_bind_abstract(sk, sunaddr, addr_len);
1322 
1323 	return err;
1324 }
1325 
1326 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1327 {
1328 	if (unlikely(sk1 == sk2) || !sk2) {
1329 		unix_state_lock(sk1);
1330 		return;
1331 	}
1332 	if (sk1 > sk2)
1333 		swap(sk1, sk2);
1334 
1335 	unix_state_lock(sk1);
1336 	unix_state_lock_nested(sk2, U_LOCK_SECOND);
1337 }
1338 
1339 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1340 {
1341 	if (unlikely(sk1 == sk2) || !sk2) {
1342 		unix_state_unlock(sk1);
1343 		return;
1344 	}
1345 	unix_state_unlock(sk1);
1346 	unix_state_unlock(sk2);
1347 }
1348 
1349 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1350 			      int alen, int flags)
1351 {
1352 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1353 	struct sock *sk = sock->sk;
1354 	struct sock *other;
1355 	int err;
1356 
1357 	err = -EINVAL;
1358 	if (alen < offsetofend(struct sockaddr, sa_family))
1359 		goto out;
1360 
1361 	if (addr->sa_family != AF_UNSPEC) {
1362 		err = unix_validate_addr(sunaddr, alen);
1363 		if (err)
1364 			goto out;
1365 
1366 		err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen);
1367 		if (err)
1368 			goto out;
1369 
1370 		if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1371 		     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
1372 		    !unix_sk(sk)->addr) {
1373 			err = unix_autobind(sk);
1374 			if (err)
1375 				goto out;
1376 		}
1377 
1378 restart:
1379 		other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
1380 		if (IS_ERR(other)) {
1381 			err = PTR_ERR(other);
1382 			goto out;
1383 		}
1384 
1385 		unix_state_double_lock(sk, other);
1386 
1387 		/* Apparently VFS overslept socket death. Retry. */
1388 		if (sock_flag(other, SOCK_DEAD)) {
1389 			unix_state_double_unlock(sk, other);
1390 			sock_put(other);
1391 			goto restart;
1392 		}
1393 
1394 		err = -EPERM;
1395 		if (!unix_may_send(sk, other))
1396 			goto out_unlock;
1397 
1398 		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1399 		if (err)
1400 			goto out_unlock;
1401 
1402 		sk->sk_state = other->sk_state = TCP_ESTABLISHED;
1403 	} else {
1404 		/*
1405 		 *	1003.1g breaking connected state with AF_UNSPEC
1406 		 */
1407 		other = NULL;
1408 		unix_state_double_lock(sk, other);
1409 	}
1410 
1411 	/*
1412 	 * If it was connected, reconnect.
1413 	 */
1414 	if (unix_peer(sk)) {
1415 		struct sock *old_peer = unix_peer(sk);
1416 
1417 		unix_peer(sk) = other;
1418 		if (!other)
1419 			sk->sk_state = TCP_CLOSE;
1420 		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1421 
1422 		unix_state_double_unlock(sk, other);
1423 
1424 		if (other != old_peer)
1425 			unix_dgram_disconnected(sk, old_peer);
1426 		sock_put(old_peer);
1427 	} else {
1428 		unix_peer(sk) = other;
1429 		unix_state_double_unlock(sk, other);
1430 	}
1431 
1432 	return 0;
1433 
1434 out_unlock:
1435 	unix_state_double_unlock(sk, other);
1436 	sock_put(other);
1437 out:
1438 	return err;
1439 }
1440 
1441 static long unix_wait_for_peer(struct sock *other, long timeo)
1442 	__releases(&unix_sk(other)->lock)
1443 {
1444 	struct unix_sock *u = unix_sk(other);
1445 	int sched;
1446 	DEFINE_WAIT(wait);
1447 
1448 	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1449 
1450 	sched = !sock_flag(other, SOCK_DEAD) &&
1451 		!(other->sk_shutdown & RCV_SHUTDOWN) &&
1452 		unix_recvq_full_lockless(other);
1453 
1454 	unix_state_unlock(other);
1455 
1456 	if (sched)
1457 		timeo = schedule_timeout(timeo);
1458 
1459 	finish_wait(&u->peer_wait, &wait);
1460 	return timeo;
1461 }
1462 
1463 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1464 			       int addr_len, int flags)
1465 {
1466 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1467 	struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
1468 	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1469 	struct net *net = sock_net(sk);
1470 	struct sk_buff *skb = NULL;
1471 	long timeo;
1472 	int err;
1473 	int st;
1474 
1475 	err = unix_validate_addr(sunaddr, addr_len);
1476 	if (err)
1477 		goto out;
1478 
1479 	err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len);
1480 	if (err)
1481 		goto out;
1482 
1483 	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1484 	     test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
1485 		err = unix_autobind(sk);
1486 		if (err)
1487 			goto out;
1488 	}
1489 
1490 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1491 
1492 	/* First of all allocate resources.
1493 	   If we will make it after state is locked,
1494 	   we will have to recheck all again in any case.
1495 	 */
1496 
1497 	/* create new sock for complete connection */
1498 	newsk = unix_create1(net, NULL, 0, sock->type);
1499 	if (IS_ERR(newsk)) {
1500 		err = PTR_ERR(newsk);
1501 		newsk = NULL;
1502 		goto out;
1503 	}
1504 
1505 	err = -ENOMEM;
1506 
1507 	/* Allocate skb for sending to listening sock */
1508 	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1509 	if (skb == NULL)
1510 		goto out;
1511 
1512 restart:
1513 	/*  Find listening sock. */
1514 	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
1515 	if (IS_ERR(other)) {
1516 		err = PTR_ERR(other);
1517 		other = NULL;
1518 		goto out;
1519 	}
1520 
1521 	/* Latch state of peer */
1522 	unix_state_lock(other);
1523 
1524 	/* Apparently VFS overslept socket death. Retry. */
1525 	if (sock_flag(other, SOCK_DEAD)) {
1526 		unix_state_unlock(other);
1527 		sock_put(other);
1528 		goto restart;
1529 	}
1530 
1531 	err = -ECONNREFUSED;
1532 	if (other->sk_state != TCP_LISTEN)
1533 		goto out_unlock;
1534 	if (other->sk_shutdown & RCV_SHUTDOWN)
1535 		goto out_unlock;
1536 
1537 	if (unix_recvq_full(other)) {
1538 		err = -EAGAIN;
1539 		if (!timeo)
1540 			goto out_unlock;
1541 
1542 		timeo = unix_wait_for_peer(other, timeo);
1543 
1544 		err = sock_intr_errno(timeo);
1545 		if (signal_pending(current))
1546 			goto out;
1547 		sock_put(other);
1548 		goto restart;
1549 	}
1550 
1551 	/* Latch our state.
1552 
1553 	   It is tricky place. We need to grab our state lock and cannot
1554 	   drop lock on peer. It is dangerous because deadlock is
1555 	   possible. Connect to self case and simultaneous
1556 	   attempt to connect are eliminated by checking socket
1557 	   state. other is TCP_LISTEN, if sk is TCP_LISTEN we
1558 	   check this before attempt to grab lock.
1559 
1560 	   Well, and we have to recheck the state after socket locked.
1561 	 */
1562 	st = sk->sk_state;
1563 
1564 	switch (st) {
1565 	case TCP_CLOSE:
1566 		/* This is ok... continue with connect */
1567 		break;
1568 	case TCP_ESTABLISHED:
1569 		/* Socket is already connected */
1570 		err = -EISCONN;
1571 		goto out_unlock;
1572 	default:
1573 		err = -EINVAL;
1574 		goto out_unlock;
1575 	}
1576 
1577 	unix_state_lock_nested(sk, U_LOCK_SECOND);
1578 
1579 	if (sk->sk_state != st) {
1580 		unix_state_unlock(sk);
1581 		unix_state_unlock(other);
1582 		sock_put(other);
1583 		goto restart;
1584 	}
1585 
1586 	err = security_unix_stream_connect(sk, other, newsk);
1587 	if (err) {
1588 		unix_state_unlock(sk);
1589 		goto out_unlock;
1590 	}
1591 
1592 	/* The way is open! Fastly set all the necessary fields... */
1593 
1594 	sock_hold(sk);
1595 	unix_peer(newsk)	= sk;
1596 	newsk->sk_state		= TCP_ESTABLISHED;
1597 	newsk->sk_type		= sk->sk_type;
1598 	init_peercred(newsk);
1599 	newu = unix_sk(newsk);
1600 	newu->listener = other;
1601 	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1602 	otheru = unix_sk(other);
1603 
1604 	/* copy address information from listening to new sock
1605 	 *
1606 	 * The contents of *(otheru->addr) and otheru->path
1607 	 * are seen fully set up here, since we have found
1608 	 * otheru in hash under its lock.  Insertion into the
1609 	 * hash chain we'd found it in had been done in an
1610 	 * earlier critical area protected by the chain's lock,
1611 	 * the same one where we'd set *(otheru->addr) contents,
1612 	 * as well as otheru->path and otheru->addr itself.
1613 	 *
1614 	 * Using smp_store_release() here to set newu->addr
1615 	 * is enough to make those stores, as well as stores
1616 	 * to newu->path visible to anyone who gets newu->addr
1617 	 * by smp_load_acquire().  IOW, the same warranties
1618 	 * as for unix_sock instances bound in unix_bind() or
1619 	 * in unix_autobind().
1620 	 */
1621 	if (otheru->path.dentry) {
1622 		path_get(&otheru->path);
1623 		newu->path = otheru->path;
1624 	}
1625 	refcount_inc(&otheru->addr->refcnt);
1626 	smp_store_release(&newu->addr, otheru->addr);
1627 
1628 	/* Set credentials */
1629 	copy_peercred(sk, other);
1630 
1631 	sock->state	= SS_CONNECTED;
1632 	sk->sk_state	= TCP_ESTABLISHED;
1633 	sock_hold(newsk);
1634 
1635 	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
1636 	unix_peer(sk)	= newsk;
1637 
1638 	unix_state_unlock(sk);
1639 
1640 	/* take ten and send info to listening sock */
1641 	spin_lock(&other->sk_receive_queue.lock);
1642 	__skb_queue_tail(&other->sk_receive_queue, skb);
1643 	spin_unlock(&other->sk_receive_queue.lock);
1644 	unix_state_unlock(other);
1645 	other->sk_data_ready(other);
1646 	sock_put(other);
1647 	return 0;
1648 
1649 out_unlock:
1650 	if (other)
1651 		unix_state_unlock(other);
1652 
1653 out:
1654 	kfree_skb(skb);
1655 	if (newsk)
1656 		unix_release_sock(newsk, 0);
1657 	if (other)
1658 		sock_put(other);
1659 	return err;
1660 }
1661 
1662 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1663 {
1664 	struct sock *ska = socka->sk, *skb = sockb->sk;
1665 
1666 	/* Join our sockets back to back */
1667 	sock_hold(ska);
1668 	sock_hold(skb);
1669 	unix_peer(ska) = skb;
1670 	unix_peer(skb) = ska;
1671 	init_peercred(ska);
1672 	init_peercred(skb);
1673 
1674 	ska->sk_state = TCP_ESTABLISHED;
1675 	skb->sk_state = TCP_ESTABLISHED;
1676 	socka->state  = SS_CONNECTED;
1677 	sockb->state  = SS_CONNECTED;
1678 	return 0;
1679 }
1680 
1681 static void unix_sock_inherit_flags(const struct socket *old,
1682 				    struct socket *new)
1683 {
1684 	if (test_bit(SOCK_PASSCRED, &old->flags))
1685 		set_bit(SOCK_PASSCRED, &new->flags);
1686 	if (test_bit(SOCK_PASSPIDFD, &old->flags))
1687 		set_bit(SOCK_PASSPIDFD, &new->flags);
1688 	if (test_bit(SOCK_PASSSEC, &old->flags))
1689 		set_bit(SOCK_PASSSEC, &new->flags);
1690 }
1691 
1692 static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
1693 		       bool kern)
1694 {
1695 	struct sock *sk = sock->sk;
1696 	struct sk_buff *skb;
1697 	struct sock *tsk;
1698 	int err;
1699 
1700 	err = -EOPNOTSUPP;
1701 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1702 		goto out;
1703 
1704 	err = -EINVAL;
1705 	if (sk->sk_state != TCP_LISTEN)
1706 		goto out;
1707 
1708 	/* If socket state is TCP_LISTEN it cannot change (for now...),
1709 	 * so that no locks are necessary.
1710 	 */
1711 
1712 	skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
1713 				&err);
1714 	if (!skb) {
1715 		/* This means receive shutdown. */
1716 		if (err == 0)
1717 			err = -EINVAL;
1718 		goto out;
1719 	}
1720 
1721 	tsk = skb->sk;
1722 	skb_free_datagram(sk, skb);
1723 	wake_up_interruptible(&unix_sk(sk)->peer_wait);
1724 
1725 	/* attach accepted sock to socket */
1726 	unix_state_lock(tsk);
1727 	unix_update_edges(unix_sk(tsk));
1728 	newsock->state = SS_CONNECTED;
1729 	unix_sock_inherit_flags(sock, newsock);
1730 	sock_graft(tsk, newsock);
1731 	unix_state_unlock(tsk);
1732 	return 0;
1733 
1734 out:
1735 	return err;
1736 }
1737 
1738 
1739 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1740 {
1741 	struct sock *sk = sock->sk;
1742 	struct unix_address *addr;
1743 	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1744 	int err = 0;
1745 
1746 	if (peer) {
1747 		sk = unix_peer_get(sk);
1748 
1749 		err = -ENOTCONN;
1750 		if (!sk)
1751 			goto out;
1752 		err = 0;
1753 	} else {
1754 		sock_hold(sk);
1755 	}
1756 
1757 	addr = smp_load_acquire(&unix_sk(sk)->addr);
1758 	if (!addr) {
1759 		sunaddr->sun_family = AF_UNIX;
1760 		sunaddr->sun_path[0] = 0;
1761 		err = offsetof(struct sockaddr_un, sun_path);
1762 	} else {
1763 		err = addr->len;
1764 		memcpy(sunaddr, addr->name, addr->len);
1765 
1766 		if (peer)
1767 			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1768 					       CGROUP_UNIX_GETPEERNAME);
1769 		else
1770 			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1771 					       CGROUP_UNIX_GETSOCKNAME);
1772 	}
1773 	sock_put(sk);
1774 out:
1775 	return err;
1776 }
1777 
1778 /* The "user->unix_inflight" variable is protected by the garbage
1779  * collection lock, and we just read it locklessly here. If you go
1780  * over the limit, there might be a tiny race in actually noticing
1781  * it across threads. Tough.
1782  */
1783 static inline bool too_many_unix_fds(struct task_struct *p)
1784 {
1785 	struct user_struct *user = current_user();
1786 
1787 	if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
1788 		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
1789 	return false;
1790 }
1791 
1792 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1793 {
1794 	if (too_many_unix_fds(current))
1795 		return -ETOOMANYREFS;
1796 
1797 	UNIXCB(skb).fp = scm->fp;
1798 	scm->fp = NULL;
1799 
1800 	if (unix_prepare_fpl(UNIXCB(skb).fp))
1801 		return -ENOMEM;
1802 
1803 	return 0;
1804 }
1805 
1806 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1807 {
1808 	scm->fp = UNIXCB(skb).fp;
1809 	UNIXCB(skb).fp = NULL;
1810 
1811 	unix_destroy_fpl(scm->fp);
1812 }
1813 
1814 static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1815 {
1816 	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1817 }
1818 
1819 static void unix_destruct_scm(struct sk_buff *skb)
1820 {
1821 	struct scm_cookie scm;
1822 
1823 	memset(&scm, 0, sizeof(scm));
1824 	scm.pid  = UNIXCB(skb).pid;
1825 	if (UNIXCB(skb).fp)
1826 		unix_detach_fds(&scm, skb);
1827 
1828 	/* Alas, it calls VFS */
1829 	/* So fscking what? fput() had been SMP-safe since the last Summer */
1830 	scm_destroy(&scm);
1831 	sock_wfree(skb);
1832 }
1833 
1834 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1835 {
1836 	int err = 0;
1837 
1838 	UNIXCB(skb).pid  = get_pid(scm->pid);
1839 	UNIXCB(skb).uid = scm->creds.uid;
1840 	UNIXCB(skb).gid = scm->creds.gid;
1841 	UNIXCB(skb).fp = NULL;
1842 	unix_get_secdata(scm, skb);
1843 	if (scm->fp && send_fds)
1844 		err = unix_attach_fds(scm, skb);
1845 
1846 	skb->destructor = unix_destruct_scm;
1847 	return err;
1848 }
1849 
1850 static bool unix_passcred_enabled(const struct socket *sock,
1851 				  const struct sock *other)
1852 {
1853 	return test_bit(SOCK_PASSCRED, &sock->flags) ||
1854 	       test_bit(SOCK_PASSPIDFD, &sock->flags) ||
1855 	       !other->sk_socket ||
1856 	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags) ||
1857 	       test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags);
1858 }
1859 
1860 /*
1861  * Some apps rely on write() giving SCM_CREDENTIALS
1862  * We include credentials if source or destination socket
1863  * asserted SOCK_PASSCRED.
1864  */
1865 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1866 			    const struct sock *other)
1867 {
1868 	if (UNIXCB(skb).pid)
1869 		return;
1870 	if (unix_passcred_enabled(sock, other)) {
1871 		UNIXCB(skb).pid  = get_pid(task_tgid(current));
1872 		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1873 	}
1874 }
1875 
1876 static bool unix_skb_scm_eq(struct sk_buff *skb,
1877 			    struct scm_cookie *scm)
1878 {
1879 	return UNIXCB(skb).pid == scm->pid &&
1880 	       uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
1881 	       gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
1882 	       unix_secdata_eq(scm, skb);
1883 }
1884 
1885 static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1886 {
1887 	struct scm_fp_list *fp = UNIXCB(skb).fp;
1888 	struct unix_sock *u = unix_sk(sk);
1889 
1890 	if (unlikely(fp && fp->count)) {
1891 		atomic_add(fp->count, &u->scm_stat.nr_fds);
1892 		unix_add_edges(fp, u);
1893 	}
1894 }
1895 
1896 static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1897 {
1898 	struct scm_fp_list *fp = UNIXCB(skb).fp;
1899 	struct unix_sock *u = unix_sk(sk);
1900 
1901 	if (unlikely(fp && fp->count)) {
1902 		atomic_sub(fp->count, &u->scm_stat.nr_fds);
1903 		unix_del_edges(fp);
1904 	}
1905 }
1906 
1907 /*
1908  *	Send AF_UNIX data.
1909  */
1910 
1911 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1912 			      size_t len)
1913 {
1914 	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1915 	struct sock *sk = sock->sk, *other = NULL;
1916 	struct unix_sock *u = unix_sk(sk);
1917 	struct scm_cookie scm;
1918 	struct sk_buff *skb;
1919 	int data_len = 0;
1920 	int sk_locked;
1921 	long timeo;
1922 	int err;
1923 
1924 	err = scm_send(sock, msg, &scm, false);
1925 	if (err < 0)
1926 		return err;
1927 
1928 	wait_for_unix_gc(scm.fp);
1929 
1930 	err = -EOPNOTSUPP;
1931 	if (msg->msg_flags&MSG_OOB)
1932 		goto out;
1933 
1934 	if (msg->msg_namelen) {
1935 		err = unix_validate_addr(sunaddr, msg->msg_namelen);
1936 		if (err)
1937 			goto out;
1938 
1939 		err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk,
1940 							    msg->msg_name,
1941 							    &msg->msg_namelen,
1942 							    NULL);
1943 		if (err)
1944 			goto out;
1945 	} else {
1946 		sunaddr = NULL;
1947 		err = -ENOTCONN;
1948 		other = unix_peer_get(sk);
1949 		if (!other)
1950 			goto out;
1951 	}
1952 
1953 	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1954 	     test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
1955 		err = unix_autobind(sk);
1956 		if (err)
1957 			goto out;
1958 	}
1959 
1960 	err = -EMSGSIZE;
1961 	if (len > sk->sk_sndbuf - 32)
1962 		goto out;
1963 
1964 	if (len > SKB_MAX_ALLOC) {
1965 		data_len = min_t(size_t,
1966 				 len - SKB_MAX_ALLOC,
1967 				 MAX_SKB_FRAGS * PAGE_SIZE);
1968 		data_len = PAGE_ALIGN(data_len);
1969 
1970 		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1971 	}
1972 
1973 	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1974 				   msg->msg_flags & MSG_DONTWAIT, &err,
1975 				   PAGE_ALLOC_COSTLY_ORDER);
1976 	if (skb == NULL)
1977 		goto out;
1978 
1979 	err = unix_scm_to_skb(&scm, skb, true);
1980 	if (err < 0)
1981 		goto out_free;
1982 
1983 	skb_put(skb, len - data_len);
1984 	skb->data_len = data_len;
1985 	skb->len = len;
1986 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1987 	if (err)
1988 		goto out_free;
1989 
1990 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1991 
1992 restart:
1993 	if (!other) {
1994 		err = -ECONNRESET;
1995 		if (sunaddr == NULL)
1996 			goto out_free;
1997 
1998 		other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
1999 					sk->sk_type);
2000 		if (IS_ERR(other)) {
2001 			err = PTR_ERR(other);
2002 			other = NULL;
2003 			goto out_free;
2004 		}
2005 	}
2006 
2007 	if (sk_filter(other, skb) < 0) {
2008 		/* Toss the packet but do not return any error to the sender */
2009 		err = len;
2010 		goto out_free;
2011 	}
2012 
2013 	sk_locked = 0;
2014 	unix_state_lock(other);
2015 restart_locked:
2016 	err = -EPERM;
2017 	if (!unix_may_send(sk, other))
2018 		goto out_unlock;
2019 
2020 	if (unlikely(sock_flag(other, SOCK_DEAD))) {
2021 		/*
2022 		 *	Check with 1003.1g - what should
2023 		 *	datagram error
2024 		 */
2025 		unix_state_unlock(other);
2026 		sock_put(other);
2027 
2028 		if (!sk_locked)
2029 			unix_state_lock(sk);
2030 
2031 		err = 0;
2032 		if (sk->sk_type == SOCK_SEQPACKET) {
2033 			/* We are here only when racing with unix_release_sock()
2034 			 * is clearing @other. Never change state to TCP_CLOSE
2035 			 * unlike SOCK_DGRAM wants.
2036 			 */
2037 			unix_state_unlock(sk);
2038 			err = -EPIPE;
2039 		} else if (unix_peer(sk) == other) {
2040 			unix_peer(sk) = NULL;
2041 			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2042 
2043 			sk->sk_state = TCP_CLOSE;
2044 			unix_state_unlock(sk);
2045 
2046 			unix_dgram_disconnected(sk, other);
2047 			sock_put(other);
2048 			err = -ECONNREFUSED;
2049 		} else {
2050 			unix_state_unlock(sk);
2051 		}
2052 
2053 		other = NULL;
2054 		if (err)
2055 			goto out_free;
2056 		goto restart;
2057 	}
2058 
2059 	err = -EPIPE;
2060 	if (other->sk_shutdown & RCV_SHUTDOWN)
2061 		goto out_unlock;
2062 
2063 	if (sk->sk_type != SOCK_SEQPACKET) {
2064 		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2065 		if (err)
2066 			goto out_unlock;
2067 	}
2068 
2069 	/* other == sk && unix_peer(other) != sk if
2070 	 * - unix_peer(sk) == NULL, destination address bound to sk
2071 	 * - unix_peer(sk) == sk by time of get but disconnected before lock
2072 	 */
2073 	if (other != sk &&
2074 	    unlikely(unix_peer(other) != sk &&
2075 	    unix_recvq_full_lockless(other))) {
2076 		if (timeo) {
2077 			timeo = unix_wait_for_peer(other, timeo);
2078 
2079 			err = sock_intr_errno(timeo);
2080 			if (signal_pending(current))
2081 				goto out_free;
2082 
2083 			goto restart;
2084 		}
2085 
2086 		if (!sk_locked) {
2087 			unix_state_unlock(other);
2088 			unix_state_double_lock(sk, other);
2089 		}
2090 
2091 		if (unix_peer(sk) != other ||
2092 		    unix_dgram_peer_wake_me(sk, other)) {
2093 			err = -EAGAIN;
2094 			sk_locked = 1;
2095 			goto out_unlock;
2096 		}
2097 
2098 		if (!sk_locked) {
2099 			sk_locked = 1;
2100 			goto restart_locked;
2101 		}
2102 	}
2103 
2104 	if (unlikely(sk_locked))
2105 		unix_state_unlock(sk);
2106 
2107 	if (sock_flag(other, SOCK_RCVTSTAMP))
2108 		__net_timestamp(skb);
2109 	maybe_add_creds(skb, sock, other);
2110 	scm_stat_add(other, skb);
2111 	skb_queue_tail(&other->sk_receive_queue, skb);
2112 	unix_state_unlock(other);
2113 	other->sk_data_ready(other);
2114 	sock_put(other);
2115 	scm_destroy(&scm);
2116 	return len;
2117 
2118 out_unlock:
2119 	if (sk_locked)
2120 		unix_state_unlock(sk);
2121 	unix_state_unlock(other);
2122 out_free:
2123 	kfree_skb(skb);
2124 out:
2125 	if (other)
2126 		sock_put(other);
2127 	scm_destroy(&scm);
2128 	return err;
2129 }
2130 
2131 /* We use paged skbs for stream sockets, and limit occupancy to 32768
2132  * bytes, and a minimum of a full page.
2133  */
2134 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
2135 
2136 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2137 static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
2138 		     struct scm_cookie *scm, bool fds_sent)
2139 {
2140 	struct unix_sock *ousk = unix_sk(other);
2141 	struct sk_buff *skb;
2142 	int err = 0;
2143 
2144 	skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2145 
2146 	if (!skb)
2147 		return err;
2148 
2149 	err = unix_scm_to_skb(scm, skb, !fds_sent);
2150 	if (err < 0) {
2151 		kfree_skb(skb);
2152 		return err;
2153 	}
2154 	skb_put(skb, 1);
2155 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2156 
2157 	if (err) {
2158 		kfree_skb(skb);
2159 		return err;
2160 	}
2161 
2162 	unix_state_lock(other);
2163 
2164 	if (sock_flag(other, SOCK_DEAD) ||
2165 	    (other->sk_shutdown & RCV_SHUTDOWN)) {
2166 		unix_state_unlock(other);
2167 		kfree_skb(skb);
2168 		return -EPIPE;
2169 	}
2170 
2171 	maybe_add_creds(skb, sock, other);
2172 	skb_get(skb);
2173 
2174 	scm_stat_add(other, skb);
2175 
2176 	spin_lock(&other->sk_receive_queue.lock);
2177 	if (ousk->oob_skb)
2178 		consume_skb(ousk->oob_skb);
2179 	WRITE_ONCE(ousk->oob_skb, skb);
2180 	__skb_queue_tail(&other->sk_receive_queue, skb);
2181 	spin_unlock(&other->sk_receive_queue.lock);
2182 
2183 	sk_send_sigurg(other);
2184 	unix_state_unlock(other);
2185 	other->sk_data_ready(other);
2186 
2187 	return err;
2188 }
2189 #endif
2190 
2191 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2192 			       size_t len)
2193 {
2194 	struct sock *sk = sock->sk;
2195 	struct sock *other = NULL;
2196 	int err, size;
2197 	struct sk_buff *skb;
2198 	int sent = 0;
2199 	struct scm_cookie scm;
2200 	bool fds_sent = false;
2201 	int data_len;
2202 
2203 	err = scm_send(sock, msg, &scm, false);
2204 	if (err < 0)
2205 		return err;
2206 
2207 	wait_for_unix_gc(scm.fp);
2208 
2209 	err = -EOPNOTSUPP;
2210 	if (msg->msg_flags & MSG_OOB) {
2211 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2212 		if (len)
2213 			len--;
2214 		else
2215 #endif
2216 			goto out_err;
2217 	}
2218 
2219 	if (msg->msg_namelen) {
2220 		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2221 		goto out_err;
2222 	} else {
2223 		err = -ENOTCONN;
2224 		other = unix_peer(sk);
2225 		if (!other)
2226 			goto out_err;
2227 	}
2228 
2229 	if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2230 		goto pipe_err;
2231 
2232 	while (sent < len) {
2233 		size = len - sent;
2234 
2235 		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2236 			skb = sock_alloc_send_pskb(sk, 0, 0,
2237 						   msg->msg_flags & MSG_DONTWAIT,
2238 						   &err, 0);
2239 		} else {
2240 			/* Keep two messages in the pipe so it schedules better */
2241 			size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
2242 
2243 			/* allow fallback to order-0 allocations */
2244 			size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2245 
2246 			data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2247 
2248 			data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2249 
2250 			skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2251 						   msg->msg_flags & MSG_DONTWAIT, &err,
2252 						   get_order(UNIX_SKB_FRAGS_SZ));
2253 		}
2254 		if (!skb)
2255 			goto out_err;
2256 
2257 		/* Only send the fds in the first buffer */
2258 		err = unix_scm_to_skb(&scm, skb, !fds_sent);
2259 		if (err < 0) {
2260 			kfree_skb(skb);
2261 			goto out_err;
2262 		}
2263 		fds_sent = true;
2264 
2265 		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2266 			err = skb_splice_from_iter(skb, &msg->msg_iter, size,
2267 						   sk->sk_allocation);
2268 			if (err < 0) {
2269 				kfree_skb(skb);
2270 				goto out_err;
2271 			}
2272 			size = err;
2273 			refcount_add(size, &sk->sk_wmem_alloc);
2274 		} else {
2275 			skb_put(skb, size - data_len);
2276 			skb->data_len = data_len;
2277 			skb->len = size;
2278 			err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2279 			if (err) {
2280 				kfree_skb(skb);
2281 				goto out_err;
2282 			}
2283 		}
2284 
2285 		unix_state_lock(other);
2286 
2287 		if (sock_flag(other, SOCK_DEAD) ||
2288 		    (other->sk_shutdown & RCV_SHUTDOWN))
2289 			goto pipe_err_free;
2290 
2291 		maybe_add_creds(skb, sock, other);
2292 		scm_stat_add(other, skb);
2293 		skb_queue_tail(&other->sk_receive_queue, skb);
2294 		unix_state_unlock(other);
2295 		other->sk_data_ready(other);
2296 		sent += size;
2297 	}
2298 
2299 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2300 	if (msg->msg_flags & MSG_OOB) {
2301 		err = queue_oob(sock, msg, other, &scm, fds_sent);
2302 		if (err)
2303 			goto out_err;
2304 		sent++;
2305 	}
2306 #endif
2307 
2308 	scm_destroy(&scm);
2309 
2310 	return sent;
2311 
2312 pipe_err_free:
2313 	unix_state_unlock(other);
2314 	kfree_skb(skb);
2315 pipe_err:
2316 	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
2317 		send_sig(SIGPIPE, current, 0);
2318 	err = -EPIPE;
2319 out_err:
2320 	scm_destroy(&scm);
2321 	return sent ? : err;
2322 }
2323 
2324 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2325 				  size_t len)
2326 {
2327 	int err;
2328 	struct sock *sk = sock->sk;
2329 
2330 	err = sock_error(sk);
2331 	if (err)
2332 		return err;
2333 
2334 	if (sk->sk_state != TCP_ESTABLISHED)
2335 		return -ENOTCONN;
2336 
2337 	if (msg->msg_namelen)
2338 		msg->msg_namelen = 0;
2339 
2340 	return unix_dgram_sendmsg(sock, msg, len);
2341 }
2342 
2343 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2344 				  size_t size, int flags)
2345 {
2346 	struct sock *sk = sock->sk;
2347 
2348 	if (sk->sk_state != TCP_ESTABLISHED)
2349 		return -ENOTCONN;
2350 
2351 	return unix_dgram_recvmsg(sock, msg, size, flags);
2352 }
2353 
2354 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2355 {
2356 	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2357 
2358 	if (addr) {
2359 		msg->msg_namelen = addr->len;
2360 		memcpy(msg->msg_name, addr->name, addr->len);
2361 	}
2362 }
2363 
2364 int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2365 			 int flags)
2366 {
2367 	struct scm_cookie scm;
2368 	struct socket *sock = sk->sk_socket;
2369 	struct unix_sock *u = unix_sk(sk);
2370 	struct sk_buff *skb, *last;
2371 	long timeo;
2372 	int skip;
2373 	int err;
2374 
2375 	err = -EOPNOTSUPP;
2376 	if (flags&MSG_OOB)
2377 		goto out;
2378 
2379 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2380 
2381 	do {
2382 		mutex_lock(&u->iolock);
2383 
2384 		skip = sk_peek_offset(sk, flags);
2385 		skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2386 					      &skip, &err, &last);
2387 		if (skb) {
2388 			if (!(flags & MSG_PEEK))
2389 				scm_stat_del(sk, skb);
2390 			break;
2391 		}
2392 
2393 		mutex_unlock(&u->iolock);
2394 
2395 		if (err != -EAGAIN)
2396 			break;
2397 	} while (timeo &&
2398 		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2399 					      &err, &timeo, last));
2400 
2401 	if (!skb) { /* implies iolock unlocked */
2402 		unix_state_lock(sk);
2403 		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2404 		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2405 		    (sk->sk_shutdown & RCV_SHUTDOWN))
2406 			err = 0;
2407 		unix_state_unlock(sk);
2408 		goto out;
2409 	}
2410 
2411 	if (wq_has_sleeper(&u->peer_wait))
2412 		wake_up_interruptible_sync_poll(&u->peer_wait,
2413 						EPOLLOUT | EPOLLWRNORM |
2414 						EPOLLWRBAND);
2415 
2416 	if (msg->msg_name) {
2417 		unix_copy_addr(msg, skb->sk);
2418 
2419 		BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2420 						      msg->msg_name,
2421 						      &msg->msg_namelen);
2422 	}
2423 
2424 	if (size > skb->len - skip)
2425 		size = skb->len - skip;
2426 	else if (size < skb->len - skip)
2427 		msg->msg_flags |= MSG_TRUNC;
2428 
2429 	err = skb_copy_datagram_msg(skb, skip, msg, size);
2430 	if (err)
2431 		goto out_free;
2432 
2433 	if (sock_flag(sk, SOCK_RCVTSTAMP))
2434 		__sock_recv_timestamp(msg, sk, skb);
2435 
2436 	memset(&scm, 0, sizeof(scm));
2437 
2438 	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2439 	unix_set_secdata(&scm, skb);
2440 
2441 	if (!(flags & MSG_PEEK)) {
2442 		if (UNIXCB(skb).fp)
2443 			unix_detach_fds(&scm, skb);
2444 
2445 		sk_peek_offset_bwd(sk, skb->len);
2446 	} else {
2447 		/* It is questionable: on PEEK we could:
2448 		   - do not return fds - good, but too simple 8)
2449 		   - return fds, and do not return them on read (old strategy,
2450 		     apparently wrong)
2451 		   - clone fds (I chose it for now, it is the most universal
2452 		     solution)
2453 
2454 		   POSIX 1003.1g does not actually define this clearly
2455 		   at all. POSIX 1003.1g doesn't define a lot of things
2456 		   clearly however!
2457 
2458 		*/
2459 
2460 		sk_peek_offset_fwd(sk, size);
2461 
2462 		if (UNIXCB(skb).fp)
2463 			unix_peek_fds(&scm, skb);
2464 	}
2465 	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2466 
2467 	scm_recv_unix(sock, msg, &scm, flags);
2468 
2469 out_free:
2470 	skb_free_datagram(sk, skb);
2471 	mutex_unlock(&u->iolock);
2472 out:
2473 	return err;
2474 }
2475 
2476 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2477 			      int flags)
2478 {
2479 	struct sock *sk = sock->sk;
2480 
2481 #ifdef CONFIG_BPF_SYSCALL
2482 	const struct proto *prot = READ_ONCE(sk->sk_prot);
2483 
2484 	if (prot != &unix_dgram_proto)
2485 		return prot->recvmsg(sk, msg, size, flags, NULL);
2486 #endif
2487 	return __unix_dgram_recvmsg(sk, msg, size, flags);
2488 }
2489 
2490 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2491 {
2492 	struct unix_sock *u = unix_sk(sk);
2493 	struct sk_buff *skb;
2494 	int err;
2495 
2496 	mutex_lock(&u->iolock);
2497 	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2498 	mutex_unlock(&u->iolock);
2499 	if (!skb)
2500 		return err;
2501 
2502 	return recv_actor(sk, skb);
2503 }
2504 
2505 /*
2506  *	Sleep until more data has arrived. But check for races..
2507  */
2508 static long unix_stream_data_wait(struct sock *sk, long timeo,
2509 				  struct sk_buff *last, unsigned int last_len,
2510 				  bool freezable)
2511 {
2512 	unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2513 	struct sk_buff *tail;
2514 	DEFINE_WAIT(wait);
2515 
2516 	unix_state_lock(sk);
2517 
2518 	for (;;) {
2519 		prepare_to_wait(sk_sleep(sk), &wait, state);
2520 
2521 		tail = skb_peek_tail(&sk->sk_receive_queue);
2522 		if (tail != last ||
2523 		    (tail && tail->len != last_len) ||
2524 		    sk->sk_err ||
2525 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2526 		    signal_pending(current) ||
2527 		    !timeo)
2528 			break;
2529 
2530 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2531 		unix_state_unlock(sk);
2532 		timeo = schedule_timeout(timeo);
2533 		unix_state_lock(sk);
2534 
2535 		if (sock_flag(sk, SOCK_DEAD))
2536 			break;
2537 
2538 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2539 	}
2540 
2541 	finish_wait(sk_sleep(sk), &wait);
2542 	unix_state_unlock(sk);
2543 	return timeo;
2544 }
2545 
2546 static unsigned int unix_skb_len(const struct sk_buff *skb)
2547 {
2548 	return skb->len - UNIXCB(skb).consumed;
2549 }
2550 
2551 struct unix_stream_read_state {
2552 	int (*recv_actor)(struct sk_buff *, int, int,
2553 			  struct unix_stream_read_state *);
2554 	struct socket *socket;
2555 	struct msghdr *msg;
2556 	struct pipe_inode_info *pipe;
2557 	size_t size;
2558 	int flags;
2559 	unsigned int splice_flags;
2560 };
2561 
2562 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2563 static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2564 {
2565 	struct socket *sock = state->socket;
2566 	struct sock *sk = sock->sk;
2567 	struct unix_sock *u = unix_sk(sk);
2568 	int chunk = 1;
2569 	struct sk_buff *oob_skb;
2570 
2571 	mutex_lock(&u->iolock);
2572 	unix_state_lock(sk);
2573 	spin_lock(&sk->sk_receive_queue.lock);
2574 
2575 	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2576 		spin_unlock(&sk->sk_receive_queue.lock);
2577 		unix_state_unlock(sk);
2578 		mutex_unlock(&u->iolock);
2579 		return -EINVAL;
2580 	}
2581 
2582 	oob_skb = u->oob_skb;
2583 
2584 	if (!(state->flags & MSG_PEEK))
2585 		WRITE_ONCE(u->oob_skb, NULL);
2586 	else
2587 		skb_get(oob_skb);
2588 
2589 	spin_unlock(&sk->sk_receive_queue.lock);
2590 	unix_state_unlock(sk);
2591 
2592 	chunk = state->recv_actor(oob_skb, 0, chunk, state);
2593 
2594 	if (!(state->flags & MSG_PEEK))
2595 		UNIXCB(oob_skb).consumed += 1;
2596 
2597 	consume_skb(oob_skb);
2598 
2599 	mutex_unlock(&u->iolock);
2600 
2601 	if (chunk < 0)
2602 		return -EFAULT;
2603 
2604 	state->msg->msg_flags |= MSG_OOB;
2605 	return 1;
2606 }
2607 
2608 static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
2609 				  int flags, int copied)
2610 {
2611 	struct unix_sock *u = unix_sk(sk);
2612 
2613 	if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
2614 		skb_unlink(skb, &sk->sk_receive_queue);
2615 		consume_skb(skb);
2616 		skb = NULL;
2617 	} else {
2618 		struct sk_buff *unlinked_skb = NULL;
2619 
2620 		spin_lock(&sk->sk_receive_queue.lock);
2621 
2622 		if (skb == u->oob_skb) {
2623 			if (copied) {
2624 				skb = NULL;
2625 			} else if (sock_flag(sk, SOCK_URGINLINE)) {
2626 				if (!(flags & MSG_PEEK)) {
2627 					WRITE_ONCE(u->oob_skb, NULL);
2628 					consume_skb(skb);
2629 				}
2630 			} else if (flags & MSG_PEEK) {
2631 				skb = NULL;
2632 			} else {
2633 				__skb_unlink(skb, &sk->sk_receive_queue);
2634 				WRITE_ONCE(u->oob_skb, NULL);
2635 				unlinked_skb = skb;
2636 				skb = skb_peek(&sk->sk_receive_queue);
2637 			}
2638 		}
2639 
2640 		spin_unlock(&sk->sk_receive_queue.lock);
2641 
2642 		if (unlinked_skb) {
2643 			WARN_ON_ONCE(skb_unref(unlinked_skb));
2644 			kfree_skb(unlinked_skb);
2645 		}
2646 	}
2647 	return skb;
2648 }
2649 #endif
2650 
2651 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2652 {
2653 	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
2654 		return -ENOTCONN;
2655 
2656 	return unix_read_skb(sk, recv_actor);
2657 }
2658 
2659 static int unix_stream_read_generic(struct unix_stream_read_state *state,
2660 				    bool freezable)
2661 {
2662 	struct scm_cookie scm;
2663 	struct socket *sock = state->socket;
2664 	struct sock *sk = sock->sk;
2665 	struct unix_sock *u = unix_sk(sk);
2666 	int copied = 0;
2667 	int flags = state->flags;
2668 	int noblock = flags & MSG_DONTWAIT;
2669 	bool check_creds = false;
2670 	int target;
2671 	int err = 0;
2672 	long timeo;
2673 	int skip;
2674 	size_t size = state->size;
2675 	unsigned int last_len;
2676 
2677 	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2678 		err = -EINVAL;
2679 		goto out;
2680 	}
2681 
2682 	if (unlikely(flags & MSG_OOB)) {
2683 		err = -EOPNOTSUPP;
2684 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2685 		err = unix_stream_recv_urg(state);
2686 #endif
2687 		goto out;
2688 	}
2689 
2690 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2691 	timeo = sock_rcvtimeo(sk, noblock);
2692 
2693 	memset(&scm, 0, sizeof(scm));
2694 
2695 	/* Lock the socket to prevent queue disordering
2696 	 * while sleeps in memcpy_tomsg
2697 	 */
2698 	mutex_lock(&u->iolock);
2699 
2700 	skip = max(sk_peek_offset(sk, flags), 0);
2701 
2702 	do {
2703 		int chunk;
2704 		bool drop_skb;
2705 		struct sk_buff *skb, *last;
2706 
2707 redo:
2708 		unix_state_lock(sk);
2709 		if (sock_flag(sk, SOCK_DEAD)) {
2710 			err = -ECONNRESET;
2711 			goto unlock;
2712 		}
2713 		last = skb = skb_peek(&sk->sk_receive_queue);
2714 		last_len = last ? last->len : 0;
2715 
2716 again:
2717 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2718 		if (skb) {
2719 			skb = manage_oob(skb, sk, flags, copied);
2720 			if (!skb && copied) {
2721 				unix_state_unlock(sk);
2722 				break;
2723 			}
2724 		}
2725 #endif
2726 		if (skb == NULL) {
2727 			if (copied >= target)
2728 				goto unlock;
2729 
2730 			/*
2731 			 *	POSIX 1003.1g mandates this order.
2732 			 */
2733 
2734 			err = sock_error(sk);
2735 			if (err)
2736 				goto unlock;
2737 			if (sk->sk_shutdown & RCV_SHUTDOWN)
2738 				goto unlock;
2739 
2740 			unix_state_unlock(sk);
2741 			if (!timeo) {
2742 				err = -EAGAIN;
2743 				break;
2744 			}
2745 
2746 			mutex_unlock(&u->iolock);
2747 
2748 			timeo = unix_stream_data_wait(sk, timeo, last,
2749 						      last_len, freezable);
2750 
2751 			if (signal_pending(current)) {
2752 				err = sock_intr_errno(timeo);
2753 				scm_destroy(&scm);
2754 				goto out;
2755 			}
2756 
2757 			mutex_lock(&u->iolock);
2758 			goto redo;
2759 unlock:
2760 			unix_state_unlock(sk);
2761 			break;
2762 		}
2763 
2764 		while (skip >= unix_skb_len(skb)) {
2765 			skip -= unix_skb_len(skb);
2766 			last = skb;
2767 			last_len = skb->len;
2768 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2769 			if (!skb)
2770 				goto again;
2771 		}
2772 
2773 		unix_state_unlock(sk);
2774 
2775 		if (check_creds) {
2776 			/* Never glue messages from different writers */
2777 			if (!unix_skb_scm_eq(skb, &scm))
2778 				break;
2779 		} else if (test_bit(SOCK_PASSCRED, &sock->flags) ||
2780 			   test_bit(SOCK_PASSPIDFD, &sock->flags)) {
2781 			/* Copy credentials */
2782 			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2783 			unix_set_secdata(&scm, skb);
2784 			check_creds = true;
2785 		}
2786 
2787 		/* Copy address just once */
2788 		if (state->msg && state->msg->msg_name) {
2789 			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2790 					 state->msg->msg_name);
2791 			unix_copy_addr(state->msg, skb->sk);
2792 
2793 			BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2794 							      state->msg->msg_name,
2795 							      &state->msg->msg_namelen);
2796 
2797 			sunaddr = NULL;
2798 		}
2799 
2800 		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2801 		skb_get(skb);
2802 		chunk = state->recv_actor(skb, skip, chunk, state);
2803 		drop_skb = !unix_skb_len(skb);
2804 		/* skb is only safe to use if !drop_skb */
2805 		consume_skb(skb);
2806 		if (chunk < 0) {
2807 			if (copied == 0)
2808 				copied = -EFAULT;
2809 			break;
2810 		}
2811 		copied += chunk;
2812 		size -= chunk;
2813 
2814 		if (drop_skb) {
2815 			/* the skb was touched by a concurrent reader;
2816 			 * we should not expect anything from this skb
2817 			 * anymore and assume it invalid - we can be
2818 			 * sure it was dropped from the socket queue
2819 			 *
2820 			 * let's report a short read
2821 			 */
2822 			err = 0;
2823 			break;
2824 		}
2825 
2826 		/* Mark read part of skb as used */
2827 		if (!(flags & MSG_PEEK)) {
2828 			UNIXCB(skb).consumed += chunk;
2829 
2830 			sk_peek_offset_bwd(sk, chunk);
2831 
2832 			if (UNIXCB(skb).fp) {
2833 				scm_stat_del(sk, skb);
2834 				unix_detach_fds(&scm, skb);
2835 			}
2836 
2837 			if (unix_skb_len(skb))
2838 				break;
2839 
2840 			skb_unlink(skb, &sk->sk_receive_queue);
2841 			consume_skb(skb);
2842 
2843 			if (scm.fp)
2844 				break;
2845 		} else {
2846 			/* It is questionable, see note in unix_dgram_recvmsg.
2847 			 */
2848 			if (UNIXCB(skb).fp)
2849 				unix_peek_fds(&scm, skb);
2850 
2851 			sk_peek_offset_fwd(sk, chunk);
2852 
2853 			if (UNIXCB(skb).fp)
2854 				break;
2855 
2856 			skip = 0;
2857 			last = skb;
2858 			last_len = skb->len;
2859 			unix_state_lock(sk);
2860 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2861 			if (skb)
2862 				goto again;
2863 			unix_state_unlock(sk);
2864 			break;
2865 		}
2866 	} while (size);
2867 
2868 	mutex_unlock(&u->iolock);
2869 	if (state->msg)
2870 		scm_recv_unix(sock, state->msg, &scm, flags);
2871 	else
2872 		scm_destroy(&scm);
2873 out:
2874 	return copied ? : err;
2875 }
2876 
2877 static int unix_stream_read_actor(struct sk_buff *skb,
2878 				  int skip, int chunk,
2879 				  struct unix_stream_read_state *state)
2880 {
2881 	int ret;
2882 
2883 	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2884 				    state->msg, chunk);
2885 	return ret ?: chunk;
2886 }
2887 
2888 int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
2889 			  size_t size, int flags)
2890 {
2891 	struct unix_stream_read_state state = {
2892 		.recv_actor = unix_stream_read_actor,
2893 		.socket = sk->sk_socket,
2894 		.msg = msg,
2895 		.size = size,
2896 		.flags = flags
2897 	};
2898 
2899 	return unix_stream_read_generic(&state, true);
2900 }
2901 
2902 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2903 			       size_t size, int flags)
2904 {
2905 	struct unix_stream_read_state state = {
2906 		.recv_actor = unix_stream_read_actor,
2907 		.socket = sock,
2908 		.msg = msg,
2909 		.size = size,
2910 		.flags = flags
2911 	};
2912 
2913 #ifdef CONFIG_BPF_SYSCALL
2914 	struct sock *sk = sock->sk;
2915 	const struct proto *prot = READ_ONCE(sk->sk_prot);
2916 
2917 	if (prot != &unix_stream_proto)
2918 		return prot->recvmsg(sk, msg, size, flags, NULL);
2919 #endif
2920 	return unix_stream_read_generic(&state, true);
2921 }
2922 
2923 static int unix_stream_splice_actor(struct sk_buff *skb,
2924 				    int skip, int chunk,
2925 				    struct unix_stream_read_state *state)
2926 {
2927 	return skb_splice_bits(skb, state->socket->sk,
2928 			       UNIXCB(skb).consumed + skip,
2929 			       state->pipe, chunk, state->splice_flags);
2930 }
2931 
2932 static ssize_t unix_stream_splice_read(struct socket *sock,  loff_t *ppos,
2933 				       struct pipe_inode_info *pipe,
2934 				       size_t size, unsigned int flags)
2935 {
2936 	struct unix_stream_read_state state = {
2937 		.recv_actor = unix_stream_splice_actor,
2938 		.socket = sock,
2939 		.pipe = pipe,
2940 		.size = size,
2941 		.splice_flags = flags,
2942 	};
2943 
2944 	if (unlikely(*ppos))
2945 		return -ESPIPE;
2946 
2947 	if (sock->file->f_flags & O_NONBLOCK ||
2948 	    flags & SPLICE_F_NONBLOCK)
2949 		state.flags = MSG_DONTWAIT;
2950 
2951 	return unix_stream_read_generic(&state, false);
2952 }
2953 
2954 static int unix_shutdown(struct socket *sock, int mode)
2955 {
2956 	struct sock *sk = sock->sk;
2957 	struct sock *other;
2958 
2959 	if (mode < SHUT_RD || mode > SHUT_RDWR)
2960 		return -EINVAL;
2961 	/* This maps:
2962 	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
2963 	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
2964 	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2965 	 */
2966 	++mode;
2967 
2968 	unix_state_lock(sk);
2969 	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
2970 	other = unix_peer(sk);
2971 	if (other)
2972 		sock_hold(other);
2973 	unix_state_unlock(sk);
2974 	sk->sk_state_change(sk);
2975 
2976 	if (other &&
2977 		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2978 
2979 		int peer_mode = 0;
2980 		const struct proto *prot = READ_ONCE(other->sk_prot);
2981 
2982 		if (prot->unhash)
2983 			prot->unhash(other);
2984 		if (mode&RCV_SHUTDOWN)
2985 			peer_mode |= SEND_SHUTDOWN;
2986 		if (mode&SEND_SHUTDOWN)
2987 			peer_mode |= RCV_SHUTDOWN;
2988 		unix_state_lock(other);
2989 		WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
2990 		unix_state_unlock(other);
2991 		other->sk_state_change(other);
2992 		if (peer_mode == SHUTDOWN_MASK)
2993 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2994 		else if (peer_mode & RCV_SHUTDOWN)
2995 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2996 	}
2997 	if (other)
2998 		sock_put(other);
2999 
3000 	return 0;
3001 }
3002 
3003 long unix_inq_len(struct sock *sk)
3004 {
3005 	struct sk_buff *skb;
3006 	long amount = 0;
3007 
3008 	if (sk->sk_state == TCP_LISTEN)
3009 		return -EINVAL;
3010 
3011 	spin_lock(&sk->sk_receive_queue.lock);
3012 	if (sk->sk_type == SOCK_STREAM ||
3013 	    sk->sk_type == SOCK_SEQPACKET) {
3014 		skb_queue_walk(&sk->sk_receive_queue, skb)
3015 			amount += unix_skb_len(skb);
3016 	} else {
3017 		skb = skb_peek(&sk->sk_receive_queue);
3018 		if (skb)
3019 			amount = skb->len;
3020 	}
3021 	spin_unlock(&sk->sk_receive_queue.lock);
3022 
3023 	return amount;
3024 }
3025 EXPORT_SYMBOL_GPL(unix_inq_len);
3026 
3027 long unix_outq_len(struct sock *sk)
3028 {
3029 	return sk_wmem_alloc_get(sk);
3030 }
3031 EXPORT_SYMBOL_GPL(unix_outq_len);
3032 
3033 static int unix_open_file(struct sock *sk)
3034 {
3035 	struct path path;
3036 	struct file *f;
3037 	int fd;
3038 
3039 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
3040 		return -EPERM;
3041 
3042 	if (!smp_load_acquire(&unix_sk(sk)->addr))
3043 		return -ENOENT;
3044 
3045 	path = unix_sk(sk)->path;
3046 	if (!path.dentry)
3047 		return -ENOENT;
3048 
3049 	path_get(&path);
3050 
3051 	fd = get_unused_fd_flags(O_CLOEXEC);
3052 	if (fd < 0)
3053 		goto out;
3054 
3055 	f = dentry_open(&path, O_PATH, current_cred());
3056 	if (IS_ERR(f)) {
3057 		put_unused_fd(fd);
3058 		fd = PTR_ERR(f);
3059 		goto out;
3060 	}
3061 
3062 	fd_install(fd, f);
3063 out:
3064 	path_put(&path);
3065 
3066 	return fd;
3067 }
3068 
3069 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3070 {
3071 	struct sock *sk = sock->sk;
3072 	long amount = 0;
3073 	int err;
3074 
3075 	switch (cmd) {
3076 	case SIOCOUTQ:
3077 		amount = unix_outq_len(sk);
3078 		err = put_user(amount, (int __user *)arg);
3079 		break;
3080 	case SIOCINQ:
3081 		amount = unix_inq_len(sk);
3082 		if (amount < 0)
3083 			err = amount;
3084 		else
3085 			err = put_user(amount, (int __user *)arg);
3086 		break;
3087 	case SIOCUNIXFILE:
3088 		err = unix_open_file(sk);
3089 		break;
3090 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3091 	case SIOCATMARK:
3092 		{
3093 			struct sk_buff *skb;
3094 			int answ = 0;
3095 
3096 			skb = skb_peek(&sk->sk_receive_queue);
3097 			if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
3098 				answ = 1;
3099 			err = put_user(answ, (int __user *)arg);
3100 		}
3101 		break;
3102 #endif
3103 	default:
3104 		err = -ENOIOCTLCMD;
3105 		break;
3106 	}
3107 	return err;
3108 }
3109 
3110 #ifdef CONFIG_COMPAT
3111 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3112 {
3113 	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
3114 }
3115 #endif
3116 
3117 static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
3118 {
3119 	struct sock *sk = sock->sk;
3120 	__poll_t mask;
3121 	u8 shutdown;
3122 
3123 	sock_poll_wait(file, sock, wait);
3124 	mask = 0;
3125 	shutdown = READ_ONCE(sk->sk_shutdown);
3126 
3127 	/* exceptional events? */
3128 	if (READ_ONCE(sk->sk_err))
3129 		mask |= EPOLLERR;
3130 	if (shutdown == SHUTDOWN_MASK)
3131 		mask |= EPOLLHUP;
3132 	if (shutdown & RCV_SHUTDOWN)
3133 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3134 
3135 	/* readable? */
3136 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3137 		mask |= EPOLLIN | EPOLLRDNORM;
3138 	if (sk_is_readable(sk))
3139 		mask |= EPOLLIN | EPOLLRDNORM;
3140 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3141 	if (READ_ONCE(unix_sk(sk)->oob_skb))
3142 		mask |= EPOLLPRI;
3143 #endif
3144 
3145 	/* Connection-based need to check for termination and startup */
3146 	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
3147 	    sk->sk_state == TCP_CLOSE)
3148 		mask |= EPOLLHUP;
3149 
3150 	/*
3151 	 * we set writable also when the other side has shut down the
3152 	 * connection. This prevents stuck sockets.
3153 	 */
3154 	if (unix_writable(sk))
3155 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3156 
3157 	return mask;
3158 }
3159 
3160 static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
3161 				    poll_table *wait)
3162 {
3163 	struct sock *sk = sock->sk, *other;
3164 	unsigned int writable;
3165 	__poll_t mask;
3166 	u8 shutdown;
3167 
3168 	sock_poll_wait(file, sock, wait);
3169 	mask = 0;
3170 	shutdown = READ_ONCE(sk->sk_shutdown);
3171 
3172 	/* exceptional events? */
3173 	if (READ_ONCE(sk->sk_err) ||
3174 	    !skb_queue_empty_lockless(&sk->sk_error_queue))
3175 		mask |= EPOLLERR |
3176 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
3177 
3178 	if (shutdown & RCV_SHUTDOWN)
3179 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3180 	if (shutdown == SHUTDOWN_MASK)
3181 		mask |= EPOLLHUP;
3182 
3183 	/* readable? */
3184 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3185 		mask |= EPOLLIN | EPOLLRDNORM;
3186 	if (sk_is_readable(sk))
3187 		mask |= EPOLLIN | EPOLLRDNORM;
3188 
3189 	/* Connection-based need to check for termination and startup */
3190 	if (sk->sk_type == SOCK_SEQPACKET) {
3191 		if (sk->sk_state == TCP_CLOSE)
3192 			mask |= EPOLLHUP;
3193 		/* connection hasn't started yet? */
3194 		if (sk->sk_state == TCP_SYN_SENT)
3195 			return mask;
3196 	}
3197 
3198 	/* No write status requested, avoid expensive OUT tests. */
3199 	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
3200 		return mask;
3201 
3202 	writable = unix_writable(sk);
3203 	if (writable) {
3204 		unix_state_lock(sk);
3205 
3206 		other = unix_peer(sk);
3207 		if (other && unix_peer(other) != sk &&
3208 		    unix_recvq_full_lockless(other) &&
3209 		    unix_dgram_peer_wake_me(sk, other))
3210 			writable = 0;
3211 
3212 		unix_state_unlock(sk);
3213 	}
3214 
3215 	if (writable)
3216 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3217 	else
3218 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
3219 
3220 	return mask;
3221 }
3222 
3223 #ifdef CONFIG_PROC_FS
3224 
3225 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
3226 
3227 #define get_bucket(x) ((x) >> BUCKET_SPACE)
3228 #define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
3229 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
3230 
3231 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
3232 {
3233 	unsigned long offset = get_offset(*pos);
3234 	unsigned long bucket = get_bucket(*pos);
3235 	unsigned long count = 0;
3236 	struct sock *sk;
3237 
3238 	for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
3239 	     sk; sk = sk_next(sk)) {
3240 		if (++count == offset)
3241 			break;
3242 	}
3243 
3244 	return sk;
3245 }
3246 
3247 static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
3248 {
3249 	unsigned long bucket = get_bucket(*pos);
3250 	struct net *net = seq_file_net(seq);
3251 	struct sock *sk;
3252 
3253 	while (bucket < UNIX_HASH_SIZE) {
3254 		spin_lock(&net->unx.table.locks[bucket]);
3255 
3256 		sk = unix_from_bucket(seq, pos);
3257 		if (sk)
3258 			return sk;
3259 
3260 		spin_unlock(&net->unx.table.locks[bucket]);
3261 
3262 		*pos = set_bucket_offset(++bucket, 1);
3263 	}
3264 
3265 	return NULL;
3266 }
3267 
3268 static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
3269 				  loff_t *pos)
3270 {
3271 	unsigned long bucket = get_bucket(*pos);
3272 
3273 	sk = sk_next(sk);
3274 	if (sk)
3275 		return sk;
3276 
3277 
3278 	spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
3279 
3280 	*pos = set_bucket_offset(++bucket, 1);
3281 
3282 	return unix_get_first(seq, pos);
3283 }
3284 
3285 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
3286 {
3287 	if (!*pos)
3288 		return SEQ_START_TOKEN;
3289 
3290 	return unix_get_first(seq, pos);
3291 }
3292 
3293 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3294 {
3295 	++*pos;
3296 
3297 	if (v == SEQ_START_TOKEN)
3298 		return unix_get_first(seq, pos);
3299 
3300 	return unix_get_next(seq, v, pos);
3301 }
3302 
3303 static void unix_seq_stop(struct seq_file *seq, void *v)
3304 {
3305 	struct sock *sk = v;
3306 
3307 	if (sk)
3308 		spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
3309 }
3310 
3311 static int unix_seq_show(struct seq_file *seq, void *v)
3312 {
3313 
3314 	if (v == SEQ_START_TOKEN)
3315 		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
3316 			 "Inode Path\n");
3317 	else {
3318 		struct sock *s = v;
3319 		struct unix_sock *u = unix_sk(s);
3320 		unix_state_lock(s);
3321 
3322 		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
3323 			s,
3324 			refcount_read(&s->sk_refcnt),
3325 			0,
3326 			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
3327 			s->sk_type,
3328 			s->sk_socket ?
3329 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
3330 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
3331 			sock_i_ino(s));
3332 
3333 		if (u->addr) {	// under a hash table lock here
3334 			int i, len;
3335 			seq_putc(seq, ' ');
3336 
3337 			i = 0;
3338 			len = u->addr->len -
3339 				offsetof(struct sockaddr_un, sun_path);
3340 			if (u->addr->name->sun_path[0]) {
3341 				len--;
3342 			} else {
3343 				seq_putc(seq, '@');
3344 				i++;
3345 			}
3346 			for ( ; i < len; i++)
3347 				seq_putc(seq, u->addr->name->sun_path[i] ?:
3348 					 '@');
3349 		}
3350 		unix_state_unlock(s);
3351 		seq_putc(seq, '\n');
3352 	}
3353 
3354 	return 0;
3355 }
3356 
3357 static const struct seq_operations unix_seq_ops = {
3358 	.start  = unix_seq_start,
3359 	.next   = unix_seq_next,
3360 	.stop   = unix_seq_stop,
3361 	.show   = unix_seq_show,
3362 };
3363 
3364 #ifdef CONFIG_BPF_SYSCALL
3365 struct bpf_unix_iter_state {
3366 	struct seq_net_private p;
3367 	unsigned int cur_sk;
3368 	unsigned int end_sk;
3369 	unsigned int max_sk;
3370 	struct sock **batch;
3371 	bool st_bucket_done;
3372 };
3373 
3374 struct bpf_iter__unix {
3375 	__bpf_md_ptr(struct bpf_iter_meta *, meta);
3376 	__bpf_md_ptr(struct unix_sock *, unix_sk);
3377 	uid_t uid __aligned(8);
3378 };
3379 
3380 static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3381 			      struct unix_sock *unix_sk, uid_t uid)
3382 {
3383 	struct bpf_iter__unix ctx;
3384 
3385 	meta->seq_num--;  /* skip SEQ_START_TOKEN */
3386 	ctx.meta = meta;
3387 	ctx.unix_sk = unix_sk;
3388 	ctx.uid = uid;
3389 	return bpf_iter_run_prog(prog, &ctx);
3390 }
3391 
3392 static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
3393 
3394 {
3395 	struct bpf_unix_iter_state *iter = seq->private;
3396 	unsigned int expected = 1;
3397 	struct sock *sk;
3398 
3399 	sock_hold(start_sk);
3400 	iter->batch[iter->end_sk++] = start_sk;
3401 
3402 	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
3403 		if (iter->end_sk < iter->max_sk) {
3404 			sock_hold(sk);
3405 			iter->batch[iter->end_sk++] = sk;
3406 		}
3407 
3408 		expected++;
3409 	}
3410 
3411 	spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
3412 
3413 	return expected;
3414 }
3415 
3416 static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
3417 {
3418 	while (iter->cur_sk < iter->end_sk)
3419 		sock_put(iter->batch[iter->cur_sk++]);
3420 }
3421 
3422 static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
3423 				       unsigned int new_batch_sz)
3424 {
3425 	struct sock **new_batch;
3426 
3427 	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3428 			     GFP_USER | __GFP_NOWARN);
3429 	if (!new_batch)
3430 		return -ENOMEM;
3431 
3432 	bpf_iter_unix_put_batch(iter);
3433 	kvfree(iter->batch);
3434 	iter->batch = new_batch;
3435 	iter->max_sk = new_batch_sz;
3436 
3437 	return 0;
3438 }
3439 
3440 static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
3441 					loff_t *pos)
3442 {
3443 	struct bpf_unix_iter_state *iter = seq->private;
3444 	unsigned int expected;
3445 	bool resized = false;
3446 	struct sock *sk;
3447 
3448 	if (iter->st_bucket_done)
3449 		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
3450 
3451 again:
3452 	/* Get a new batch */
3453 	iter->cur_sk = 0;
3454 	iter->end_sk = 0;
3455 
3456 	sk = unix_get_first(seq, pos);
3457 	if (!sk)
3458 		return NULL; /* Done */
3459 
3460 	expected = bpf_iter_unix_hold_batch(seq, sk);
3461 
3462 	if (iter->end_sk == expected) {
3463 		iter->st_bucket_done = true;
3464 		return sk;
3465 	}
3466 
3467 	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
3468 		resized = true;
3469 		goto again;
3470 	}
3471 
3472 	return sk;
3473 }
3474 
3475 static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
3476 {
3477 	if (!*pos)
3478 		return SEQ_START_TOKEN;
3479 
3480 	/* bpf iter does not support lseek, so it always
3481 	 * continue from where it was stop()-ped.
3482 	 */
3483 	return bpf_iter_unix_batch(seq, pos);
3484 }
3485 
3486 static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3487 {
3488 	struct bpf_unix_iter_state *iter = seq->private;
3489 	struct sock *sk;
3490 
3491 	/* Whenever seq_next() is called, the iter->cur_sk is
3492 	 * done with seq_show(), so advance to the next sk in
3493 	 * the batch.
3494 	 */
3495 	if (iter->cur_sk < iter->end_sk)
3496 		sock_put(iter->batch[iter->cur_sk++]);
3497 
3498 	++*pos;
3499 
3500 	if (iter->cur_sk < iter->end_sk)
3501 		sk = iter->batch[iter->cur_sk];
3502 	else
3503 		sk = bpf_iter_unix_batch(seq, pos);
3504 
3505 	return sk;
3506 }
3507 
3508 static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
3509 {
3510 	struct bpf_iter_meta meta;
3511 	struct bpf_prog *prog;
3512 	struct sock *sk = v;
3513 	uid_t uid;
3514 	bool slow;
3515 	int ret;
3516 
3517 	if (v == SEQ_START_TOKEN)
3518 		return 0;
3519 
3520 	slow = lock_sock_fast(sk);
3521 
3522 	if (unlikely(sk_unhashed(sk))) {
3523 		ret = SEQ_SKIP;
3524 		goto unlock;
3525 	}
3526 
3527 	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3528 	meta.seq = seq;
3529 	prog = bpf_iter_get_info(&meta, false);
3530 	ret = unix_prog_seq_show(prog, &meta, v, uid);
3531 unlock:
3532 	unlock_sock_fast(sk, slow);
3533 	return ret;
3534 }
3535 
3536 static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
3537 {
3538 	struct bpf_unix_iter_state *iter = seq->private;
3539 	struct bpf_iter_meta meta;
3540 	struct bpf_prog *prog;
3541 
3542 	if (!v) {
3543 		meta.seq = seq;
3544 		prog = bpf_iter_get_info(&meta, true);
3545 		if (prog)
3546 			(void)unix_prog_seq_show(prog, &meta, v, 0);
3547 	}
3548 
3549 	if (iter->cur_sk < iter->end_sk)
3550 		bpf_iter_unix_put_batch(iter);
3551 }
3552 
3553 static const struct seq_operations bpf_iter_unix_seq_ops = {
3554 	.start	= bpf_iter_unix_seq_start,
3555 	.next	= bpf_iter_unix_seq_next,
3556 	.stop	= bpf_iter_unix_seq_stop,
3557 	.show	= bpf_iter_unix_seq_show,
3558 };
3559 #endif
3560 #endif
3561 
3562 static const struct net_proto_family unix_family_ops = {
3563 	.family = PF_UNIX,
3564 	.create = unix_create,
3565 	.owner	= THIS_MODULE,
3566 };
3567 
3568 
3569 static int __net_init unix_net_init(struct net *net)
3570 {
3571 	int i;
3572 
3573 	net->unx.sysctl_max_dgram_qlen = 10;
3574 	if (unix_sysctl_register(net))
3575 		goto out;
3576 
3577 #ifdef CONFIG_PROC_FS
3578 	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
3579 			     sizeof(struct seq_net_private)))
3580 		goto err_sysctl;
3581 #endif
3582 
3583 	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
3584 					      sizeof(spinlock_t), GFP_KERNEL);
3585 	if (!net->unx.table.locks)
3586 		goto err_proc;
3587 
3588 	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
3589 						sizeof(struct hlist_head),
3590 						GFP_KERNEL);
3591 	if (!net->unx.table.buckets)
3592 		goto free_locks;
3593 
3594 	for (i = 0; i < UNIX_HASH_SIZE; i++) {
3595 		spin_lock_init(&net->unx.table.locks[i]);
3596 		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
3597 	}
3598 
3599 	return 0;
3600 
3601 free_locks:
3602 	kvfree(net->unx.table.locks);
3603 err_proc:
3604 #ifdef CONFIG_PROC_FS
3605 	remove_proc_entry("unix", net->proc_net);
3606 err_sysctl:
3607 #endif
3608 	unix_sysctl_unregister(net);
3609 out:
3610 	return -ENOMEM;
3611 }
3612 
3613 static void __net_exit unix_net_exit(struct net *net)
3614 {
3615 	kvfree(net->unx.table.buckets);
3616 	kvfree(net->unx.table.locks);
3617 	unix_sysctl_unregister(net);
3618 	remove_proc_entry("unix", net->proc_net);
3619 }
3620 
3621 static struct pernet_operations unix_net_ops = {
3622 	.init = unix_net_init,
3623 	.exit = unix_net_exit,
3624 };
3625 
3626 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3627 DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
3628 		     struct unix_sock *unix_sk, uid_t uid)
3629 
3630 #define INIT_BATCH_SZ 16
3631 
3632 static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
3633 {
3634 	struct bpf_unix_iter_state *iter = priv_data;
3635 	int err;
3636 
3637 	err = bpf_iter_init_seq_net(priv_data, aux);
3638 	if (err)
3639 		return err;
3640 
3641 	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
3642 	if (err) {
3643 		bpf_iter_fini_seq_net(priv_data);
3644 		return err;
3645 	}
3646 
3647 	return 0;
3648 }
3649 
3650 static void bpf_iter_fini_unix(void *priv_data)
3651 {
3652 	struct bpf_unix_iter_state *iter = priv_data;
3653 
3654 	bpf_iter_fini_seq_net(priv_data);
3655 	kvfree(iter->batch);
3656 }
3657 
3658 static const struct bpf_iter_seq_info unix_seq_info = {
3659 	.seq_ops		= &bpf_iter_unix_seq_ops,
3660 	.init_seq_private	= bpf_iter_init_unix,
3661 	.fini_seq_private	= bpf_iter_fini_unix,
3662 	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
3663 };
3664 
3665 static const struct bpf_func_proto *
3666 bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
3667 			     const struct bpf_prog *prog)
3668 {
3669 	switch (func_id) {
3670 	case BPF_FUNC_setsockopt:
3671 		return &bpf_sk_setsockopt_proto;
3672 	case BPF_FUNC_getsockopt:
3673 		return &bpf_sk_getsockopt_proto;
3674 	default:
3675 		return NULL;
3676 	}
3677 }
3678 
3679 static struct bpf_iter_reg unix_reg_info = {
3680 	.target			= "unix",
3681 	.ctx_arg_info_size	= 1,
3682 	.ctx_arg_info		= {
3683 		{ offsetof(struct bpf_iter__unix, unix_sk),
3684 		  PTR_TO_BTF_ID_OR_NULL },
3685 	},
3686 	.get_func_proto         = bpf_iter_unix_get_func_proto,
3687 	.seq_info		= &unix_seq_info,
3688 };
3689 
3690 static void __init bpf_iter_register(void)
3691 {
3692 	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
3693 	if (bpf_iter_reg_target(&unix_reg_info))
3694 		pr_warn("Warning: could not register bpf iterator unix\n");
3695 }
3696 #endif
3697 
3698 static int __init af_unix_init(void)
3699 {
3700 	int i, rc = -1;
3701 
3702 	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
3703 
3704 	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
3705 		spin_lock_init(&bsd_socket_locks[i]);
3706 		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
3707 	}
3708 
3709 	rc = proto_register(&unix_dgram_proto, 1);
3710 	if (rc != 0) {
3711 		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3712 		goto out;
3713 	}
3714 
3715 	rc = proto_register(&unix_stream_proto, 1);
3716 	if (rc != 0) {
3717 		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3718 		proto_unregister(&unix_dgram_proto);
3719 		goto out;
3720 	}
3721 
3722 	sock_register(&unix_family_ops);
3723 	register_pernet_subsys(&unix_net_ops);
3724 	unix_bpf_build_proto();
3725 
3726 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3727 	bpf_iter_register();
3728 #endif
3729 
3730 out:
3731 	return rc;
3732 }
3733 
3734 /* Later than subsys_initcall() because we depend on stuff initialised there */
3735 fs_initcall(af_unix_init);
3736