// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NETLINK	Kernel-user communication protocol.
 *
 * Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Patrick McHardy <kaber@trash.net>
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *	added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *	use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *	- inc module use count of module that owns
 *	  the kernel socket in case userspace opens
 *	  socket of same protocol
 *	- remove all module support, since netlink is
 *	  mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/filter.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/net_namespace.h>
#include <linux/nospec.h>
#include <linux/btf_ids.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>
#define CREATE_TRACE_POINTS
#include <trace/events/netlink.h>

#include "af_netlink.h"
#include "genetlink.h"

struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[];
};

/* state bits */
#define NETLINK_S_CONGESTED		0x0

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_test_bit(KERNEL_SOCKET, sk);
}

struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];

static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
	"nlk_cb_mutex-ROUTE",
	"nlk_cb_mutex-1",
	"nlk_cb_mutex-USERSOCK",
	"nlk_cb_mutex-FIREWALL",
	"nlk_cb_mutex-SOCK_DIAG",
	"nlk_cb_mutex-NFLOG",
	"nlk_cb_mutex-XFRM",
	"nlk_cb_mutex-SELINUX",
	"nlk_cb_mutex-ISCSI",
	"nlk_cb_mutex-AUDIT",
	"nlk_cb_mutex-FIB_LOOKUP",
	"nlk_cb_mutex-CONNECTOR",
	"nlk_cb_mutex-NETFILTER",
	"nlk_cb_mutex-IP6_FW",
	"nlk_cb_mutex-DNRTMSG",
	"nlk_cb_mutex-KOBJECT_UEVENT",
	"nlk_cb_mutex-GENERIC",
	"nlk_cb_mutex-17",
	"nlk_cb_mutex-SCSITRANSPORT",
	"nlk_cb_mutex-ECRYPTFS",
	"nlk_cb_mutex-RDMA",
	"nlk_cb_mutex-CRYPTO",
	"nlk_cb_mutex-SMC",
	"nlk_cb_mutex-23",
	"nlk_cb_mutex-24",
	"nlk_cb_mutex-25",
	"nlk_cb_mutex-26",
	"nlk_cb_mutex-27",
	"nlk_cb_mutex-28",
	"nlk_cb_mutex-29",
	"nlk_cb_mutex-30",
	"nlk_cb_mutex-31",
	"nlk_cb_mutex-MAX_LINKS"
};

static int netlink_dump(struct sock *sk, bool lock_taken);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with per bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired, either during or after the socket has been removed from
 * the list, and after an RCU grace period.
 */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))

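/* Illustrative summary (not part of the original file) of the three access
 * patterns implied by the comment above. Writers that rewrite table state
 * use netlink_table_grab()/netlink_table_ungrab(); slow read-side paths
 * that must exclude such writers use netlink_lock_table()/
 * netlink_unlock_table(); fast lookups rely on RCU alone, as in
 * netlink_lookup() below:
 *
 *	rcu_read_lock();
 *	sk = __netlink_lookup(table, portid, net);
 *	if (sk)
 *		sock_hold(sk);
 *	rcu_read_unlock();
 */
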
static BLOCKING_NOTIFIER_HEAD(netlink_chain);


static const struct rhashtable_params netlink_rhashtable_params;

void do_trace_netlink_extack(const char *msg)
{
	trace_netlink_extack(msg);
}
EXPORT_SYMBOL(do_trace_netlink_extack);

static inline u32 netlink_group_mask(u32 group)
{
	if (group > 32)
		return 0;
	return group ? 1 << (group - 1) : 0;
}
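
/* For example: group 1 maps to mask 0x1, group 3 to 0x4 and group 32 to
 * 0x80000000, while anything above 32 maps to 0, since a single u32 can
 * only describe the 32 legacy groups carried in sockaddr_nl.nl_groups.
 */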

static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
					   gfp_t gfp_mask)
{
	unsigned int len = skb->len;
	struct sk_buff *new;

	new = alloc_skb(len, gfp_mask);
	if (new == NULL)
		return NULL;

	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;

	skb_put_data(new, skb->data, len);
	return new;
}

static unsigned int netlink_tap_net_id;

struct netlink_tap_net {
	struct list_head netlink_tap_all;
	struct mutex netlink_tap_lock;
};

int netlink_add_tap(struct netlink_tap *nt)
{
	struct net *net = dev_net(nt->dev);
	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);

	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
		return -EINVAL;

	mutex_lock(&nn->netlink_tap_lock);
	list_add_rcu(&nt->list, &nn->netlink_tap_all);
	mutex_unlock(&nn->netlink_tap_lock);

	__module_get(nt->module);

	return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);
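
/* Sketch of how a tap consumer registers (illustrative; modeled on the
 * in-tree nlmon driver, error handling elided). The registered net_device
 * must be of type ARPHRD_NETLINK, and the module reference taken above
 * keeps the owner alive while the tap is listed:
 *
 *	static struct netlink_tap my_tap = {
 *		.dev	= my_nlmon_dev,		// hypothetical device
 *		.module	= THIS_MODULE,
 *	};
 *
 *	err = netlink_add_tap(&my_tap);
 *	...
 *	netlink_remove_tap(&my_tap);
 */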

static int __netlink_remove_tap(struct netlink_tap *nt)
{
	struct net *net = dev_net(nt->dev);
	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
	bool found = false;
	struct netlink_tap *tmp;

	mutex_lock(&nn->netlink_tap_lock);

	list_for_each_entry(tmp, &nn->netlink_tap_all, list) {
		if (nt == tmp) {
			list_del_rcu(&nt->list);
			found = true;
			goto out;
		}
	}

	pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
	mutex_unlock(&nn->netlink_tap_lock);

	if (found)
		module_put(nt->module);

	return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
	int ret;

	ret = __netlink_remove_tap(nt);
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);

static __net_init int netlink_tap_init_net(struct net *net)
{
	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);

	INIT_LIST_HEAD(&nn->netlink_tap_all);
	mutex_init(&nn->netlink_tap_lock);
	return 0;
}

static struct pernet_operations netlink_tap_net_ops = {
	.init = netlink_tap_init_net,
	.id   = &netlink_tap_net_id,
	.size = sizeof(struct netlink_tap_net),
};

static bool netlink_filter_tap(const struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* We take the more conservative approach and
	 * whitelist socket protocols that may pass.
	 */
	switch (sk->sk_protocol) {
	case NETLINK_ROUTE:
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_NFLOG:
	case NETLINK_XFRM:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:
		return true;
	}

	return false;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sk_buff *nskb;
	struct sock *sk = skb->sk;
	int ret = -ENOMEM;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		return 0;

	dev_hold(dev);

	if (is_vmalloc_addr(skb->head))
		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
	else
		nskb = skb_clone(skb, GFP_ATOMIC);
	if (nskb) {
		nskb->dev = dev;
		nskb->protocol = htons((u16) sk->sk_protocol);
		nskb->pkt_type = netlink_is_kernel(sk) ?
				 PACKET_KERNEL : PACKET_USER;
		skb_reset_network_header(nskb);
		ret = dev_queue_xmit(nskb);
		if (unlikely(ret > 0))
			ret = net_xmit_errno(ret);
	}

	dev_put(dev);
	return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb, struct netlink_tap_net *nn)
{
	int ret;
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))
		return;

	list_for_each_entry_rcu(tmp, &nn->netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
		if (unlikely(ret))
			break;
	}
}

static void netlink_deliver_tap(struct net *net, struct sk_buff *skb)
{
	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);

	rcu_read_lock();

	if (unlikely(!list_empty(&nn->netlink_tap_all)))
		__netlink_deliver_tap(skb, nn);

	rcu_read_unlock();
}

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
				       struct sk_buff *skb)
{
	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
		netlink_deliver_tap(sock_net(dst), skb);
}

static void netlink_overrun(struct sock *sk)
{
	if (!nlk_test_bit(RECV_NO_ENOBUFS, sk)) {
		if (!test_and_set_bit(NETLINK_S_CONGESTED,
				      &nlk_sk(sk)->state)) {
			WRITE_ONCE(sk->sk_err, ENOBUFS);
			sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty_lockless(&sk->sk_receive_queue))
		clear_bit(NETLINK_S_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

static void netlink_skb_destructor(struct sk_buff *skb)
{
	if (is_vmalloc_addr(skb->head)) {
		if (!skb->cloned ||
		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
			vfree_atomic(skb->head);

		skb->head = NULL;
	}
	if (skb->sk != NULL)
		sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running) {
		if (nlk->cb.done)
			nlk->cb.done(&nlk->cb);
		module_put(nlk->cb.module);
		kfree_skb(nlk->cb.skb);
	}

	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}

static void netlink_sock_destruct_work(struct work_struct *work)
{
	struct netlink_sock *nlk = container_of(work, struct netlink_sock,
						work);

	sk_free(&nlk->sk);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look: when several writers sleep and a reader wakes them up, all but
 * one immediately hit the write lock and grab all the CPUs. Exclusive sleep
 * solves this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
	unsigned long flags;

	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock_irqsave(&nl_table_lock, flags);
	atomic_inc(&nl_table_users);
	read_unlock_irqrestore(&nl_table_lock, flags);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}

struct netlink_compare_arg
{
	possible_net_t pnet;
	u32 portid;
};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
	(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))
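
/* Illustrative: with CONFIG_NET_NS, possible_net_t holds a pointer, so on
 * 64-bit the struct is padded from 8 + 4 = 12 bytes up to sizeof() == 16,
 * while the key really ends after portid at offset 12. Hashing the padding
 * would make equal keys hash differently, hence the offsetof() form.
 */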

static inline int netlink_compare(struct rhashtable_compare_arg *arg,
				  const void *ptr)
{
	const struct netlink_compare_arg *x = arg->key;
	const struct netlink_sock *nlk = ptr;

	return nlk->portid != x->portid ||
	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
}

static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
				     struct net *net, u32 portid)
{
	memset(arg, 0, sizeof(*arg));
	write_pnet(&arg->pnet, net);
	arg->portid = portid;
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
				     struct net *net)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, net, portid);
	return rhashtable_lookup_fast(&table->hash, &arg,
				      netlink_rhashtable_params);
}

static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
	return rhashtable_lookup_insert_key(&table->hash, &arg,
					    &nlk_sk(sk)->node,
					    netlink_rhashtable_params);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct netlink_table *table = &nl_table[protocol];
	struct sock *sk;

	rcu_read_lock();
	sk = __netlink_lookup(table, portid, net);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();

	return sk;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}

static int netlink_insert(struct sock *sk, u32 portid)
{
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	int err;

	lock_sock(sk);

	err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
	if (nlk_sk(sk)->bound)
		goto err;

	/* portid can be read locklessly from netlink_getname(). */
	WRITE_ONCE(nlk_sk(sk)->portid, portid);

	sock_hold(sk);

	err = __netlink_insert(table, sk);
	if (err) {
		/* In case the hashtable backend returns with -EBUSY
		 * from here, it must not escape to the caller.
		 */
		if (unlikely(err == -EBUSY))
			err = -EOVERFLOW;
		if (err == -EEXIST)
			err = -EADDRINUSE;
		sock_put(sk);
		goto err;
	}

	/* We need to ensure that the socket is hashed and visible. */
	smp_wmb();
	/* Paired with lockless reads from netlink_bind(),
	 * netlink_connect() and netlink_sendmsg().
	 */
	WRITE_ONCE(nlk_sk(sk)->bound, portid);

err:
	release_sock(sk);
	return err;
}

static void netlink_remove(struct sock *sk)
{
	struct netlink_table *table;

	table = &nl_table[sk->sk_protocol];
	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
				    netlink_rhashtable_params)) {
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}

	netlink_table_grab();
	if (nlk_sk(sk)->subscriptions) {
		__sk_del_bind_node(sk);
		netlink_update_listeners(sk);
	}
	if (sk->sk_protocol == NETLINK_GENERIC)
		atomic_inc(&genl_sk_destructing_cnt);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *dump_cb_mutex, int protocol,
			    int kern)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	mutex_init(&nlk->nl_cb_mutex);
	lockdep_set_class_and_name(&nlk->nl_cb_mutex,
				   nlk_cb_mutex_keys + protocol,
				   nlk_cb_mutex_key_strings[protocol]);
	nlk->dump_cb_mutex = dump_cb_mutex;
	init_waitqueue_head(&nlk->wait);

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int (*bind)(struct net *net, int group);
	void (*unbind)(struct net *net, int group);
	void (*release)(struct sock *sock, unsigned long *groups);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;
	protocol = array_index_nospec(protocol, MAX_LINKS);

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	unbind = nl_table[protocol].unbind;
	release = nl_table[protocol].release;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol, kern);
	if (err < 0)
		goto out_module;

	sock_prot_inuse_add(net, &netlink_proto, 1);

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
	nlk->netlink_unbind = unbind;
	nlk->netlink_release = release;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

static void deferred_put_nlk_sk(struct rcu_head *head)
{
	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
	struct sock *sk = &nlk->sk;

	kfree(nlk->groups);
	nlk->groups = NULL;

	if (!refcount_dec_and_test(&sk->sk_refcnt))
		return;

	if (nlk->cb_running && nlk->cb.done) {
		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
		schedule_work(&nlk->work);
		return;
	}

	sk_free(sk);
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */
	if (nlk->netlink_release)
		nlk->netlink_release(sk, nlk->groups);

	/* must not acquire netlink_table_lock in any way again before unbind
	 * and notifying genetlink is done as otherwise it might deadlock
	 */
	if (nlk->netlink_unbind) {
		int i;

		for (i = 0; i < nlk->ngroups; i++)
			if (test_bit(i, nlk->groups))
				nlk->netlink_unbind(sock_net(sk), i + 1);
	}
	if (sk->sk_protocol == NETLINK_GENERIC &&
	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
		wake_up(&genl_sk_destructing_waitq);

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid && nlk->bound) {
		struct netlink_notify n = {
			.net = sock_net(sk),
			.protocol = sk->sk_protocol,
			.portid = nlk->portid,
		};
		blocking_notifier_call_chain(&netlink_chain,
					     NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	if (netlink_is_kernel(sk)) {
		netlink_table_grab();
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].unbind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
		netlink_table_ungrab();
	}

	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);

	/* Because struct net might disappear soon, do not keep a pointer. */
	if (!sk->sk_net_refcnt && sock_net(sk) != &init_net) {
		__netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
		/* Because of deferred_put_nlk_sk and use of work queue,
		 * it is possible netns will be freed before this socket.
		 */
		sock_net_set(sk, &init_net);
		__netns_tracker_alloc(&init_net, &sk->ns_tracker,
				      false, GFP_KERNEL);
	}
	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
	return 0;
}

static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	s32 portid = task_tgid_vnr(current);
	int err;
	s32 rover = -4096;
	bool ok;

retry:
	cond_resched();
	rcu_read_lock();
	ok = !__netlink_lookup(table, portid, net);
	rcu_read_unlock();
	if (!ok) {
		/* Bind collision, search negative portid values. */
		if (rover == -4096)
			/* rover will be in range [S32_MIN, -4097] */
			rover = S32_MIN + get_random_u32_below(-4096 - S32_MIN);
		else if (rover >= -4096)
			rover = -4097;
		portid = rover--;
		goto retry;
	}

	err = netlink_insert(sk, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}

/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message from had
 * the capability @cap in the user namespace @user_ns when the netlink socket
 * was created, and that the sender of the message has it as well.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			  struct user_namespace *user_ns, int cap)
{
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
	       ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message from had
 * the capability @cap in the user namespace @user_ns when the netlink socket
 * was created, and that the sender of the message has it as well.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
{
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message from had
 * the capability @cap in all user namespaces when the netlink socket was
 * created, and that the sender of the message has it as well.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message from had
 * the capability @cap over the network namespace of the socket we received
 * the message from when the netlink socket was created, and that the sender
 * of the message has it as well.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);
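
/* Sketch of the typical call site (illustrative): a kernel-side message
 * handler gating a state-changing request on CAP_NET_ADMIN in the netns of
 * the receiving socket, e.g.
 *
 *	static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 *			return -EPERM;
 *		...
 *	}
 *
 * (my_doit is a placeholder; rtnetlink performs this check centrally in
 * rtnetlink_rcv_msg() for most state-changing RTM_* commands.)
 */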

static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
	       ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}

static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
out_unlock:
	netlink_table_ungrab();
	return err;
}

static void netlink_undo_bind(int group, unsigned long groups,
			      struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int undo;

	if (!nlk->netlink_unbind)
		return;

	for (undo = 0; undo < group; undo++)
		if (test_bit(undo, &groups))
			nlk->netlink_unbind(sock_net(sk), undo + 1);
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err = 0;
	unsigned long groups;
	bool bound;

	if (addr_len < sizeof(struct sockaddr_nl))
		return -EINVAL;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;
	groups = nladdr->nl_groups;

	/* Only superuser is allowed to listen to multicasts */
	if (groups) {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->ngroups < BITS_PER_LONG)
		groups &= (1UL << nlk->ngroups) - 1;

	/* Paired with WRITE_ONCE() in netlink_insert() */
	bound = READ_ONCE(nlk->bound);
	if (bound) {
		/* Ensure nlk->portid is up-to-date. */
		smp_rmb();

		if (nladdr->nl_pid != nlk->portid)
			return -EINVAL;
	}

	if (nlk->netlink_bind && groups) {
		int group;

		/* nl_groups is a u32, so cap the maximum groups we can bind */
		for (group = 0; group < BITS_PER_TYPE(u32); group++) {
			if (!test_bit(group, &groups))
				continue;
			err = nlk->netlink_bind(net, group + 1);
			if (!err)
				continue;
			netlink_undo_bind(group, groups, sk);
			return err;
		}
	}

	/* No need for barriers here as we return to user-space without
	 * using any of the bound attributes.
	 */
	netlink_lock_table();
	if (!bound) {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err) {
			netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk);
			goto unlock;
		}
	}

	if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		goto unlock;
	netlink_unlock_table();

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;

unlock:
	netlink_unlock_table();
	return err;
}

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

	if (addr->sa_family == AF_UNSPEC) {
		/* paired with READ_ONCE() in netlink_getsockbyportid() */
		WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED);
		/* dst_portid and dst_group can be read locklessly */
		WRITE_ONCE(nlk->dst_portid, 0);
		WRITE_ONCE(nlk->dst_group, 0);
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	if (alen < sizeof(struct sockaddr_nl))
		return -EINVAL;

	if ((nladdr->nl_groups || nladdr->nl_pid) &&
	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
		return -EPERM;

	/* No need for barriers here as we return to user-space without
	 * using any of the bound attributes.
	 * Paired with WRITE_ONCE() in netlink_insert().
	 */
	if (!READ_ONCE(nlk->bound))
		err = netlink_autobind(sock);

	if (err == 0) {
		/* paired with READ_ONCE() in netlink_getsockbyportid() */
		WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED);
		/* dst_portid and dst_group can be read locklessly */
		WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
		WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;

	if (peer) {
		/* Paired with WRITE_ONCE() in netlink_connect() */
		nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
		nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
	} else {
		/* Paired with WRITE_ONCE() in netlink_insert() */
		nladdr->nl_pid = READ_ONCE(nlk->portid);
		netlink_lock_table();
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
		netlink_unlock_table();
	}
	return sizeof(*nladdr);
}

static int netlink_ioctl(struct socket *sock, unsigned int cmd,
			 unsigned long arg)
{
	/* try to hand this ioctl down to the NIC drivers.
	 */
	return -ENOIOCTLCMD;
}

static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	/* dst_portid and sk_state can be changed in netlink_connect() */
	if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED &&
	    READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast)
{
	size_t head_size = SKB_HEAD_ALIGN(size);
	struct sk_buff *skb;
	void *data;

	if (head_size <= PAGE_SIZE || broadcast)
		return alloc_skb(size, GFP_KERNEL);

	data = kvmalloc(head_size, GFP_KERNEL);
	if (!data)
		return NULL;

	skb = __build_skb(data, head_size);
	if (!skb)
		kvfree(data);
	else if (is_vmalloc_addr(data))
		skb->destructor = netlink_skb_destructor;

	return skb;
}
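
/* Illustrative: with 4 KiB pages, a unicast message whose aligned head size
 * exceeds PAGE_SIZE takes the kvmalloc() path above, which may fall back to
 * vmalloc() memory; netlink_skb_destructor() then releases it with
 * vfree_atomic(). Broadcast skbs stay on the plain alloc_skb() path, which
 * keeps them cheap to clone for every listener in do_one_broadcast().
 */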

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	     test_bit(NETLINK_S_CONGESTED, &nlk->state))) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	netlink_skb_set_owner_r(skb, sk);
	return 0;
}
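
/* The "return 1" contract above is easy to misuse; netlink_unicast() below
 * shows the expected pattern of retrying the socket lookup when the
 * reference was dropped while waiting for receive-queue memory.
 */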

static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	netlink_deliver_tap(sock_net(sk), skb);

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
	return len;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = __netlink_sendskb(sk, skb);

	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
	int delta;

	WARN_ON(skb->sk != NULL);
	delta = skb->end - skb->tail;
	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		consume_skb(skb);
		skb = nskb;
	}

	pskb_expand_head(skb, 0, -delta,
			 (allocation & ~__GFP_DIRECT_RECLAIM) |
			 __GFP_NOWARN | __GFP_NORETRY);
	return skb;
}

static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
				  struct sock *ssk)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		netlink_skb_set_owner_r(skb, sk);
		NETLINK_CB(skb).sk = ssk;
		netlink_deliver_tap_kernel(sk, ssk, skb);
		nlk->netlink_rcv(skb);
		consume_skb(skb);
	} else {
		kfree_skb(skb);
	}
	sock_put(sk);
	return ret;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 portid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbyportid(ssk, portid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb, ssk);

	if (sk_filter(sk, skb)) {
		err = skb->len;
		kfree_skb(skb);
		sock_put(sk);
		return err;
	}

	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);
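
/* Minimal kernel-side usage sketch (illustrative; names prefixed my_ are
 * placeholders and error handling is trimmed). A subsystem typically builds
 * the message with nlmsg_new()/nlmsg_put() and replies to the requester's
 * portid over its kernel socket:
 *
 *	struct sk_buff *skb = nlmsg_new(payload_len, GFP_KERNEL);
 *	struct nlmsghdr *nlh = nlmsg_put(skb, 0, seq, MY_MSG_TYPE,
 *					 payload_len, 0);
 *	memcpy(nlmsg_data(nlh), payload, payload_len);
 *	netlink_unicast(my_kernel_sk, skb, dst_portid, MSG_DONTWAIT);
 *
 * netlink_unicast() consumes the skb on both success and failure.
 */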

int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
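
/* Typical pattern (illustrative): event producers check this before paying
 * for skb allocation, e.g.
 *
 *	if (!netlink_has_listeners(my_kernel_sk, MY_MCAST_GROUP))
 *		return;		// nobody subscribed, skip building the event
 */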

bool netlink_strict_get_check(struct sk_buff *skb)
{
	return nlk_test_bit(STRICT_CHK, NETLINK_CB(skb).sk);
}
EXPORT_SYMBOL_GPL(netlink_strict_get_check);

static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
		netlink_skb_set_owner_r(skb, sk);
		__netlink_sendskb(sk, skb);
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
	}
	return -1;
}

struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 portid;
	u32 group;
	int failure;
	int delivery_failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
	void *tx_data;
};

static void do_one_broadcast(struct sock *sk,
			     struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		return;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		return;

	if (!net_eq(sock_net(sk), p->net)) {
		if (!nlk_test_bit(LISTEN_ALL_NSID, sk))
			return;

		if (!peernet_has_id(sock_net(sk), p->net))
			return;

		if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
				     CAP_NET_BROADCAST))
			return;
	}

	if (p->failure) {
		netlink_overrun(sk);
		return;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
		if (nlk_test_bit(BROADCAST_SEND_ERROR, sk))
			p->delivery_failure = 1;
		goto out;
	}

	if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
		goto out;
	}

	if (sk_filter(sk, p->skb2)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
		goto out;
	}
	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
	if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
		NETLINK_CB(p->skb2).nsid_is_set = true;
	val = netlink_broadcast_deliver(sk, p->skb2);
	if (val < 0) {
		netlink_overrun(sk);
		if (nlk_test_bit(BROADCAST_SEND_ERROR, sk))
			p->delivery_failure = 1;
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
out:
	sock_put(sk);
}

int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
			       u32 portid,
			       u32 group, gfp_t allocation,
			       netlink_filter_fn filter,
			       void *filter_data)
{
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.portid = portid;
	info.group = group;
	info.failure = 0;
	info.delivery_failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;
	info.tx_filter = filter;
	info.tx_data = filter_data;

	/* While we sleep in clone, do not allow the socket list to change */

	netlink_lock_table();

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	consume_skb(skb);

	netlink_unlock_table();

	if (info.delivery_failure) {
		kfree_skb(info.skb2);
		return -ENOBUFS;
	}
	consume_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && gfpflags_allow_blocking(allocation))
			yield();
		return 0;
	}
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
		      u32 group, gfp_t allocation)
{
	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
					  NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);
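
/* Usage sketch (illustrative; MY_GRP and my_kernel_sk are placeholders).
 * The skb is always consumed, and -ESRCH simply means the group currently
 * has no subscribers:
 *
 *	err = netlink_broadcast(my_kernel_sk, skb, 0, MY_GRP, GFP_KERNEL);
 *	if (err && err != -ESRCH)
 *		pr_warn("event delivery failed: %d\n", err);
 */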

struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 portid;
	u32 group;
	int code;
};

static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int ret = 0;

	if (sk == p->exclude_sk)
		goto out;

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
		goto out;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->code == ENOBUFS && nlk_test_bit(RECV_NO_ENOBUFS, sk)) {
		ret = 1;
		goto out;
	}

	WRITE_ONCE(sk->sk_err, p->code);
	sk_error_report(sk);
out:
	return ret;
}

/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @portid: the PORTID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
	struct netlink_set_err_data info;
	unsigned long flags;
	struct sock *sk;
	int ret = 0;

	info.exclude_sk = ssk;
	info.portid = portid;
	info.group = group;
	/* sk->sk_err wants a positive error value */
	info.code = -code;

	read_lock_irqsave(&nl_table_lock, flags);

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock_irqrestore(&nl_table_lock, flags);
	return ret;
}
EXPORT_SYMBOL(netlink_set_err);
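
/* Illustrative pairing with the broadcast path: when multicast delivery
 * runs out of buffer space, a producer may push the error to the remaining
 * subscribers of the group, e.g.
 *
 *	err = netlink_broadcast(my_kernel_sk, skb, 0, MY_GRP, GFP_KERNEL);
 *	if (err == -ENOBUFS)
 *		netlink_set_err(my_kernel_sk, 0, MY_GRP, -ENOBUFS);
 *
 * (placeholders as above; rtnetlink wraps this helper in a similar way via
 * rtnl_set_sk_err().)
 */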

/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	__assign_bit(group - 1, nlk->groups, new);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}

static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int nr = -1;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optlen >= sizeof(int) &&
	    copy_from_sockptr(&val, optval, sizeof(val)))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		nr = NETLINK_F_RECV_PKTINFO;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		int err;

		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
			err = nlk->netlink_bind(sock_net(sk), val);
			if (err)
				return err;
		}
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
			nlk->netlink_unbind(sock_net(sk), val);

		break;
	}
	case NETLINK_BROADCAST_ERROR:
		nr = NETLINK_F_BROADCAST_SEND_ERROR;
		break;
	case NETLINK_NO_ENOBUFS:
		assign_bit(NETLINK_F_RECV_NO_ENOBUFS, &nlk->flags, val);
		if (val) {
			clear_bit(NETLINK_S_CONGESTED, &nlk->state);
			wake_up_interruptible(&nlk->wait);
		}
		break;
	case NETLINK_LISTEN_ALL_NSID:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
			return -EPERM;
		nr = NETLINK_F_LISTEN_ALL_NSID;
		break;
	case NETLINK_CAP_ACK:
		nr = NETLINK_F_CAP_ACK;
		break;
	case NETLINK_EXT_ACK:
		nr = NETLINK_F_EXT_ACK;
		break;
	case NETLINK_GET_STRICT_CHK:
		nr = NETLINK_F_STRICT_CHK;
		break;
	default:
		return -ENOPROTOOPT;
	}
	if (nr >= 0)
		assign_bit(nr, &nlk->flags, val);
	return 0;
}

static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int flag;
	int len, val;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		flag = NETLINK_F_RECV_PKTINFO;
		break;
	case NETLINK_BROADCAST_ERROR:
		flag = NETLINK_F_BROADCAST_SEND_ERROR;
		break;
	case NETLINK_NO_ENOBUFS:
		flag = NETLINK_F_RECV_NO_ENOBUFS;
		break;
	case NETLINK_LIST_MEMBERSHIPS: {
		int pos, idx, shift, err = 0;

		netlink_lock_table();
		for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
			if (len - pos < sizeof(u32))
				break;

			idx = pos / sizeof(unsigned long);
			shift = (pos % sizeof(unsigned long)) * 8;
			if (put_user((u32)(nlk->groups[idx] >> shift),
				     (u32 __user *)(optval + pos))) {
				err = -EFAULT;
				break;
			}
		}
		if (put_user(ALIGN(BITS_TO_BYTES(nlk->ngroups), sizeof(u32)), optlen))
			err = -EFAULT;
		netlink_unlock_table();
		return err;
	}
	case NETLINK_LISTEN_ALL_NSID:
		flag = NETLINK_F_LISTEN_ALL_NSID;
		break;
	case NETLINK_CAP_ACK:
		flag = NETLINK_F_CAP_ACK;
		break;
	case NETLINK_EXT_ACK:
		flag = NETLINK_F_EXT_ACK;
		break;
	case NETLINK_GET_STRICT_CHK:
		flag = NETLINK_F_STRICT_CHK;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = test_bit(flag, &nlk->flags);

	if (put_user(len, optlen) ||
	    copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
1804
netlink_cmsg_recv_pktinfo(struct msghdr * msg,struct sk_buff * skb)1805 static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
1806 {
1807 struct nl_pktinfo info;
1808
1809 info.group = NETLINK_CB(skb).dst_group;
1810 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
1811 }
1812
netlink_cmsg_listen_all_nsid(struct sock * sk,struct msghdr * msg,struct sk_buff * skb)1813 static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
1814 struct sk_buff *skb)
1815 {
1816 if (!NETLINK_CB(skb).nsid_is_set)
1817 return;
1818
1819 put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
1820 &NETLINK_CB(skb).nsid);
1821 }
1822
netlink_sendmsg(struct socket * sock,struct msghdr * msg,size_t len)1823 static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1824 {
1825 struct sock *sk = sock->sk;
1826 struct netlink_sock *nlk = nlk_sk(sk);
1827 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
1828 u32 dst_portid;
1829 u32 dst_group;
1830 struct sk_buff *skb;
1831 int err;
1832 struct scm_cookie scm;
1833 u32 netlink_skb_flags = 0;
1834
1835 if (msg->msg_flags & MSG_OOB)
1836 return -EOPNOTSUPP;
1837
1838 if (len == 0) {
1839 pr_warn_once("Zero length message leads to an empty skb\n");
1840 return -ENODATA;
1841 }
1842
1843 err = scm_send(sock, msg, &scm, true);
1844 if (err < 0)
1845 return err;
1846
1847 if (msg->msg_namelen) {
1848 err = -EINVAL;
1849 if (msg->msg_namelen < sizeof(struct sockaddr_nl))
1850 goto out;
1851 if (addr->nl_family != AF_NETLINK)
1852 goto out;
1853 dst_portid = addr->nl_pid;
1854 dst_group = ffs(addr->nl_groups);
1855 err = -EPERM;
1856 if ((dst_group || dst_portid) &&
1857 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1858 goto out;
1859 netlink_skb_flags |= NETLINK_SKB_DST;
1860 } else {
1861 /* Paired with WRITE_ONCE() in netlink_connect() */
1862 dst_portid = READ_ONCE(nlk->dst_portid);
1863 dst_group = READ_ONCE(nlk->dst_group);
1864 }
1865
1866 /* Paired with WRITE_ONCE() in netlink_insert() */
1867 if (!READ_ONCE(nlk->bound)) {
1868 err = netlink_autobind(sock);
1869 if (err)
1870 goto out;
1871 } else {
1872 /* Ensure nlk is hashed and visible. */
1873 smp_rmb();
1874 }
1875
1876 err = -EMSGSIZE;
1877 if (len > sk->sk_sndbuf - 32)
1878 goto out;
1879 err = -ENOBUFS;
1880 skb = netlink_alloc_large_skb(len, dst_group);
1881 if (skb == NULL)
1882 goto out;
1883
1884 NETLINK_CB(skb).portid = nlk->portid;
1885 NETLINK_CB(skb).dst_group = dst_group;
1886 NETLINK_CB(skb).creds = scm.creds;
1887 NETLINK_CB(skb).flags = netlink_skb_flags;
1888
1889 err = -EFAULT;
1890 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1891 kfree_skb(skb);
1892 goto out;
1893 }
1894
1895 err = security_netlink_send(sk, skb);
1896 if (err) {
1897 kfree_skb(skb);
1898 goto out;
1899 }
1900
1901 if (dst_group) {
1902 refcount_inc(&skb->users);
1903 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
1904 }
1905 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags & MSG_DONTWAIT);
1906
1907 out:
1908 scm_destroy(&scm);
1909 return err;
1910 }
1911
netlink_recvmsg(struct socket * sock,struct msghdr * msg,size_t len,int flags)1912 static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1913 int flags)
1914 {
1915 struct scm_cookie scm;
1916 struct sock *sk = sock->sk;
1917 struct netlink_sock *nlk = nlk_sk(sk);
1918 size_t copied, max_recvmsg_len;
1919 struct sk_buff *skb, *data_skb;
1920 int err, ret;
1921
1922 if (flags & MSG_OOB)
1923 return -EOPNOTSUPP;
1924
1925 copied = 0;
1926
1927 skb = skb_recv_datagram(sk, flags, &err);
1928 if (skb == NULL)
1929 goto out;
1930
1931 data_skb = skb;
1932
1933 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
1934 if (unlikely(skb_shinfo(skb)->frag_list)) {
1935 /*
1936 * If this skb has a frag_list, it means that we
1937 * will have to use the frag_list skb's data for compat tasks
1938 * and the regular skb's data for normal (non-compat) tasks.
1939 *
1940 * If we need to send the compat skb, assign it to the
1941 * 'data_skb' variable so that it will be used below for data
1942 * copying. We keep 'skb' for everything else, including
1943 * freeing both later.
1944 */
1945 if (flags & MSG_CMSG_COMPAT)
1946 data_skb = skb_shinfo(skb)->frag_list;
1947 }
1948 #endif
1949
1950 /* Record the max length of recvmsg() calls for future allocations */
1951 max_recvmsg_len = max(READ_ONCE(nlk->max_recvmsg_len), len);
1952 max_recvmsg_len = min_t(size_t, max_recvmsg_len,
1953 SKB_WITH_OVERHEAD(32768));
1954 WRITE_ONCE(nlk->max_recvmsg_len, max_recvmsg_len);
1955
1956 copied = data_skb->len;
1957 if (len < copied) {
1958 msg->msg_flags |= MSG_TRUNC;
1959 copied = len;
1960 }
1961
1962 err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
1963
1964 if (msg->msg_name) {
1965 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
1966 addr->nl_family = AF_NETLINK;
1967 addr->nl_pad = 0;
1968 addr->nl_pid = NETLINK_CB(skb).portid;
1969 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
1970 msg->msg_namelen = sizeof(*addr);
1971 }
1972
1973 if (nlk_test_bit(RECV_PKTINFO, sk))
1974 netlink_cmsg_recv_pktinfo(msg, skb);
1975 if (nlk_test_bit(LISTEN_ALL_NSID, sk))
1976 netlink_cmsg_listen_all_nsid(sk, msg, skb);
1977
1978 memset(&scm, 0, sizeof(scm));
1979 scm.creds = *NETLINK_CREDS(skb);
1980 if (flags & MSG_TRUNC)
1981 copied = data_skb->len;
1982
1983 skb_free_datagram(sk, skb);
1984
1985 if (READ_ONCE(nlk->cb_running) &&
1986 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
1987 ret = netlink_dump(sk, false);
1988 if (ret) {
1989 WRITE_ONCE(sk->sk_err, -ret);
1990 sk_error_report(sk);
1991 }
1992 }
1993
1994 scm_recv(sock, msg, &scm, flags);
1995 out:
1996 netlink_rcv_wake(sk);
1997 return err ? : copied;
1998 }
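
/* Illustrative sketch (userspace view, not part of the original file):
 * the MSG_TRUNC handling above lets a reader learn the full message
 * size before committing a buffer:
 *
 *	len = recv(fd, NULL, 0, MSG_PEEK | MSG_TRUNC);
 *	(allocate len bytes, then recv() again without MSG_PEEK)
 */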
1999
2000 static void netlink_data_ready(struct sock *sk)
2001 {
2002 BUG();
2003 }
2004
2005 /*
2006 * We export these functions to other modules. They provide a
2007 * complete set of kernel non-blocking support for message
2008 * queueing.
2009 */
2010
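/* Illustrative sketch, not part of the original file: a subsystem
 * normally reaches this through the netlink_kernel_create() wrapper.
 * MY_PROTO, my_input() and my_doit() below are hypothetical names:
 *
 *	static void my_input(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, my_doit);
 *	}
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.groups	= 32,
 *		.input	= my_input,
 *	};
 *	struct sock *nls = netlink_kernel_create(&init_net, MY_PROTO, &cfg);
 *	if (!nls)
 *		return -ENOMEM;
 */
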
2011 struct sock *
2012 __netlink_kernel_create(struct net *net, int unit, struct module *module,
2013 struct netlink_kernel_cfg *cfg)
2014 {
2015 struct socket *sock;
2016 struct sock *sk;
2017 struct netlink_sock *nlk;
2018 struct listeners *listeners = NULL;
2019 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2020 unsigned int groups;
2021
2022 BUG_ON(!nl_table);
2023
2024 if (unit < 0 || unit >= MAX_LINKS)
2025 return NULL;
2026
2027 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2028 return NULL;
2029
2030 if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
2031 goto out_sock_release_nosk;
2032
2033 sk = sock->sk;
2034
2035 if (!cfg || cfg->groups < 32)
2036 groups = 32;
2037 else
2038 groups = cfg->groups;
2039
2040 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2041 if (!listeners)
2042 goto out_sock_release;
2043
2044 sk->sk_data_ready = netlink_data_ready;
2045 if (cfg && cfg->input)
2046 nlk_sk(sk)->netlink_rcv = cfg->input;
2047
2048 if (netlink_insert(sk, 0))
2049 goto out_sock_release;
2050
2051 nlk = nlk_sk(sk);
2052 set_bit(NETLINK_F_KERNEL_SOCKET, &nlk->flags);
2053
2054 netlink_table_grab();
2055 if (!nl_table[unit].registered) {
2056 nl_table[unit].groups = groups;
2057 rcu_assign_pointer(nl_table[unit].listeners, listeners);
2058 nl_table[unit].cb_mutex = cb_mutex;
2059 nl_table[unit].module = module;
2060 if (cfg) {
2061 nl_table[unit].bind = cfg->bind;
2062 nl_table[unit].unbind = cfg->unbind;
2063 nl_table[unit].release = cfg->release;
2064 nl_table[unit].flags = cfg->flags;
2065 }
2066 nl_table[unit].registered = 1;
2067 } else {
2068 kfree(listeners);
2069 nl_table[unit].registered++;
2070 }
2071 netlink_table_ungrab();
2072 return sk;
2073
2074 out_sock_release:
2075 kfree(listeners);
2076 netlink_kernel_release(sk);
2077 return NULL;
2078
2079 out_sock_release_nosk:
2080 sock_release(sock);
2081 return NULL;
2082 }
2083 EXPORT_SYMBOL(__netlink_kernel_create);
2084
2085 void
2086 netlink_kernel_release(struct sock *sk)
2087 {
2088 if (sk == NULL || sk->sk_socket == NULL)
2089 return;
2090
2091 sock_release(sk->sk_socket);
2092 }
2093 EXPORT_SYMBOL(netlink_kernel_release);
2094
2095 int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
2096 {
2097 struct listeners *new, *old;
2098 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
2099
2100 if (groups < 32)
2101 groups = 32;
2102
2103 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
2104 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2105 if (!new)
2106 return -ENOMEM;
2107 old = nl_deref_protected(tbl->listeners);
2108 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2109 rcu_assign_pointer(tbl->listeners, new);
2110
2111 kfree_rcu(old, rcu);
2112 }
2113 tbl->groups = groups;
2114
2115 return 0;
2116 }
2117
2118 /**
2119 * netlink_change_ngroups - change number of multicast groups
2120 *
2121 * This changes the number of multicast groups that are available
2122 * on a certain netlink family. Note that it is not possible to
2123 * change the number of groups to below 32. Also note that it does
2124 * not implicitly call netlink_clear_multicast_users() when the
2125 * number of groups is reduced.
2126 *
2127 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2128 * @groups: The new number of groups.
2129 */
2130 int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2131 {
2132 int err;
2133
2134 netlink_table_grab();
2135 err = __netlink_change_ngroups(sk, groups);
2136 netlink_table_ungrab();
2137
2138 return err;
2139 }
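
/* Illustrative sketch, not part of the original file: generic netlink,
 * for instance, grows the group space when a family registers multicast
 * groups; genl_sock here stands for that family's kernel socket:
 *
 *	err = netlink_change_ngroups(genl_sock, n_groups);
 */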
2140
2141 void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2142 {
2143 struct sock *sk;
2144 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2145
2146 sk_for_each_bound(sk, &tbl->mc_list)
2147 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2148 }
2149
2150 struct nlmsghdr *
2151 __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
2152 {
2153 struct nlmsghdr *nlh;
2154 int size = nlmsg_msg_size(len);
2155
2156 nlh = skb_put(skb, NLMSG_ALIGN(size));
2157 nlh->nlmsg_type = type;
2158 nlh->nlmsg_len = size;
2159 nlh->nlmsg_flags = flags;
2160 nlh->nlmsg_pid = portid;
2161 nlh->nlmsg_seq = seq;
2162 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
2163 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
2164 return nlh;
2165 }
2166 EXPORT_SYMBOL(__nlmsg_put);
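
/* Illustrative sketch, not part of the original file: callers normally
 * use the nlmsg_put() wrapper, which returns NULL when the skb lacks
 * tailroom. MY_MSG_TYPE and struct my_hdr are hypothetical:
 *
 *	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE,
 *			sizeof(struct my_hdr), 0);
 *	if (!nlh)
 *		return -EMSGSIZE;
 *	(fill nlmsg_data(nlh) ...)
 *	nlmsg_end(skb, nlh);
 */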
2167
2168 static size_t
2169 netlink_ack_tlv_len(struct netlink_sock *nlk, int err,
2170 const struct netlink_ext_ack *extack)
2171 {
2172 size_t tlvlen;
2173
2174 if (!extack || !test_bit(NETLINK_F_EXT_ACK, &nlk->flags))
2175 return 0;
2176
2177 tlvlen = 0;
2178 if (extack->_msg)
2179 tlvlen += nla_total_size(strlen(extack->_msg) + 1);
2180 if (extack->cookie_len)
2181 tlvlen += nla_total_size(extack->cookie_len);
2182
2183 /* The following attributes are only reported for errors (not warnings) */
2184 if (!err)
2185 return tlvlen;
2186
2187 if (extack->bad_attr)
2188 tlvlen += nla_total_size(sizeof(u32));
2189 if (extack->policy)
2190 tlvlen += netlink_policy_dump_attr_size_estimate(extack->policy);
2191 if (extack->miss_type)
2192 tlvlen += nla_total_size(sizeof(u32));
2193 if (extack->miss_nest)
2194 tlvlen += nla_total_size(sizeof(u32));
2195
2196 return tlvlen;
2197 }
2198
2199 static void
2200 netlink_ack_tlv_fill(struct sk_buff *in_skb, struct sk_buff *skb,
2201 const struct nlmsghdr *nlh, int err,
2202 const struct netlink_ext_ack *extack)
2203 {
2204 if (extack->_msg)
2205 WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg));
2206 if (extack->cookie_len)
2207 WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
2208 extack->cookie_len, extack->cookie));
2209
2210 if (!err)
2211 return;
2212
2213 if (extack->bad_attr &&
2214 !WARN_ON((u8 *)extack->bad_attr < in_skb->data ||
2215 (u8 *)extack->bad_attr >= in_skb->data + in_skb->len))
2216 WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
2217 (u8 *)extack->bad_attr - (const u8 *)nlh));
2218 if (extack->policy)
2219 netlink_policy_dump_write_attr(skb, extack->policy,
2220 NLMSGERR_ATTR_POLICY);
2221 if (extack->miss_type)
2222 WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_TYPE,
2223 extack->miss_type));
2224 if (extack->miss_nest &&
2225 !WARN_ON((u8 *)extack->miss_nest < in_skb->data ||
2226 (u8 *)extack->miss_nest > in_skb->data + in_skb->len))
2227 WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_NEST,
2228 (u8 *)extack->miss_nest - (const u8 *)nlh));
2229 }
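
/* Illustrative sketch, not part of the original file: handlers populate
 * the extended ACK with the NL_SET_ERR_MSG*() helpers, and the fills
 * above encode the result as NLMSGERR_ATTR_* TLVs. MY_ATTR is
 * hypothetical:
 *
 *	if (!attrs[MY_ATTR]) {
 *		NL_SET_ERR_MSG(extack, "required attribute missing");
 *		return -EINVAL;
 *	}
 */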
2230
2231 /*
2232 * It looks a bit ugly.
2233 * It would be better to create a kernel thread.
2234 */
2235
2236 static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
2237 struct netlink_callback *cb,
2238 struct netlink_ext_ack *extack)
2239 {
2240 struct nlmsghdr *nlh;
2241 size_t extack_len;
2242
2243 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(nlk->dump_done_errno),
2244 NLM_F_MULTI | cb->answer_flags);
2245 if (WARN_ON(!nlh))
2246 return -ENOBUFS;
2247
2248 nl_dump_check_consistent(cb, nlh);
2249 memcpy(nlmsg_data(nlh), &nlk->dump_done_errno, sizeof(nlk->dump_done_errno));
2250
2251 extack_len = netlink_ack_tlv_len(nlk, nlk->dump_done_errno, extack);
2252 if (extack_len) {
2253 nlh->nlmsg_flags |= NLM_F_ACK_TLVS;
2254 if (skb_tailroom(skb) >= extack_len) {
2255 netlink_ack_tlv_fill(cb->skb, skb, cb->nlh,
2256 nlk->dump_done_errno, extack);
2257 nlmsg_end(skb, nlh);
2258 }
2259 }
2260
2261 return 0;
2262 }
2263
2264 static int netlink_dump(struct sock *sk, bool lock_taken)
2265 {
2266 struct netlink_sock *nlk = nlk_sk(sk);
2267 struct netlink_ext_ack extack = {};
2268 struct netlink_callback *cb;
2269 struct sk_buff *skb = NULL;
2270 size_t max_recvmsg_len;
2271 struct module *module;
2272 int err = -ENOBUFS;
2273 int alloc_min_size;
2274 int alloc_size;
2275
2276 if (!lock_taken)
2277 mutex_lock(&nlk->nl_cb_mutex);
2278 if (!nlk->cb_running) {
2279 err = -EINVAL;
2280 goto errout_skb;
2281 }
2282
2283 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2284 goto errout_skb;
2285
2286 /* NLMSG_GOODSIZE is small to avoid high-order allocations being
2287 * required, but it makes sense to _attempt_ a 16K allocation
2288 * to reduce the number of system calls on dump operations, if the
2289 * user ever provided a big enough buffer.
2290 */
2291 cb = &nlk->cb;
2292 alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2293
2294 max_recvmsg_len = READ_ONCE(nlk->max_recvmsg_len);
2295 if (alloc_min_size < max_recvmsg_len) {
2296 alloc_size = max_recvmsg_len;
2297 skb = alloc_skb(alloc_size,
2298 (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
2299 __GFP_NOWARN | __GFP_NORETRY);
2300 }
2301 if (!skb) {
2302 alloc_size = alloc_min_size;
2303 skb = alloc_skb(alloc_size, GFP_KERNEL);
2304 }
2305 if (!skb)
2306 goto errout_skb;
2307
2308 /* Trim skb to the allocated size. The user is expected to provide a
2309 * buffer as large as max(min_dump_alloc, 16KiB (max_recvmsg_len is
2310 * capped in netlink_recvmsg())). The dump will pack as many smaller
2311 * messages as will fit within the allocated skb. skb is typically
2312 * allocated with more space than required (possibly close to 2x the
2313 * requested size, due to rounding up to the next power of 2). Allowing
2314 * the dump to use the excess space would make it difficult for a user
2315 * to have a reasonable static buffer based on the expected largest
2316 * dump of a single netdev. The outcome would be a MSG_TRUNC error.
2317 */
2318 skb_reserve(skb, skb_tailroom(skb) - alloc_size);
2319
2320 /* Make sure malicious BPF programs cannot read uninitialized memory
2321 * between skb->head and skb->data.
2322 */
2323 skb_reset_network_header(skb);
2324 skb_reset_mac_header(skb);
2325
2326 netlink_skb_set_owner_r(skb, sk);
2327
2328 if (nlk->dump_done_errno > 0) {
2329 struct mutex *extra_mutex = nlk->dump_cb_mutex;
2330
2331 cb->extack = &extack;
2332
2333 if (cb->flags & RTNL_FLAG_DUMP_UNLOCKED)
2334 extra_mutex = NULL;
2335 if (extra_mutex)
2336 mutex_lock(extra_mutex);
2337 nlk->dump_done_errno = cb->dump(skb, cb);
2338 if (extra_mutex)
2339 mutex_unlock(extra_mutex);
2340
2341 /* EMSGSIZE plus something already in the skb means
2342 * that there's more to dump but the current skb has filled up.
2343 * If the callback really wants to return EMSGSIZE to user space
2344 * it needs to do so again, on the next cb->dump() call,
2345 * without putting data in the skb.
2346 */
2347 if (nlk->dump_done_errno == -EMSGSIZE && skb->len)
2348 nlk->dump_done_errno = skb->len;
2349
2350 cb->extack = NULL;
2351 }
2352
2353 if (nlk->dump_done_errno > 0 ||
2354 skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
2355 mutex_unlock(&nlk->nl_cb_mutex);
2356
2357 if (sk_filter(sk, skb))
2358 kfree_skb(skb);
2359 else
2360 __netlink_sendskb(sk, skb);
2361 return 0;
2362 }
2363
2364 if (netlink_dump_done(nlk, skb, cb, &extack))
2365 goto errout_skb;
2366
2367 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2368 /* frag_list skb's data is used for compat tasks
2369 * and the regular skb's data for normal (non-compat) tasks.
2370 * See netlink_recvmsg().
2371 */
2372 if (unlikely(skb_shinfo(skb)->frag_list)) {
2373 if (netlink_dump_done(nlk, skb_shinfo(skb)->frag_list, cb, &extack))
2374 goto errout_skb;
2375 }
2376 #endif
2377
2378 if (sk_filter(sk, skb))
2379 kfree_skb(skb);
2380 else
2381 __netlink_sendskb(sk, skb);
2382
2383 if (cb->done)
2384 cb->done(cb);
2385
2386 WRITE_ONCE(nlk->cb_running, false);
2387 module = cb->module;
2388 skb = cb->skb;
2389 mutex_unlock(&nlk->nl_cb_mutex);
2390 module_put(module);
2391 consume_skb(skb);
2392 return 0;
2393
2394 errout_skb:
2395 mutex_unlock(&nlk->nl_cb_mutex);
2396 kfree_skb(skb);
2397 return err;
2398 }
2399
2400 int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2401 const struct nlmsghdr *nlh,
2402 struct netlink_dump_control *control)
2403 {
2404 struct netlink_callback *cb;
2405 struct netlink_sock *nlk;
2406 struct sock *sk;
2407 int ret;
2408
2409 refcount_inc(&skb->users);
2410
2411 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2412 if (sk == NULL) {
2413 ret = -ECONNREFUSED;
2414 goto error_free;
2415 }
2416
2417 nlk = nlk_sk(sk);
2418 mutex_lock(&nlk->nl_cb_mutex);
2419 /* A dump is in progress... */
2420 if (nlk->cb_running) {
2421 ret = -EBUSY;
2422 goto error_unlock;
2423 }
2424 /* take a reference on the module that cb->dump belongs to */
2425 if (!try_module_get(control->module)) {
2426 ret = -EPROTONOSUPPORT;
2427 goto error_unlock;
2428 }
2429
2430 cb = &nlk->cb;
2431 memset(cb, 0, sizeof(*cb));
2432 cb->dump = control->dump;
2433 cb->done = control->done;
2434 cb->nlh = nlh;
2435 cb->data = control->data;
2436 cb->module = control->module;
2437 cb->min_dump_alloc = control->min_dump_alloc;
2438 cb->flags = control->flags;
2439 cb->skb = skb;
2440
2441 cb->strict_check = nlk_test_bit(STRICT_CHK, NETLINK_CB(skb).sk);
2442
2443 if (control->start) {
2444 cb->extack = control->extack;
2445 ret = control->start(cb);
2446 cb->extack = NULL;
2447 if (ret)
2448 goto error_put;
2449 }
2450
2451 WRITE_ONCE(nlk->cb_running, true);
2452 nlk->dump_done_errno = INT_MAX;
2453
2454 ret = netlink_dump(sk, true);
2455
2456 sock_put(sk);
2457
2458 if (ret)
2459 return ret;
2460
2461 /* We successfully started a dump; by returning -EINTR we
2462 * signal that no ACK should be sent even if one was requested.
2463 */
2464 return -EINTR;
2465
2466 error_put:
2467 module_put(control->module);
2468 error_unlock:
2469 sock_put(sk);
2470 mutex_unlock(&nlk->nl_cb_mutex);
2471 error_free:
2472 kfree_skb(skb);
2473 return ret;
2474 }
2475 EXPORT_SYMBOL(__netlink_dump_start);
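
/* Illustrative sketch, not part of the original file: request handlers
 * usually reach this through the netlink_dump_start() wrapper, which
 * defaults control->module to THIS_MODULE. my_dump()/my_done() are
 * hypothetical callbacks and nls is the kernel-side socket:
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *		struct netlink_dump_control c = {
 *			.dump = my_dump,
 *			.done = my_done,
 *		};
 *		return netlink_dump_start(nls, skb, nlh, &c);
 *	}
 */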
2476
2477 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
2478 const struct netlink_ext_ack *extack)
2479 {
2480 struct sk_buff *skb;
2481 struct nlmsghdr *rep;
2482 struct nlmsgerr *errmsg;
2483 size_t payload = sizeof(*errmsg);
2484 struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
2485 unsigned int flags = 0;
2486 size_t tlvlen;
2487
2488 /* Error messages get the original request appended, unless the user
2489 * requests that the error message be capped, and carry extra error
2490 * data if requested.
2491 */
2492 if (err && !test_bit(NETLINK_F_CAP_ACK, &nlk->flags))
2493 payload += nlmsg_len(nlh);
2494 else
2495 flags |= NLM_F_CAPPED;
2496
2497 tlvlen = netlink_ack_tlv_len(nlk, err, extack);
2498 if (tlvlen)
2499 flags |= NLM_F_ACK_TLVS;
2500
2501 skb = nlmsg_new(payload + tlvlen, GFP_KERNEL);
2502 if (!skb)
2503 goto err_skb;
2504
2505 rep = nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2506 NLMSG_ERROR, sizeof(*errmsg), flags);
2507 if (!rep)
2508 goto err_bad_put;
2509 errmsg = nlmsg_data(rep);
2510 errmsg->error = err;
2511 errmsg->msg = *nlh;
2512
2513 if (!(flags & NLM_F_CAPPED)) {
2514 if (!nlmsg_append(skb, nlmsg_len(nlh)))
2515 goto err_bad_put;
2516
2517 memcpy(nlmsg_data(&errmsg->msg), nlmsg_data(nlh),
2518 nlmsg_len(nlh));
2519 }
2520
2521 if (tlvlen)
2522 netlink_ack_tlv_fill(in_skb, skb, nlh, err, extack);
2523
2524 nlmsg_end(skb, rep);
2525
2526 nlmsg_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid);
2527
2528 return;
2529
2530 err_bad_put:
2531 nlmsg_free(skb);
2532 err_skb:
2533 WRITE_ONCE(NETLINK_CB(in_skb).sk->sk_err, ENOBUFS);
2534 sk_error_report(NETLINK_CB(in_skb).sk);
2535 }
2536 EXPORT_SYMBOL(netlink_ack);
2537
2538 int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
2539 struct nlmsghdr *,
2540 struct netlink_ext_ack *))
2541 {
2542 struct netlink_ext_ack extack;
2543 struct nlmsghdr *nlh;
2544 int err;
2545
2546 while (skb->len >= nlmsg_total_size(0)) {
2547 int msglen;
2548
2549 memset(&extack, 0, sizeof(extack));
2550 nlh = nlmsg_hdr(skb);
2551 err = 0;
2552
2553 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
2554 return 0;
2555
2556 /* Only requests are handled by the kernel */
2557 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
2558 goto ack;
2559
2560 /* Skip control messages */
2561 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
2562 goto ack;
2563
2564 err = cb(skb, nlh, &extack);
2565 if (err == -EINTR)
2566 goto skip;
2567
2568 ack:
2569 if (nlh->nlmsg_flags & NLM_F_ACK || err)
2570 netlink_ack(skb, nlh, err, &extack);
2571
2572 skip:
2573 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
2574 if (msglen > skb->len)
2575 msglen = skb->len;
2576 skb_pull(skb, msglen);
2577 }
2578
2579 return 0;
2580 }
2581 EXPORT_SYMBOL(netlink_rcv_skb);
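
/* Illustrative sketch, not part of the original file: the cb passed to
 * netlink_rcv_skb() has the usual "doit" shape; my_doit() is
 * hypothetical:
 *
 *	static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 *			   struct netlink_ext_ack *extack)
 *	{
 *		(validate and process nlmsg_data(nlh) ...)
 *		return 0;
 *	}
 *
 * Returning 0 with NLM_F_ACK set still triggers netlink_ack() above.
 */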
2582
2583 /**
2584 * nlmsg_notify - send a notification netlink message
2585 * @sk: netlink socket to use
2586 * @skb: notification message
2587 * @portid: destination netlink portid for reports or 0
2588 * @group: destination multicast group or 0
2589 * @report: 1 to report back, 0 to disable
2590 * @flags: allocation flags
2591 */
2592 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
2593 unsigned int group, int report, gfp_t flags)
2594 {
2595 int err = 0;
2596
2597 if (group) {
2598 int exclude_portid = 0;
2599
2600 if (report) {
2601 refcount_inc(&skb->users);
2602 exclude_portid = portid;
2603 }
2604
2605 /* errors are reported via the destination sk->sk_err, but delivery
2606 * errors are propagated if the NETLINK_BROADCAST_ERROR flag is set */
2607 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
2608 if (err == -ESRCH)
2609 err = 0;
2610 }
2611
2612 if (report) {
2613 int err2;
2614
2615 err2 = nlmsg_unicast(sk, skb, portid);
2616 if (!err)
2617 err = err2;
2618 }
2619
2620 return err;
2621 }
2622 EXPORT_SYMBOL(nlmsg_notify);
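
/* Illustrative sketch, not part of the original file: rtnetlink's
 * rtnl_notify() is a typical caller; report is normally derived from
 * NLM_F_ECHO on the triggering request:
 *
 *	report = nlmsg_report(nlh);
 *	nlmsg_notify(sk, skb, portid, group, report, flags);
 */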
2623
2624 #ifdef CONFIG_PROC_FS
2625 struct nl_seq_iter {
2626 struct seq_net_private p;
2627 struct rhashtable_iter hti;
2628 int link;
2629 };
2630
2631 static void netlink_walk_start(struct nl_seq_iter *iter)
2632 {
2633 rhashtable_walk_enter(&nl_table[iter->link].hash, &iter->hti);
2634 rhashtable_walk_start(&iter->hti);
2635 }
2636
2637 static void netlink_walk_stop(struct nl_seq_iter *iter)
2638 {
2639 rhashtable_walk_stop(&iter->hti);
2640 rhashtable_walk_exit(&iter->hti);
2641 }
2642
2643 static void *__netlink_seq_next(struct seq_file *seq)
2644 {
2645 struct nl_seq_iter *iter = seq->private;
2646 struct netlink_sock *nlk;
2647
2648 do {
2649 for (;;) {
2650 nlk = rhashtable_walk_next(&iter->hti);
2651
2652 if (IS_ERR(nlk)) {
2653 if (PTR_ERR(nlk) == -EAGAIN)
2654 continue;
2655
2656 return nlk;
2657 }
2658
2659 if (nlk)
2660 break;
2661
2662 netlink_walk_stop(iter);
2663 if (++iter->link >= MAX_LINKS)
2664 return NULL;
2665
2666 netlink_walk_start(iter);
2667 }
2668 } while (sock_net(&nlk->sk) != seq_file_net(seq));
2669
2670 return nlk;
2671 }
2672
2673 static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
2674 __acquires(RCU)
2675 {
2676 struct nl_seq_iter *iter = seq->private;
2677 void *obj = SEQ_START_TOKEN;
2678 loff_t pos;
2679
2680 iter->link = 0;
2681
2682 netlink_walk_start(iter);
2683
2684 for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
2685 obj = __netlink_seq_next(seq);
2686
2687 return obj;
2688 }
2689
2690 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2691 {
2692 ++*pos;
2693 return __netlink_seq_next(seq);
2694 }
2695
2696 static void netlink_native_seq_stop(struct seq_file *seq, void *v)
2697 {
2698 struct nl_seq_iter *iter = seq->private;
2699
2700 if (iter->link >= MAX_LINKS)
2701 return;
2702
2703 netlink_walk_stop(iter);
2704 }
2705
2706
2707 static int netlink_native_seq_show(struct seq_file *seq, void *v)
2708 {
2709 if (v == SEQ_START_TOKEN) {
2710 seq_puts(seq,
2711 "sk Eth Pid Groups "
2712 "Rmem Wmem Dump Locks Drops Inode\n");
2713 } else {
2714 struct sock *s = v;
2715 struct netlink_sock *nlk = nlk_sk(s);
2716
2717 seq_printf(seq, "%pK %-3d %-10u %08x %-8d %-8d %-5d %-8d %-8u %-8lu\n",
2718 s,
2719 s->sk_protocol,
2720 nlk->portid,
2721 nlk->groups ? (u32)nlk->groups[0] : 0,
2722 sk_rmem_alloc_get(s),
2723 sk_wmem_alloc_get(s),
2724 READ_ONCE(nlk->cb_running),
2725 refcount_read(&s->sk_refcnt),
2726 atomic_read(&s->sk_drops),
2727 sock_i_ino(s)
2728 );
2729
2730 }
2731 return 0;
2732 }
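
/* Illustrative sample, not part of the original file: with the format
 * string above, a /proc/net/netlink line resembles (values made up):
 *
 *	0000000000000000 0   1234       00000001 0        0        0     2        0        10214
 */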
2733
2734 #ifdef CONFIG_BPF_SYSCALL
2735 struct bpf_iter__netlink {
2736 __bpf_md_ptr(struct bpf_iter_meta *, meta);
2737 __bpf_md_ptr(struct netlink_sock *, sk);
2738 };
2739
2740 DEFINE_BPF_ITER_FUNC(netlink, struct bpf_iter_meta *meta, struct netlink_sock *sk)
2741
2742 static int netlink_prog_seq_show(struct bpf_prog *prog,
2743 struct bpf_iter_meta *meta,
2744 void *v)
2745 {
2746 struct bpf_iter__netlink ctx;
2747
2748 meta->seq_num--; /* skip SEQ_START_TOKEN */
2749 ctx.meta = meta;
2750 ctx.sk = nlk_sk((struct sock *)v);
2751 return bpf_iter_run_prog(prog, &ctx);
2752 }
2753
2754 static int netlink_seq_show(struct seq_file *seq, void *v)
2755 {
2756 struct bpf_iter_meta meta;
2757 struct bpf_prog *prog;
2758
2759 meta.seq = seq;
2760 prog = bpf_iter_get_info(&meta, false);
2761 if (!prog)
2762 return netlink_native_seq_show(seq, v);
2763
2764 if (v != SEQ_START_TOKEN)
2765 return netlink_prog_seq_show(prog, &meta, v);
2766
2767 return 0;
2768 }
2769
2770 static void netlink_seq_stop(struct seq_file *seq, void *v)
2771 {
2772 struct bpf_iter_meta meta;
2773 struct bpf_prog *prog;
2774
2775 if (!v) {
2776 meta.seq = seq;
2777 prog = bpf_iter_get_info(&meta, true);
2778 if (prog)
2779 (void)netlink_prog_seq_show(prog, &meta, v);
2780 }
2781
2782 netlink_native_seq_stop(seq, v);
2783 }
2784 #else
2785 static int netlink_seq_show(struct seq_file *seq, void *v)
2786 {
2787 return netlink_native_seq_show(seq, v);
2788 }
2789
2790 static void netlink_seq_stop(struct seq_file *seq, void *v)
2791 {
2792 netlink_native_seq_stop(seq, v);
2793 }
2794 #endif
2795
2796 static const struct seq_operations netlink_seq_ops = {
2797 .start = netlink_seq_start,
2798 .next = netlink_seq_next,
2799 .stop = netlink_seq_stop,
2800 .show = netlink_seq_show,
2801 };
2802 #endif
2803
2804 int netlink_register_notifier(struct notifier_block *nb)
2805 {
2806 return blocking_notifier_chain_register(&netlink_chain, nb);
2807 }
2808 EXPORT_SYMBOL(netlink_register_notifier);
2809
2810 int netlink_unregister_notifier(struct notifier_block *nb)
2811 {
2812 return blocking_notifier_chain_unregister(&netlink_chain, nb);
2813 }
2814 EXPORT_SYMBOL(netlink_unregister_notifier);
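
/* Illustrative sketch, not part of the original file: subsystems watch
 * for NETLINK_URELEASE to drop state keyed by a departed socket.
 * MY_PROTO and my_netlink_event() are hypothetical:
 *
 *	static int my_netlink_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct netlink_notify *n = ptr;
 *
 *		if (event == NETLINK_URELEASE && n->protocol == MY_PROTO)
 *			(forget state keyed by n->portid ...)
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netlink_event,
 *	};
 *	netlink_register_notifier(&my_nb);
 */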
2815
2816 static const struct proto_ops netlink_ops = {
2817 .family = PF_NETLINK,
2818 .owner = THIS_MODULE,
2819 .release = netlink_release,
2820 .bind = netlink_bind,
2821 .connect = netlink_connect,
2822 .socketpair = sock_no_socketpair,
2823 .accept = sock_no_accept,
2824 .getname = netlink_getname,
2825 .poll = datagram_poll,
2826 .ioctl = netlink_ioctl,
2827 .listen = sock_no_listen,
2828 .shutdown = sock_no_shutdown,
2829 .setsockopt = netlink_setsockopt,
2830 .getsockopt = netlink_getsockopt,
2831 .sendmsg = netlink_sendmsg,
2832 .recvmsg = netlink_recvmsg,
2833 .mmap = sock_no_mmap,
2834 };
2835
2836 static const struct net_proto_family netlink_family_ops = {
2837 .family = PF_NETLINK,
2838 .create = netlink_create,
2839 .owner = THIS_MODULE, /* for consistency 8) */
2840 };
2841
2842 static int __net_init netlink_net_init(struct net *net)
2843 {
2844 #ifdef CONFIG_PROC_FS
2845 if (!proc_create_net("netlink", 0, net->proc_net, &netlink_seq_ops,
2846 sizeof(struct nl_seq_iter)))
2847 return -ENOMEM;
2848 #endif
2849 return 0;
2850 }
2851
2852 static void __net_exit netlink_net_exit(struct net *net)
2853 {
2854 #ifdef CONFIG_PROC_FS
2855 remove_proc_entry("netlink", net->proc_net);
2856 #endif
2857 }
2858
2859 static void __init netlink_add_usersock_entry(void)
2860 {
2861 struct listeners *listeners;
2862 int groups = 32;
2863
2864 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2865 if (!listeners)
2866 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
2867
2868 netlink_table_grab();
2869
2870 nl_table[NETLINK_USERSOCK].groups = groups;
2871 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
2872 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
2873 nl_table[NETLINK_USERSOCK].registered = 1;
2874 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
2875
2876 netlink_table_ungrab();
2877 }
2878
2879 static struct pernet_operations __net_initdata netlink_net_ops = {
2880 .init = netlink_net_init,
2881 .exit = netlink_net_exit,
2882 };
2883
2884 static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
2885 {
2886 const struct netlink_sock *nlk = data;
2887 struct netlink_compare_arg arg;
2888
2889 netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
2890 return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
2891 }
2892
2893 static const struct rhashtable_params netlink_rhashtable_params = {
2894 .head_offset = offsetof(struct netlink_sock, node),
2895 .key_len = netlink_compare_arg_len,
2896 .obj_hashfn = netlink_hash,
2897 .obj_cmpfn = netlink_compare,
2898 .automatic_shrinking = true,
2899 };
2900
2901 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
2902 BTF_ID_LIST(btf_netlink_sock_id)
2903 BTF_ID(struct, netlink_sock)
2904
2905 static const struct bpf_iter_seq_info netlink_seq_info = {
2906 .seq_ops = &netlink_seq_ops,
2907 .init_seq_private = bpf_iter_init_seq_net,
2908 .fini_seq_private = bpf_iter_fini_seq_net,
2909 .seq_priv_size = sizeof(struct nl_seq_iter),
2910 };
2911
2912 static struct bpf_iter_reg netlink_reg_info = {
2913 .target = "netlink",
2914 .ctx_arg_info_size = 1,
2915 .ctx_arg_info = {
2916 { offsetof(struct bpf_iter__netlink, sk),
2917 PTR_TO_BTF_ID_OR_NULL },
2918 },
2919 .seq_info = &netlink_seq_info,
2920 };
2921
2922 static int __init bpf_iter_register(void)
2923 {
2924 netlink_reg_info.ctx_arg_info[0].btf_id = *btf_netlink_sock_id;
2925 return bpf_iter_reg_target(&netlink_reg_info);
2926 }
2927 #endif
2928
2929 static int __init netlink_proto_init(void)
2930 {
2931 int i;
2932 int err = proto_register(&netlink_proto, 0);
2933
2934 if (err != 0)
2935 goto out;
2936
2937 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
2938 err = bpf_iter_register();
2939 if (err)
2940 goto out;
2941 #endif
2942
2943 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof_field(struct sk_buff, cb));
2944
2945 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
2946 if (!nl_table)
2947 goto panic;
2948
2949 for (i = 0; i < MAX_LINKS; i++) {
2950 if (rhashtable_init(&nl_table[i].hash,
2951 &netlink_rhashtable_params) < 0) {
2952 while (--i > 0)
2953 rhashtable_destroy(&nl_table[i].hash);
2954 kfree(nl_table);
2955 goto panic;
2956 }
2957 }
2958
2959 netlink_add_usersock_entry();
2960
2961 sock_register(&netlink_family_ops);
2962 register_pernet_subsys(&netlink_net_ops);
2963 register_pernet_subsys(&netlink_tap_net_ops);
2964 /* The netlink device handler may be needed early. */
2965 rtnetlink_init();
2966 out:
2967 return err;
2968 panic:
2969 panic("netlink_init: Cannot allocate nl_table\n");
2970 }
2971
2972 core_initcall(netlink_proto_init);
2973