1 // SPDX-License-Identifier: GPL-2.0
2 /* XDP sockets
3 *
4 * AF_XDP sockets allow a channel between XDP programs and userspace
5 * applications.
6 * Copyright(c) 2018 Intel Corporation.
7 *
8 * Author(s): Björn Töpel <bjorn.topel@intel.com>
9 * Magnus Karlsson <magnus.karlsson@intel.com>
10 */
11
12 #define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__
13
14 #include <linux/if_xdp.h>
15 #include <linux/init.h>
16 #include <linux/sched/mm.h>
17 #include <linux/sched/signal.h>
18 #include <linux/sched/task.h>
19 #include <linux/socket.h>
20 #include <linux/file.h>
21 #include <linux/uaccess.h>
22 #include <linux/net.h>
23 #include <linux/netdevice.h>
24 #include <linux/rculist.h>
25 #include <linux/vmalloc.h>
26 #include <net/xdp_sock_drv.h>
27 #include <net/busy_poll.h>
28 #include <net/netdev_rx_queue.h>
29 #include <net/xdp.h>
30
31 #include "xsk_queue.h"
32 #include "xdp_umem.h"
33 #include "xsk.h"
34
35 #define TX_BATCH_SIZE 32
36 #define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)
37
38 static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
39
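/* Helpers used by zero-copy drivers to maintain the NEED_WAKEUP flags. The
 * wakeup state is cached per pool so that the flags words shared with user
 * space are only written when the state actually changes.
 */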
40 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
41 {
42 if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
43 return;
44
45 pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
46 pool->cached_need_wakeup |= XDP_WAKEUP_RX;
47 }
48 EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
49
50 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
51 {
52 struct xdp_sock *xs;
53
54 if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
55 return;
56
57 rcu_read_lock();
58 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
59 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
60 }
61 rcu_read_unlock();
62
63 pool->cached_need_wakeup |= XDP_WAKEUP_TX;
64 }
65 EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
66
67 void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
68 {
69 if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
70 return;
71
72 pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
73 pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
74 }
75 EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
76
77 void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
78 {
79 struct xdp_sock *xs;
80
81 if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
82 return;
83
84 rcu_read_lock();
85 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
86 xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
87 }
88 rcu_read_unlock();
89
90 pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
91 }
92 EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
93
94 bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
95 {
96 return pool->uses_need_wakeup;
97 }
98 EXPORT_SYMBOL(xsk_uses_need_wakeup);
99
100 struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
101 u16 queue_id)
102 {
103 if (queue_id < dev->real_num_rx_queues)
104 return dev->_rx[queue_id].pool;
105 if (queue_id < dev->real_num_tx_queues)
106 return dev->_tx[queue_id].pool;
107
108 return NULL;
109 }
110 EXPORT_SYMBOL(xsk_get_pool_from_qid);
111
112 void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
113 {
114 if (queue_id < dev->num_rx_queues)
115 dev->_rx[queue_id].pool = NULL;
116 if (queue_id < dev->num_tx_queues)
117 dev->_tx[queue_id].pool = NULL;
118 }
119
120 /* The buffer pool is stored both in the _rx struct and the _tx struct as we do
121 * not know if the device has more tx queues than rx, or the opposite.
122 * This might also change during run time.
123 */
124 int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
125 u16 queue_id)
126 {
127 if (queue_id >= max_t(unsigned int,
128 dev->real_num_rx_queues,
129 dev->real_num_tx_queues))
130 return -EINVAL;
131
132 if (queue_id < dev->real_num_rx_queues)
133 dev->_rx[queue_id].pool = pool;
134 if (queue_id < dev->real_num_tx_queues)
135 dev->_tx[queue_id].pool = pool;
136
137 return 0;
138 }
139
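/* Reserve one descriptor in the socket's Rx ring for @xskb. If the ring is
 * full, rx_queue_full is bumped and an error is returned so that the caller
 * can drop the buffer.
 */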
140 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
141 u32 flags)
142 {
143 u64 addr;
144 int err;
145
146 addr = xp_get_handle(xskb);
147 err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
148 if (err) {
149 xs->rx_queue_full++;
150 return err;
151 }
152
153 xp_release(xskb);
154 return 0;
155 }
156
157 static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
158 {
159 struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
160 u32 frags = xdp_buff_has_frags(xdp);
161 struct xdp_buff_xsk *pos, *tmp;
162 struct list_head *xskb_list;
163 u32 contd = 0;
164 int err;
165
166 if (frags)
167 contd = XDP_PKT_CONTD;
168
169 err = __xsk_rcv_zc(xs, xskb, len, contd);
170 if (err)
171 goto err;
172 if (likely(!frags))
173 return 0;
174
175 xskb_list = &xskb->pool->xskb_list;
176 list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
177 if (list_is_singular(xskb_list))
178 contd = 0;
179 len = pos->xdp.data_end - pos->xdp.data;
180 err = __xsk_rcv_zc(xs, pos, len, contd);
181 if (err)
182 goto err;
183 list_del(&pos->xskb_list_node);
184 }
185
186 return 0;
187 err:
188 xsk_buff_free(xdp);
189 return err;
190 }
191
192 static void *xsk_copy_xdp_start(struct xdp_buff *from)
193 {
194 if (unlikely(xdp_data_meta_unsupported(from)))
195 return from->data;
196 else
197 return from->data_meta;
198 }
199
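/* Copy packet data into a single Rx buffer. The source may span the linear
 * part of the xdp_buff and any number of frags; copying stops when either
 * the destination buffer (to_len) or the whole packet (rem) is exhausted.
 * The source cursor (*from, *from_len, *frag) is updated so that the next
 * call continues where this one stopped.
 */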
200 static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
201 u32 *from_len, skb_frag_t **frag, u32 rem)
202 {
203 u32 copied = 0;
204
205 while (1) {
206 u32 copy_len = min_t(u32, *from_len, to_len);
207
208 memcpy(to, *from, copy_len);
209 copied += copy_len;
210 if (rem == copied)
211 return copied;
212
213 if (*from_len == copy_len) {
214 *from = skb_frag_address(*frag);
215 *from_len = skb_frag_size((*frag)++);
216 } else {
217 *from += copy_len;
218 *from_len -= copy_len;
219 }
220 if (to_len == copy_len)
221 return copied;
222
223 to_len -= copy_len;
224 to += copy_len;
225 }
226 }
227
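/* Copy-mode receive: copy the xdp_buff into buffers taken from the socket's
 * pool. Packets that fit in one frame take the fast path; larger or
 * multi-frag packets are spread over several buffers, all but the last one
 * marked with XDP_PKT_CONTD.
 */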
228 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
229 {
230 u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
231 void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
232 u32 from_len, meta_len, rem, num_desc;
233 struct xdp_buff_xsk *xskb;
234 struct xdp_buff *xsk_xdp;
235 skb_frag_t *frag;
236
237 from_len = xdp->data_end - copy_from;
238 meta_len = xdp->data - copy_from;
239 rem = len + meta_len;
240
241 if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
242 int err;
243
244 xsk_xdp = xsk_buff_alloc(xs->pool);
245 if (!xsk_xdp) {
246 xs->rx_dropped++;
247 return -ENOMEM;
248 }
249 memcpy(xsk_xdp->data - meta_len, copy_from, rem);
250 xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
251 err = __xsk_rcv_zc(xs, xskb, len, 0);
252 if (err) {
253 xsk_buff_free(xsk_xdp);
254 return err;
255 }
256
257 return 0;
258 }
259
260 num_desc = (len - 1) / frame_size + 1;
261
262 if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
263 xs->rx_dropped++;
264 return -ENOMEM;
265 }
266 if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
267 xs->rx_queue_full++;
268 return -ENOBUFS;
269 }
270
271 if (xdp_buff_has_frags(xdp)) {
272 struct skb_shared_info *sinfo;
273
274 sinfo = xdp_get_shared_info_from_buff(xdp);
275 frag = &sinfo->frags[0];
276 }
277
278 do {
279 u32 to_len = frame_size + meta_len;
280 u32 copied;
281
282 xsk_xdp = xsk_buff_alloc(xs->pool);
283 copy_to = xsk_xdp->data - meta_len;
284
285 copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
286 rem -= copied;
287
288 xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
289 __xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
290 meta_len = 0;
291 } while (rem);
292
293 return 0;
294 }
295
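/* Report the Tx ring as writeable only while at most half of its entries are
 * outstanding, so user space is woken up before the ring is completely full.
 */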
296 static bool xsk_tx_writeable(struct xdp_sock *xs)
297 {
298 if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
299 return false;
300
301 return true;
302 }
303
304 static bool xsk_is_bound(struct xdp_sock *xs)
305 {
306 if (READ_ONCE(xs->state) == XSK_BOUND) {
307 /* Matches smp_wmb() in bind(). */
308 smp_rmb();
309 return true;
310 }
311 return false;
312 }
313
314 static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
315 {
316 if (!xsk_is_bound(xs))
317 return -ENXIO;
318
319 if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
320 return -EINVAL;
321
322 if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
323 xs->rx_dropped++;
324 return -ENOSPC;
325 }
326
327 sk_mark_napi_id_once_xdp(&xs->sk, xdp);
328 return 0;
329 }
330
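/* Publish the new Rx descriptors to user space, release the fill ring
 * entries that were consumed, and wake up anyone blocked in poll() on the
 * socket.
 */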
331 static void xsk_flush(struct xdp_sock *xs)
332 {
333 xskq_prod_submit(xs->rx);
334 __xskq_cons_release(xs->pool->fq);
335 sock_def_readable(&xs->sk);
336 }
337
338 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
339 {
340 u32 len = xdp_get_buff_len(xdp);
341 int err;
342
343 spin_lock_bh(&xs->rx_lock);
344 err = xsk_rcv_check(xs, xdp, len);
345 if (!err) {
346 err = __xsk_rcv(xs, xdp, len);
347 xsk_flush(xs);
348 }
349 spin_unlock_bh(&xs->rx_lock);
350 return err;
351 }
352
353 static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
354 {
355 u32 len = xdp_get_buff_len(xdp);
356 int err;
357
358 err = xsk_rcv_check(xs, xdp, len);
359 if (err)
360 return err;
361
362 if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
363 len = xdp->data_end - xdp->data;
364 return xsk_rcv_zc(xs, xdp, len);
365 }
366
367 err = __xsk_rcv(xs, xdp, len);
368 if (!err)
369 xdp_return_buff(xdp);
370 return err;
371 }
372
373 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
374 {
375 struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
376 int err;
377
378 err = xsk_rcv(xs, xdp);
379 if (err)
380 return err;
381
382 if (!xs->flush_node.prev)
383 list_add(&xs->flush_node, flush_list);
384
385 return 0;
386 }
387
388 void __xsk_map_flush(void)
389 {
390 struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
391 struct xdp_sock *xs, *tmp;
392
393 list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
394 xsk_flush(xs);
395 __list_del_clearprev(&xs->flush_node);
396 }
397 }
398
399 #ifdef CONFIG_DEBUG_NET
400 bool xsk_map_check_flush(void)
401 {
402 if (list_empty(this_cpu_ptr(&xskmap_flush_list)))
403 return false;
404 __xsk_map_flush();
405 return true;
406 }
407 #endif
408
409 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
410 {
411 xskq_prod_submit_n(pool->cq, nb_entries);
412 }
413 EXPORT_SYMBOL(xsk_tx_completed);
414
415 void xsk_tx_release(struct xsk_buff_pool *pool)
416 {
417 struct xdp_sock *xs;
418
419 rcu_read_lock();
420 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
421 __xskq_cons_release(xs->tx);
422 if (xsk_tx_writeable(xs))
423 xs->sk.sk_write_space(&xs->sk);
424 }
425 rcu_read_unlock();
426 }
427 EXPORT_SYMBOL(xsk_tx_release);
428
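/* Pick the next Tx descriptor from one of the sockets sharing this pool. A
 * per-socket budget (MAX_PER_SOCKET_BUDGET) keeps a single busy socket from
 * starving the others; once all sockets have used up their budget, the
 * budgets are reset and the scan starts over.
 */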
429 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
430 {
431 bool budget_exhausted = false;
432 struct xdp_sock *xs;
433
434 rcu_read_lock();
435 again:
436 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
437 if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
438 budget_exhausted = true;
439 continue;
440 }
441
442 if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
443 if (xskq_has_descs(xs->tx))
444 xskq_cons_release(xs->tx);
445 continue;
446 }
447
448 xs->tx_budget_spent++;
449
450 /* This is the backpressure mechanism for the Tx path.
451 * Reserve space in the completion queue and only proceed
452 * if there is space in it. This avoids having to implement
453 * any buffering in the Tx path.
454 */
455 if (xskq_prod_reserve_addr(pool->cq, desc->addr))
456 goto out;
457
458 xskq_cons_release(xs->tx);
459 rcu_read_unlock();
460 return true;
461 }
462
463 if (budget_exhausted) {
464 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
465 xs->tx_budget_spent = 0;
466
467 budget_exhausted = false;
468 goto again;
469 }
470
471 out:
472 rcu_read_unlock();
473 return false;
474 }
475 EXPORT_SYMBOL(xsk_tx_peek_desc);
476
477 static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
478 {
479 struct xdp_desc *descs = pool->tx_descs;
480 u32 nb_pkts = 0;
481
482 while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
483 nb_pkts++;
484
485 xsk_tx_release(pool);
486 return nb_pkts;
487 }
488
489 u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
490 {
491 struct xdp_sock *xs;
492
493 rcu_read_lock();
494 if (!list_is_singular(&pool->xsk_tx_list)) {
495 /* Fallback to the non-batched version */
496 rcu_read_unlock();
497 return xsk_tx_peek_release_fallback(pool, nb_pkts);
498 }
499
500 xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
501 if (!xs) {
502 nb_pkts = 0;
503 goto out;
504 }
505
506 nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);
507
508 /* This is the backpressure mechanism for the Tx path. Try to
509 * reserve space in the completion queue for all packets, but
510 * if there are fewer slots available, just process that many
511 * packets. This avoids having to implement any buffering in
512 * the Tx path.
513 */
514 nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
515 if (!nb_pkts)
516 goto out;
517
518 nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
519 if (!nb_pkts) {
520 xs->tx->queue_empty_descs++;
521 goto out;
522 }
523
524 __xskq_cons_release(xs->tx);
525 xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
526 xs->sk.sk_write_space(&xs->sk);
527
528 out:
529 rcu_read_unlock();
530 return nb_pkts;
531 }
532 EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);
533
534 static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
535 {
536 struct net_device *dev = xs->dev;
537
538 return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
539 }
540
541 static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr)
542 {
543 unsigned long flags;
544 int ret;
545
546 spin_lock_irqsave(&xs->pool->cq_lock, flags);
547 ret = xskq_prod_reserve_addr(xs->pool->cq, addr);
548 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
549
550 return ret;
551 }
552
553 static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n)
554 {
555 unsigned long flags;
556
557 spin_lock_irqsave(&xs->pool->cq_lock, flags);
558 xskq_prod_submit_n(xs->pool->cq, n);
559 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
560 }
561
562 static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n)
563 {
564 unsigned long flags;
565
566 spin_lock_irqsave(&xs->pool->cq_lock, flags);
567 xskq_prod_cancel_n(xs->pool->cq, n);
568 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
569 }
570
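/* The number of descriptors backing an skb is stashed in
 * skb_shinfo()->destructor_arg, so that the destructor knows how many
 * completion ring entries to submit (or cancel on error) for it.
 */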
571 static u32 xsk_get_num_desc(struct sk_buff *skb)
572 {
573 return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
574 }
575
576 static void xsk_destruct_skb(struct sk_buff *skb)
577 {
578 struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;
579
580 if (compl->tx_timestamp) {
581 /* sw completion timestamp, not a real one */
582 *compl->tx_timestamp = ktime_get_tai_fast_ns();
583 }
584
585 xsk_cq_submit_locked(xdp_sk(skb->sk), xsk_get_num_desc(skb));
586 sock_wfree(skb);
587 }
588
589 static void xsk_set_destructor_arg(struct sk_buff *skb)
590 {
591 long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;
592
593 skb_shinfo(skb)->destructor_arg = (void *)num;
594 }
595
596 static void xsk_consume_skb(struct sk_buff *skb)
597 {
598 struct xdp_sock *xs = xdp_sk(skb->sk);
599
600 skb->destructor = sock_wfree;
601 xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb));
602 /* Free skb without triggering the perf drop trace */
603 consume_skb(skb);
604 xs->skb = NULL;
605 }
606
607 static void xsk_drop_skb(struct sk_buff *skb)
608 {
609 xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
610 xsk_consume_skb(skb);
611 }
612
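/* For devices with IFF_TX_SKB_NO_LINEAR, build an skb whose frags point
 * directly at the umem pages so that the payload does not have to be copied.
 */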
613 static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
614 struct xdp_desc *desc)
615 {
616 struct xsk_buff_pool *pool = xs->pool;
617 u32 hr, len, ts, offset, copy, copied;
618 struct sk_buff *skb = xs->skb;
619 struct page *page;
620 void *buffer;
621 int err, i;
622 u64 addr;
623
624 if (!skb) {
625 hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
626
627 skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
628 if (unlikely(!skb))
629 return ERR_PTR(err);
630
631 skb_reserve(skb, hr);
632 }
633
634 addr = desc->addr;
635 len = desc->len;
636 ts = pool->unaligned ? len : pool->chunk_size;
637
638 buffer = xsk_buff_raw_get_data(pool, addr);
639 offset = offset_in_page(buffer);
640 addr = buffer - pool->addrs;
641
642 for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
643 if (unlikely(i >= MAX_SKB_FRAGS))
644 return ERR_PTR(-EOVERFLOW);
645
646 page = pool->umem->pgs[addr >> PAGE_SHIFT];
647 get_page(page);
648
649 copy = min_t(u32, PAGE_SIZE - offset, len - copied);
650 skb_fill_page_desc(skb, i, page, offset, copy);
651
652 copied += copy;
653 addr += copy;
654 offset = 0;
655 }
656
657 skb->len += len;
658 skb->data_len += len;
659 skb->truesize += ts;
660
661 refcount_add(ts, &xs->sk.sk_wmem_alloc);
662
663 return skb;
664 }
665
666 static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
667 struct xdp_desc *desc)
668 {
669 struct xsk_tx_metadata *meta = NULL;
670 struct net_device *dev = xs->dev;
671 struct sk_buff *skb = xs->skb;
672 bool first_frag = false;
673 int err;
674
675 if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
676 skb = xsk_build_skb_zerocopy(xs, desc);
677 if (IS_ERR(skb)) {
678 err = PTR_ERR(skb);
679 goto free_err;
680 }
681 } else {
682 u32 hr, tr, len;
683 void *buffer;
684
685 buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
686 len = desc->len;
687
688 if (!skb) {
689 hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
690 tr = dev->needed_tailroom;
691 skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
692 if (unlikely(!skb))
693 goto free_err;
694
695 skb_reserve(skb, hr);
696 skb_put(skb, len);
697
698 err = skb_store_bits(skb, 0, buffer, len);
699 if (unlikely(err)) {
700 kfree_skb(skb);
701 goto free_err;
702 }
703
704 first_frag = true;
705 } else {
706 int nr_frags = skb_shinfo(skb)->nr_frags;
707 struct page *page;
708 u8 *vaddr;
709
710 if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
711 err = -EOVERFLOW;
712 goto free_err;
713 }
714
715 page = alloc_page(xs->sk.sk_allocation);
716 if (unlikely(!page)) {
717 err = -EAGAIN;
718 goto free_err;
719 }
720
721 vaddr = kmap_local_page(page);
722 memcpy(vaddr, buffer, len);
723 kunmap_local(vaddr);
724
725 skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
726 refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
727 }
728
729 if (first_frag && desc->options & XDP_TX_METADATA) {
730 if (unlikely(xs->pool->tx_metadata_len == 0)) {
731 err = -EINVAL;
732 goto free_err;
733 }
734
735 meta = buffer - xs->pool->tx_metadata_len;
736 if (unlikely(!xsk_buff_valid_tx_metadata(meta))) {
737 err = -EINVAL;
738 goto free_err;
739 }
740
741 if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
742 if (unlikely(meta->request.csum_start +
743 meta->request.csum_offset +
744 sizeof(__sum16) > len)) {
745 err = -EINVAL;
746 goto free_err;
747 }
748
749 skb->csum_start = hr + meta->request.csum_start;
750 skb->csum_offset = meta->request.csum_offset;
751 skb->ip_summed = CHECKSUM_PARTIAL;
752
753 if (unlikely(xs->pool->tx_sw_csum)) {
754 err = skb_checksum_help(skb);
755 if (err)
756 goto free_err;
757 }
758 }
759 }
760 }
761
762 skb->dev = dev;
763 skb->priority = READ_ONCE(xs->sk.sk_priority);
764 skb->mark = READ_ONCE(xs->sk.sk_mark);
765 skb->destructor = xsk_destruct_skb;
766 xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
767 xsk_set_destructor_arg(skb);
768
769 return skb;
770
771 free_err:
772 if (err == -EOVERFLOW) {
773 /* Drop the packet */
774 xsk_set_destructor_arg(xs->skb);
775 xsk_drop_skb(xs->skb);
776 xskq_cons_release(xs->tx);
777 } else {
778 /* Let application retry */
779 xsk_cq_cancel_locked(xs, 1);
780 }
781
782 return ERR_PTR(err);
783 }
784
785 static int __xsk_generic_xmit(struct sock *sk)
786 {
787 struct xdp_sock *xs = xdp_sk(sk);
788 u32 max_batch = TX_BATCH_SIZE;
789 bool sent_frame = false;
790 struct xdp_desc desc;
791 struct sk_buff *skb;
792 int err = 0;
793
794 mutex_lock(&xs->mutex);
795
796 /* Since we dropped the RCU read lock, the socket state might have changed. */
797 if (unlikely(!xsk_is_bound(xs))) {
798 err = -ENXIO;
799 goto out;
800 }
801
802 if (xs->queue_id >= xs->dev->real_num_tx_queues)
803 goto out;
804
805 while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
806 if (max_batch-- == 0) {
807 err = -EAGAIN;
808 goto out;
809 }
810
811 /* This is the backpressure mechanism for the Tx path.
812 * Reserve space in the completion queue and only proceed
813 * if there is space in it. This avoids having to implement
814 * any buffering in the Tx path.
815 */
816 if (xsk_cq_reserve_addr_locked(xs, desc.addr))
817 goto out;
818
819 skb = xsk_build_skb(xs, &desc);
820 if (IS_ERR(skb)) {
821 err = PTR_ERR(skb);
822 if (err != -EOVERFLOW)
823 goto out;
824 err = 0;
825 continue;
826 }
827
828 xskq_cons_release(xs->tx);
829
830 if (xp_mb_desc(&desc)) {
831 xs->skb = skb;
832 continue;
833 }
834
835 err = __dev_direct_xmit(skb, xs->queue_id);
836 if (err == NETDEV_TX_BUSY) {
837 /* Tell user-space to retry the send */
838 xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
839 xsk_consume_skb(skb);
840 err = -EAGAIN;
841 goto out;
842 }
843
844 /* Ignore NET_XMIT_CN as packet might have been sent */
845 if (err == NET_XMIT_DROP) {
846 /* SKB completed but not sent */
847 err = -EBUSY;
848 xs->skb = NULL;
849 goto out;
850 }
851
852 sent_frame = true;
853 xs->skb = NULL;
854 }
855
856 if (xskq_has_descs(xs->tx)) {
857 if (xs->skb)
858 xsk_drop_skb(xs->skb);
859 xskq_cons_release(xs->tx);
860 }
861
862 out:
863 if (sent_frame)
864 if (xsk_tx_writeable(xs))
865 sk->sk_write_space(sk);
866
867 mutex_unlock(&xs->mutex);
868 return err;
869 }
870
871 static int xsk_generic_xmit(struct sock *sk)
872 {
873 int ret;
874
875 /* Drop the RCU lock since the SKB path might sleep. */
876 rcu_read_unlock();
877 ret = __xsk_generic_xmit(sk);
878 /* Reacquire RCU lock before going into common code. */
879 rcu_read_lock();
880
881 return ret;
882 }
883
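/* When the application prefers busy polling, skip the explicit wakeup and
 * let the busy-poll loop drive the datapath instead.
 */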
884 static bool xsk_no_wakeup(struct sock *sk)
885 {
886 #ifdef CONFIG_NET_RX_BUSY_POLL
887 /* Prefer busy-polling, skip the wakeup. */
888 return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
889 READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
890 #else
891 return false;
892 #endif
893 }
894
895 static int xsk_check_common(struct xdp_sock *xs)
896 {
897 if (unlikely(!xsk_is_bound(xs)))
898 return -ENXIO;
899 if (unlikely(!(xs->dev->flags & IFF_UP)))
900 return -ENETDOWN;
901
902 return 0;
903 }
904
905 static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
906 {
907 bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
908 struct sock *sk = sock->sk;
909 struct xdp_sock *xs = xdp_sk(sk);
910 struct xsk_buff_pool *pool;
911 int err;
912
913 err = xsk_check_common(xs);
914 if (err)
915 return err;
916 if (unlikely(need_wait))
917 return -EOPNOTSUPP;
918 if (unlikely(!xs->tx))
919 return -ENOBUFS;
920
921 if (sk_can_busy_loop(sk)) {
922 if (xs->zc)
923 __sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
924 sk_busy_loop(sk, 1); /* only support non-blocking sockets */
925 }
926
927 if (xs->zc && xsk_no_wakeup(sk))
928 return 0;
929
930 pool = xs->pool;
931 if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
932 if (xs->zc)
933 return xsk_wakeup(xs, XDP_WAKEUP_TX);
934 return xsk_generic_xmit(sk);
935 }
936 return 0;
937 }
938
939 static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
940 {
941 int ret;
942
943 rcu_read_lock();
944 ret = __xsk_sendmsg(sock, m, total_len);
945 rcu_read_unlock();
946
947 return ret;
948 }
949
950 static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
951 {
952 bool need_wait = !(flags & MSG_DONTWAIT);
953 struct sock *sk = sock->sk;
954 struct xdp_sock *xs = xdp_sk(sk);
955 int err;
956
957 err = xsk_check_common(xs);
958 if (err)
959 return err;
960 if (unlikely(!xs->rx))
961 return -ENOBUFS;
962 if (unlikely(need_wait))
963 return -EOPNOTSUPP;
964
965 if (sk_can_busy_loop(sk))
966 sk_busy_loop(sk, 1); /* only support non-blocking sockets */
967
968 if (xsk_no_wakeup(sk))
969 return 0;
970
971 if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
972 return xsk_wakeup(xs, XDP_WAKEUP_RX);
973 return 0;
974 }
975
976 static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
977 {
978 int ret;
979
980 rcu_read_lock();
981 ret = __xsk_recvmsg(sock, m, len, flags);
982 rcu_read_unlock();
983
984 return ret;
985 }
986
987 static __poll_t xsk_poll(struct file *file, struct socket *sock,
988 struct poll_table_struct *wait)
989 {
990 __poll_t mask = 0;
991 struct sock *sk = sock->sk;
992 struct xdp_sock *xs = xdp_sk(sk);
993 struct xsk_buff_pool *pool;
994
995 sock_poll_wait(file, sock, wait);
996
997 rcu_read_lock();
998 if (xsk_check_common(xs))
999 goto out;
1000
1001 pool = xs->pool;
1002
1003 if (pool->cached_need_wakeup) {
1004 if (xs->zc)
1005 xsk_wakeup(xs, pool->cached_need_wakeup);
1006 else if (xs->tx)
1007 /* Poll needs to drive Tx also in copy mode */
1008 xsk_generic_xmit(sk);
1009 }
1010
1011 if (xs->rx && !xskq_prod_is_empty(xs->rx))
1012 mask |= EPOLLIN | EPOLLRDNORM;
1013 if (xs->tx && xsk_tx_writeable(xs))
1014 mask |= EPOLLOUT | EPOLLWRNORM;
1015 out:
1016 rcu_read_unlock();
1017 return mask;
1018 }
1019
1020 static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
1021 bool umem_queue)
1022 {
1023 struct xsk_queue *q;
1024
1025 if (entries == 0 || *queue || !is_power_of_2(entries))
1026 return -EINVAL;
1027
1028 q = xskq_create(entries, umem_queue);
1029 if (!q)
1030 return -ENOMEM;
1031
1032 /* Make sure queue is ready before it can be seen by others */
1033 smp_wmb();
1034 WRITE_ONCE(*queue, q);
1035 return 0;
1036 }
1037
1038 static void xsk_unbind_dev(struct xdp_sock *xs)
1039 {
1040 struct net_device *dev = xs->dev;
1041
1042 if (xs->state != XSK_BOUND)
1043 return;
1044 WRITE_ONCE(xs->state, XSK_UNBOUND);
1045
1046 /* Wait for driver to stop using the xdp socket. */
1047 xp_del_xsk(xs->pool, xs);
1048 synchronize_net();
1049 dev_put(dev);
1050 }
1051
1052 static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
1053 struct xdp_sock __rcu ***map_entry)
1054 {
1055 struct xsk_map *map = NULL;
1056 struct xsk_map_node *node;
1057
1058 *map_entry = NULL;
1059
1060 spin_lock_bh(&xs->map_list_lock);
1061 node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
1062 node);
1063 if (node) {
1064 bpf_map_inc(&node->map->map);
1065 map = node->map;
1066 *map_entry = node->map_entry;
1067 }
1068 spin_unlock_bh(&xs->map_list_lock);
1069 return map;
1070 }
1071
1072 static void xsk_delete_from_maps(struct xdp_sock *xs)
1073 {
1074 /* This function removes the current XDP socket from all the
1075 * maps it resides in. We need to take extra care here, due to
1076 * the two locks involved. Each map has a lock synchronizing
1077 * updates to the entries, and each socket has a lock that
1078 * synchronizes access to the list of maps (map_list). For
1079 * deadlock avoidance the locks need to be taken in the order
1080 * "map lock"->"socket map list lock". We start off by
1081 * accessing the socket map list, and take a reference to the
1082 * map to guarantee existence between the
1083 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
1084 * calls. Then we ask the map to remove the socket, which
1085 * tries to remove the socket from the map. Note that there
1086 * might be updates to the map between
1087 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
1088 */
1089 struct xdp_sock __rcu **map_entry = NULL;
1090 struct xsk_map *map;
1091
1092 while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
1093 xsk_map_try_sock_delete(map, xs, map_entry);
1094 bpf_map_put(&map->map);
1095 }
1096 }
1097
1098 static int xsk_release(struct socket *sock)
1099 {
1100 struct sock *sk = sock->sk;
1101 struct xdp_sock *xs = xdp_sk(sk);
1102 struct net *net;
1103
1104 if (!sk)
1105 return 0;
1106
1107 net = sock_net(sk);
1108
1109 if (xs->skb)
1110 xsk_drop_skb(xs->skb);
1111
1112 mutex_lock(&net->xdp.lock);
1113 sk_del_node_init_rcu(sk);
1114 mutex_unlock(&net->xdp.lock);
1115
1116 sock_prot_inuse_add(net, sk->sk_prot, -1);
1117
1118 xsk_delete_from_maps(xs);
1119 mutex_lock(&xs->mutex);
1120 xsk_unbind_dev(xs);
1121 mutex_unlock(&xs->mutex);
1122
1123 xskq_destroy(xs->rx);
1124 xskq_destroy(xs->tx);
1125 xskq_destroy(xs->fq_tmp);
1126 xskq_destroy(xs->cq_tmp);
1127
1128 sock_orphan(sk);
1129 sock->sk = NULL;
1130
1131 sock_put(sk);
1132
1133 return 0;
1134 }
1135
1136 static struct socket *xsk_lookup_xsk_from_fd(int fd)
1137 {
1138 struct socket *sock;
1139 int err;
1140
1141 sock = sockfd_lookup(fd, &err);
1142 if (!sock)
1143 return ERR_PTR(-ENOTSOCK);
1144
1145 if (sock->sk->sk_family != PF_XDP) {
1146 sockfd_put(sock);
1147 return ERR_PTR(-ENOPROTOOPT);
1148 }
1149
1150 return sock;
1151 }
1152
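/* A socket that brings its own umem must have created both a fill ring and a
 * completion ring before bind() can succeed.
 */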
1153 static bool xsk_validate_queues(struct xdp_sock *xs)
1154 {
1155 return xs->fq_tmp && xs->cq_tmp;
1156 }
1157
1158 static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
1159 {
1160 struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
1161 struct sock *sk = sock->sk;
1162 struct xdp_sock *xs = xdp_sk(sk);
1163 struct net_device *dev;
1164 int bound_dev_if;
1165 u32 flags, qid;
1166 int err = 0;
1167
1168 if (addr_len < sizeof(struct sockaddr_xdp))
1169 return -EINVAL;
1170 if (sxdp->sxdp_family != AF_XDP)
1171 return -EINVAL;
1172
1173 flags = sxdp->sxdp_flags;
1174 if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
1175 XDP_USE_NEED_WAKEUP | XDP_USE_SG))
1176 return -EINVAL;
1177
1178 bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
1179 if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
1180 return -EINVAL;
1181
1182 rtnl_lock();
1183 mutex_lock(&xs->mutex);
1184 if (xs->state != XSK_READY) {
1185 err = -EBUSY;
1186 goto out_release;
1187 }
1188
1189 dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
1190 if (!dev) {
1191 err = -ENODEV;
1192 goto out_release;
1193 }
1194
1195 if (!xs->rx && !xs->tx) {
1196 err = -EINVAL;
1197 goto out_unlock;
1198 }
1199
1200 qid = sxdp->sxdp_queue_id;
1201
1202 if (flags & XDP_SHARED_UMEM) {
1203 struct xdp_sock *umem_xs;
1204 struct socket *sock;
1205
1206 if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
1207 (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
1208 /* Cannot specify flags for shared sockets. */
1209 err = -EINVAL;
1210 goto out_unlock;
1211 }
1212
1213 if (xs->umem) {
1214 /* We already have our own. */
1215 err = -EINVAL;
1216 goto out_unlock;
1217 }
1218
1219 sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
1220 if (IS_ERR(sock)) {
1221 err = PTR_ERR(sock);
1222 goto out_unlock;
1223 }
1224
1225 umem_xs = xdp_sk(sock->sk);
1226 if (!xsk_is_bound(umem_xs)) {
1227 err = -EBADF;
1228 sockfd_put(sock);
1229 goto out_unlock;
1230 }
1231
1232 if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
1233 /* Share the umem with another socket on another qid
1234 * and/or device.
1235 */
1236 xs->pool = xp_create_and_assign_umem(xs,
1237 umem_xs->umem);
1238 if (!xs->pool) {
1239 err = -ENOMEM;
1240 sockfd_put(sock);
1241 goto out_unlock;
1242 }
1243
1244 err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
1245 qid);
1246 if (err) {
1247 xp_destroy(xs->pool);
1248 xs->pool = NULL;
1249 sockfd_put(sock);
1250 goto out_unlock;
1251 }
1252 } else {
1253 /* Share the buffer pool with the other socket. */
1254 if (xs->fq_tmp || xs->cq_tmp) {
1255 /* Do not allow setting your own fq or cq. */
1256 err = -EINVAL;
1257 sockfd_put(sock);
1258 goto out_unlock;
1259 }
1260
1261 xp_get_pool(umem_xs->pool);
1262 xs->pool = umem_xs->pool;
1263
1264 /* If underlying shared umem was created without Tx
1265 * ring, allocate Tx descs array that Tx batching API
1266 * utilizes
1267 */
1268 if (xs->tx && !xs->pool->tx_descs) {
1269 err = xp_alloc_tx_descs(xs->pool, xs);
1270 if (err) {
1271 xp_put_pool(xs->pool);
1272 xs->pool = NULL;
1273 sockfd_put(sock);
1274 goto out_unlock;
1275 }
1276 }
1277 }
1278
1279 xdp_get_umem(umem_xs->umem);
1280 WRITE_ONCE(xs->umem, umem_xs->umem);
1281 sockfd_put(sock);
1282 } else if (!xs->umem || !xsk_validate_queues(xs)) {
1283 err = -EINVAL;
1284 goto out_unlock;
1285 } else {
1286 /* This xsk has its own umem. */
1287 xs->pool = xp_create_and_assign_umem(xs, xs->umem);
1288 if (!xs->pool) {
1289 err = -ENOMEM;
1290 goto out_unlock;
1291 }
1292
1293 err = xp_assign_dev(xs->pool, dev, qid, flags);
1294 if (err) {
1295 xp_destroy(xs->pool);
1296 xs->pool = NULL;
1297 goto out_unlock;
1298 }
1299 }
1300
1301 /* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
1302 xs->fq_tmp = NULL;
1303 xs->cq_tmp = NULL;
1304
1305 xs->dev = dev;
1306 xs->zc = xs->umem->zc;
1307 xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
1308 xs->queue_id = qid;
1309 xp_add_xsk(xs->pool, xs);
1310
1311 out_unlock:
1312 if (err) {
1313 dev_put(dev);
1314 } else {
1315 /* Matches smp_rmb() in bind() for shared umem
1316 * sockets, and xsk_is_bound().
1317 */
1318 smp_wmb();
1319 WRITE_ONCE(xs->state, XSK_BOUND);
1320 }
1321 out_release:
1322 mutex_unlock(&xs->mutex);
1323 rtnl_unlock();
1324 return err;
1325 }
1326
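/* Older layouts of struct xdp_umem_reg, kept so that XDP_UMEM_REG continues
 * to accept the shorter structs used by applications built against older
 * uapi headers.
 */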
1327 struct xdp_umem_reg_v1 {
1328 __u64 addr; /* Start of packet data area */
1329 __u64 len; /* Length of packet data area */
1330 __u32 chunk_size;
1331 __u32 headroom;
1332 };
1333
1334 struct xdp_umem_reg_v2 {
1335 __u64 addr; /* Start of packet data area */
1336 __u64 len; /* Length of packet data area */
1337 __u32 chunk_size;
1338 __u32 headroom;
1339 __u32 flags;
1340 };
1341
1342 static int xsk_setsockopt(struct socket *sock, int level, int optname,
1343 sockptr_t optval, unsigned int optlen)
1344 {
1345 struct sock *sk = sock->sk;
1346 struct xdp_sock *xs = xdp_sk(sk);
1347 int err;
1348
1349 if (level != SOL_XDP)
1350 return -ENOPROTOOPT;
1351
1352 switch (optname) {
1353 case XDP_RX_RING:
1354 case XDP_TX_RING:
1355 {
1356 struct xsk_queue **q;
1357 int entries;
1358
1359 if (optlen < sizeof(entries))
1360 return -EINVAL;
1361 if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1362 return -EFAULT;
1363
1364 mutex_lock(&xs->mutex);
1365 if (xs->state != XSK_READY) {
1366 mutex_unlock(&xs->mutex);
1367 return -EBUSY;
1368 }
1369 q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
1370 err = xsk_init_queue(entries, q, false);
1371 if (!err && optname == XDP_TX_RING)
1372 /* Tx needs to be explicitly woken up the first time */
1373 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
1374 mutex_unlock(&xs->mutex);
1375 return err;
1376 }
1377 case XDP_UMEM_REG:
1378 {
1379 size_t mr_size = sizeof(struct xdp_umem_reg);
1380 struct xdp_umem_reg mr = {};
1381 struct xdp_umem *umem;
1382
1383 if (optlen < sizeof(struct xdp_umem_reg_v1))
1384 return -EINVAL;
1385 else if (optlen < sizeof(struct xdp_umem_reg_v2))
1386 mr_size = sizeof(struct xdp_umem_reg_v1);
1387 else if (optlen < sizeof(mr))
1388 mr_size = sizeof(struct xdp_umem_reg_v2);
1389
1390 if (copy_from_sockptr(&mr, optval, mr_size))
1391 return -EFAULT;
1392
1393 mutex_lock(&xs->mutex);
1394 if (xs->state != XSK_READY || xs->umem) {
1395 mutex_unlock(&xs->mutex);
1396 return -EBUSY;
1397 }
1398
1399 umem = xdp_umem_create(&mr);
1400 if (IS_ERR(umem)) {
1401 mutex_unlock(&xs->mutex);
1402 return PTR_ERR(umem);
1403 }
1404
1405 /* Make sure umem is ready before it can be seen by others */
1406 smp_wmb();
1407 WRITE_ONCE(xs->umem, umem);
1408 mutex_unlock(&xs->mutex);
1409 return 0;
1410 }
1411 case XDP_UMEM_FILL_RING:
1412 case XDP_UMEM_COMPLETION_RING:
1413 {
1414 struct xsk_queue **q;
1415 int entries;
1416
1417 if (optlen < sizeof(entries))
1418 return -EINVAL;
1419 if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1420 return -EFAULT;
1421
1422 mutex_lock(&xs->mutex);
1423 if (xs->state != XSK_READY) {
1424 mutex_unlock(&xs->mutex);
1425 return -EBUSY;
1426 }
1427
1428 q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
1429 &xs->cq_tmp;
1430 err = xsk_init_queue(entries, q, true);
1431 mutex_unlock(&xs->mutex);
1432 return err;
1433 }
1434 default:
1435 break;
1436 }
1437
1438 return -ENOPROTOOPT;
1439 }
1440
1441 static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
1442 {
1443 ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
1444 ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
1445 ring->desc = offsetof(struct xdp_rxtx_ring, desc);
1446 }
1447
1448 static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
1449 {
1450 ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
1451 ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
1452 ring->desc = offsetof(struct xdp_umem_ring, desc);
1453 }
1454
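/* Original layout of struct xdp_statistics, used when user space passes a
 * shorter optlen to getsockopt(XDP_STATISTICS).
 */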
1455 struct xdp_statistics_v1 {
1456 __u64 rx_dropped;
1457 __u64 rx_invalid_descs;
1458 __u64 tx_invalid_descs;
1459 };
1460
1461 static int xsk_getsockopt(struct socket *sock, int level, int optname,
1462 char __user *optval, int __user *optlen)
1463 {
1464 struct sock *sk = sock->sk;
1465 struct xdp_sock *xs = xdp_sk(sk);
1466 int len;
1467
1468 if (level != SOL_XDP)
1469 return -ENOPROTOOPT;
1470
1471 if (get_user(len, optlen))
1472 return -EFAULT;
1473 if (len < 0)
1474 return -EINVAL;
1475
1476 switch (optname) {
1477 case XDP_STATISTICS:
1478 {
1479 struct xdp_statistics stats = {};
1480 bool extra_stats = true;
1481 size_t stats_size;
1482
1483 if (len < sizeof(struct xdp_statistics_v1)) {
1484 return -EINVAL;
1485 } else if (len < sizeof(stats)) {
1486 extra_stats = false;
1487 stats_size = sizeof(struct xdp_statistics_v1);
1488 } else {
1489 stats_size = sizeof(stats);
1490 }
1491
1492 mutex_lock(&xs->mutex);
1493 stats.rx_dropped = xs->rx_dropped;
1494 if (extra_stats) {
1495 stats.rx_ring_full = xs->rx_queue_full;
1496 stats.rx_fill_ring_empty_descs =
1497 xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
1498 stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
1499 } else {
1500 stats.rx_dropped += xs->rx_queue_full;
1501 }
1502 stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
1503 stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
1504 mutex_unlock(&xs->mutex);
1505
1506 if (copy_to_user(optval, &stats, stats_size))
1507 return -EFAULT;
1508 if (put_user(stats_size, optlen))
1509 return -EFAULT;
1510
1511 return 0;
1512 }
1513 case XDP_MMAP_OFFSETS:
1514 {
1515 struct xdp_mmap_offsets off;
1516 struct xdp_mmap_offsets_v1 off_v1;
1517 bool flags_supported = true;
1518 void *to_copy;
1519
1520 if (len < sizeof(off_v1))
1521 return -EINVAL;
1522 else if (len < sizeof(off))
1523 flags_supported = false;
1524
1525 if (flags_supported) {
1526 /* xdp_ring_offset is identical to xdp_ring_offset_v1
1527 * except for the flags field added to the end.
1528 */
1529 xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1530 &off.rx);
1531 xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1532 &off.tx);
1533 xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1534 &off.fr);
1535 xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1536 &off.cr);
1537 off.rx.flags = offsetof(struct xdp_rxtx_ring,
1538 ptrs.flags);
1539 off.tx.flags = offsetof(struct xdp_rxtx_ring,
1540 ptrs.flags);
1541 off.fr.flags = offsetof(struct xdp_umem_ring,
1542 ptrs.flags);
1543 off.cr.flags = offsetof(struct xdp_umem_ring,
1544 ptrs.flags);
1545
1546 len = sizeof(off);
1547 to_copy = &off;
1548 } else {
1549 xsk_enter_rxtx_offsets(&off_v1.rx);
1550 xsk_enter_rxtx_offsets(&off_v1.tx);
1551 xsk_enter_umem_offsets(&off_v1.fr);
1552 xsk_enter_umem_offsets(&off_v1.cr);
1553
1554 len = sizeof(off_v1);
1555 to_copy = &off_v1;
1556 }
1557
1558 if (copy_to_user(optval, to_copy, len))
1559 return -EFAULT;
1560 if (put_user(len, optlen))
1561 return -EFAULT;
1562
1563 return 0;
1564 }
1565 case XDP_OPTIONS:
1566 {
1567 struct xdp_options opts = {};
1568
1569 if (len < sizeof(opts))
1570 return -EINVAL;
1571
1572 mutex_lock(&xs->mutex);
1573 if (xs->zc)
1574 opts.flags |= XDP_OPTIONS_ZEROCOPY;
1575 mutex_unlock(&xs->mutex);
1576
1577 len = sizeof(opts);
1578 if (copy_to_user(optval, &opts, len))
1579 return -EFAULT;
1580 if (put_user(len, optlen))
1581 return -EFAULT;
1582
1583 return 0;
1584 }
1585 default:
1586 break;
1587 }
1588
1589 return -EOPNOTSUPP;
1590 }
1591
1592 static int xsk_mmap(struct file *file, struct socket *sock,
1593 struct vm_area_struct *vma)
1594 {
1595 loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1596 unsigned long size = vma->vm_end - vma->vm_start;
1597 struct xdp_sock *xs = xdp_sk(sock->sk);
1598 int state = READ_ONCE(xs->state);
1599 struct xsk_queue *q = NULL;
1600
1601 if (state != XSK_READY && state != XSK_BOUND)
1602 return -EBUSY;
1603
1604 if (offset == XDP_PGOFF_RX_RING) {
1605 q = READ_ONCE(xs->rx);
1606 } else if (offset == XDP_PGOFF_TX_RING) {
1607 q = READ_ONCE(xs->tx);
1608 } else {
1609 /* Matches the smp_wmb() in XDP_UMEM_REG */
1610 smp_rmb();
1611 if (offset == XDP_UMEM_PGOFF_FILL_RING)
1612 q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
1613 READ_ONCE(xs->pool->fq);
1614 else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
1615 q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
1616 READ_ONCE(xs->pool->cq);
1617 }
1618
1619 if (!q)
1620 return -EINVAL;
1621
1622 /* Matches the smp_wmb() in xsk_init_queue */
1623 smp_rmb();
1624 if (size > q->ring_vmalloc_size)
1625 return -EINVAL;
1626
1627 return remap_vmalloc_range(vma, q->ring, 0);
1628 }
1629
1630 static int xsk_notifier(struct notifier_block *this,
1631 unsigned long msg, void *ptr)
1632 {
1633 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1634 struct net *net = dev_net(dev);
1635 struct sock *sk;
1636
1637 switch (msg) {
1638 case NETDEV_UNREGISTER:
1639 mutex_lock(&net->xdp.lock);
1640 sk_for_each(sk, &net->xdp.list) {
1641 struct xdp_sock *xs = xdp_sk(sk);
1642
1643 mutex_lock(&xs->mutex);
1644 if (xs->dev == dev) {
1645 sk->sk_err = ENETDOWN;
1646 if (!sock_flag(sk, SOCK_DEAD))
1647 sk_error_report(sk);
1648
1649 xsk_unbind_dev(xs);
1650
1651 /* Clear device references. */
1652 xp_clear_dev(xs->pool);
1653 }
1654 mutex_unlock(&xs->mutex);
1655 }
1656 mutex_unlock(&net->xdp.lock);
1657 break;
1658 }
1659 return NOTIFY_DONE;
1660 }
1661
1662 static struct proto xsk_proto = {
1663 .name = "XDP",
1664 .owner = THIS_MODULE,
1665 .obj_size = sizeof(struct xdp_sock),
1666 };
1667
1668 static const struct proto_ops xsk_proto_ops = {
1669 .family = PF_XDP,
1670 .owner = THIS_MODULE,
1671 .release = xsk_release,
1672 .bind = xsk_bind,
1673 .connect = sock_no_connect,
1674 .socketpair = sock_no_socketpair,
1675 .accept = sock_no_accept,
1676 .getname = sock_no_getname,
1677 .poll = xsk_poll,
1678 .ioctl = sock_no_ioctl,
1679 .listen = sock_no_listen,
1680 .shutdown = sock_no_shutdown,
1681 .setsockopt = xsk_setsockopt,
1682 .getsockopt = xsk_getsockopt,
1683 .sendmsg = xsk_sendmsg,
1684 .recvmsg = xsk_recvmsg,
1685 .mmap = xsk_mmap,
1686 };
1687
1688 static void xsk_destruct(struct sock *sk)
1689 {
1690 struct xdp_sock *xs = xdp_sk(sk);
1691
1692 if (!sock_flag(sk, SOCK_DEAD))
1693 return;
1694
1695 if (!xp_put_pool(xs->pool))
1696 xdp_put_umem(xs->umem, !xs->pool);
1697 }
1698
1699 static int xsk_create(struct net *net, struct socket *sock, int protocol,
1700 int kern)
1701 {
1702 struct xdp_sock *xs;
1703 struct sock *sk;
1704
1705 if (!ns_capable(net->user_ns, CAP_NET_RAW))
1706 return -EPERM;
1707 if (sock->type != SOCK_RAW)
1708 return -ESOCKTNOSUPPORT;
1709
1710 if (protocol)
1711 return -EPROTONOSUPPORT;
1712
1713 sock->state = SS_UNCONNECTED;
1714
1715 sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
1716 if (!sk)
1717 return -ENOBUFS;
1718
1719 sock->ops = &xsk_proto_ops;
1720
1721 sock_init_data(sock, sk);
1722
1723 sk->sk_family = PF_XDP;
1724
1725 sk->sk_destruct = xsk_destruct;
1726
1727 sock_set_flag(sk, SOCK_RCU_FREE);
1728
1729 xs = xdp_sk(sk);
1730 xs->state = XSK_READY;
1731 mutex_init(&xs->mutex);
1732 spin_lock_init(&xs->rx_lock);
1733
1734 INIT_LIST_HEAD(&xs->map_list);
1735 spin_lock_init(&xs->map_list_lock);
1736
1737 mutex_lock(&net->xdp.lock);
1738 sk_add_node_rcu(sk, &net->xdp.list);
1739 mutex_unlock(&net->xdp.lock);
1740
1741 sock_prot_inuse_add(net, &xsk_proto, 1);
1742
1743 return 0;
1744 }
1745
1746 static const struct net_proto_family xsk_family_ops = {
1747 .family = PF_XDP,
1748 .create = xsk_create,
1749 .owner = THIS_MODULE,
1750 };
1751
1752 static struct notifier_block xsk_netdev_notifier = {
1753 .notifier_call = xsk_notifier,
1754 };
1755
1756 static int __net_init xsk_net_init(struct net *net)
1757 {
1758 mutex_init(&net->xdp.lock);
1759 INIT_HLIST_HEAD(&net->xdp.list);
1760 return 0;
1761 }
1762
1763 static void __net_exit xsk_net_exit(struct net *net)
1764 {
1765 WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
1766 }
1767
1768 static struct pernet_operations xsk_net_ops = {
1769 .init = xsk_net_init,
1770 .exit = xsk_net_exit,
1771 };
1772
1773 static int __init xsk_init(void)
1774 {
1775 int err, cpu;
1776
1777 err = proto_register(&xsk_proto, 0 /* no slab */);
1778 if (err)
1779 goto out;
1780
1781 err = sock_register(&xsk_family_ops);
1782 if (err)
1783 goto out_proto;
1784
1785 err = register_pernet_subsys(&xsk_net_ops);
1786 if (err)
1787 goto out_sk;
1788
1789 err = register_netdevice_notifier(&xsk_netdev_notifier);
1790 if (err)
1791 goto out_pernet;
1792
1793 for_each_possible_cpu(cpu)
1794 INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
1795 return 0;
1796
1797 out_pernet:
1798 unregister_pernet_subsys(&xsk_net_ops);
1799 out_sk:
1800 sock_unregister(PF_XDP);
1801 out_proto:
1802 proto_unregister(&xsk_proto);
1803 out:
1804 return err;
1805 }
1806
1807 fs_initcall(xsk_init);
1808