xref: /linux/include/net/xdp_sock.h (revision bf0bdd13)
1dac09149SBjörn Töpel /* SPDX-License-Identifier: GPL-2.0 */
2dac09149SBjörn Töpel /* AF_XDP internal functions
3c0c77d8fSBjörn Töpel  * Copyright(c) 2018 Intel Corporation.
4c0c77d8fSBjörn Töpel  */
5c0c77d8fSBjörn Töpel 
6c0c77d8fSBjörn Töpel #ifndef _LINUX_XDP_SOCK_H
7c0c77d8fSBjörn Töpel #define _LINUX_XDP_SOCK_H
8c0c77d8fSBjörn Töpel 
9e61e62b9SBjörn Töpel #include <linux/workqueue.h>
10e61e62b9SBjörn Töpel #include <linux/if_xdp.h>
11c0c77d8fSBjörn Töpel #include <linux/mutex.h>
12ac98d8aaSMagnus Karlsson #include <linux/spinlock.h>
13e61e62b9SBjörn Töpel #include <linux/mm.h>
14c0c77d8fSBjörn Töpel #include <net/sock.h>
15c0c77d8fSBjörn Töpel 
16b9b6b68eSBjörn Töpel struct net_device;
17b9b6b68eSBjörn Töpel struct xsk_queue;
18e61e62b9SBjörn Töpel 
/* Per-page lookup entry for a registered umem: the kernel mapping of the
 * page and its DMA address (DMA field presumably only valid once the umem
 * is DMA-mapped for a device in zero-copy mode — confirm in xdp_umem.c).
 */
struct xdp_umem_page {
	void *addr;	/* kernel virtual address of the page */
	dma_addr_t dma;	/* device DMA address of the page */
};
238aef7340SBjörn Töpel 
/* LIFO stash of FILL-queue addresses that drivers could not use and want
 * to hand back cheaply.  Pushed by xsk_umem_fq_reuse(), popped by the
 * *_rq() helpers below before falling back to the real FILL queue.
 */
struct xdp_umem_fq_reuse {
	u32 nentries;	/* capacity of handles[]; callers must not push past it */
	u32 length;	/* number of valid entries currently stashed */
	u64 handles[];	/* stashed umem addresses (flexible array member) */
};
29f5bd9138SJakub Kicinski 
/* A user-space memory area ("umem") registered for AF_XDP use.
 * Refcounted (users) and shareable between sockets (xsk_list).
 */
struct xdp_umem {
	struct xsk_queue *fq;		/* FILL queue (see FILL queue helpers below) */
	struct xsk_queue *cq;		/* presumably the COMPLETION queue — confirm in xsk.c */
	struct xdp_umem_page *pages;	/* per-page addr/dma table, indexed by page number */
	u64 chunk_mask;			/* mask selecting the chunk part of an address — confirm */
	u64 size;			/* total size of the umem in bytes */
	u32 headroom;
	u32 chunk_size_nohr;		/* looks like chunk size excluding headroom — confirm */
	struct user_struct *user;	/* presumably for locked-memory accounting — confirm */
	unsigned long address;		/* user-space start address of the area */
	refcount_t users;		/* lifetime refcount for this umem */
	struct work_struct work;	/* deferred work, presumably teardown — confirm */
	struct page **pgs;		/* pinned user pages backing the umem */
	u32 npgs;			/* number of entries in pgs */
	int id;
	struct net_device *dev;		/* device this umem is bound to, if any */
	struct xdp_umem_fq_reuse *fq_reuse;	/* stash of recycled FILL-queue addresses */
	u16 queue_id;			/* queue of @dev this umem is bound to */
	bool zc;			/* zero-copy mode enabled — confirm semantics in xdp_umem.c */
	spinlock_t xsk_list_lock;	/* protects xsk_list */
	struct list_head xsk_list;	/* sockets sharing this umem */
};
52c0c77d8fSBjörn Töpel 
/* An AF_XDP socket.  Embeds struct sock so the generic socket layer can
 * cast between the two representations.
 */
struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;		/* Rx ring */
	struct net_device *dev;		/* device this socket is bound to */
	struct xdp_umem *umem;		/* umem backing this socket */
	struct list_head flush_node;	/* linkage for a flush list — confirm owner in xsk.c */
	u16 queue_id;			/* queue of @dev this socket is bound to */
	bool zc;			/* zero-copy enabled — presumably mirrors umem->zc; confirm */
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	/* Tx state starts on its own cacheline, presumably to avoid false
	 * sharing with the Rx fields above — confirm.
	 */
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;
	u64 rx_dropped;			/* count of packets dropped on Rx */
};
74c0c77d8fSBjörn Töpel 
75c497176cSBjörn Töpel struct xdp_buff;
76c497176cSBjörn Töpel #ifdef CONFIG_XDP_SOCKETS
77c497176cSBjörn Töpel int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
78c497176cSBjörn Töpel int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
79c497176cSBjörn Töpel void xsk_flush(struct xdp_sock *xs);
80fbfc504aSBjörn Töpel bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
81ac98d8aaSMagnus Karlsson /* Used from netdev driver */
82d57d7642SMaxim Mikityanskiy bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
83173d3adbSBjörn Töpel u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
84173d3adbSBjörn Töpel void xsk_umem_discard_addr(struct xdp_umem *umem);
85ac98d8aaSMagnus Karlsson void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
864bce4e5cSMaxim Mikityanskiy bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
87ac98d8aaSMagnus Karlsson void xsk_umem_consume_tx_done(struct xdp_umem *umem);
88f5bd9138SJakub Kicinski struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
89f5bd9138SJakub Kicinski struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
90f5bd9138SJakub Kicinski 					  struct xdp_umem_fq_reuse *newq);
91f5bd9138SJakub Kicinski void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
921661d346SJakub Kicinski struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
9390254034SBjörn Töpel 
9490254034SBjörn Töpel static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
9590254034SBjörn Töpel {
9690254034SBjörn Töpel 	return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
9790254034SBjörn Töpel }
9890254034SBjörn Töpel 
9990254034SBjörn Töpel static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
10090254034SBjörn Töpel {
10190254034SBjörn Töpel 	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
10290254034SBjörn Töpel }
103f5bd9138SJakub Kicinski 
104f5bd9138SJakub Kicinski /* Reuse-queue aware version of FILL queue helpers */
105d57d7642SMaxim Mikityanskiy static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
106d57d7642SMaxim Mikityanskiy {
107d57d7642SMaxim Mikityanskiy 	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
108d57d7642SMaxim Mikityanskiy 
109d57d7642SMaxim Mikityanskiy 	if (rq->length >= cnt)
110d57d7642SMaxim Mikityanskiy 		return true;
111d57d7642SMaxim Mikityanskiy 
112d57d7642SMaxim Mikityanskiy 	return xsk_umem_has_addrs(umem, cnt - rq->length);
113d57d7642SMaxim Mikityanskiy }
114d57d7642SMaxim Mikityanskiy 
115f5bd9138SJakub Kicinski static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
116f5bd9138SJakub Kicinski {
117f5bd9138SJakub Kicinski 	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
118f5bd9138SJakub Kicinski 
119f5bd9138SJakub Kicinski 	if (!rq->length)
120f5bd9138SJakub Kicinski 		return xsk_umem_peek_addr(umem, addr);
121f5bd9138SJakub Kicinski 
122f5bd9138SJakub Kicinski 	*addr = rq->handles[rq->length - 1];
123f5bd9138SJakub Kicinski 	return addr;
124f5bd9138SJakub Kicinski }
125f5bd9138SJakub Kicinski 
126f5bd9138SJakub Kicinski static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
127f5bd9138SJakub Kicinski {
128f5bd9138SJakub Kicinski 	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
129f5bd9138SJakub Kicinski 
130f5bd9138SJakub Kicinski 	if (!rq->length)
131f5bd9138SJakub Kicinski 		xsk_umem_discard_addr(umem);
132f5bd9138SJakub Kicinski 	else
133f5bd9138SJakub Kicinski 		rq->length--;
134f5bd9138SJakub Kicinski }
135f5bd9138SJakub Kicinski 
136f5bd9138SJakub Kicinski static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
137f5bd9138SJakub Kicinski {
138f5bd9138SJakub Kicinski 	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
139f5bd9138SJakub Kicinski 
140f5bd9138SJakub Kicinski 	rq->handles[rq->length++] = addr;
141f5bd9138SJakub Kicinski }
142c497176cSBjörn Töpel #else
/* CONFIG_XDP_SOCKETS disabled: generic receive is unsupported. */
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}
147c497176cSBjörn Töpel 
/* CONFIG_XDP_SOCKETS disabled: receive is unsupported. */
static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}
152c497176cSBjörn Töpel 
/* CONFIG_XDP_SOCKETS disabled: nothing to flush. */
static inline void xsk_flush(struct xdp_sock *xs)
{
}
156fbfc504aSBjörn Töpel 
/* CONFIG_XDP_SOCKETS disabled: no socket can be set up for a BPF map. */
static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}
16190254034SBjörn Töpel 
/* CONFIG_XDP_SOCKETS disabled: no addresses are ever available. */
static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}
166d57d7642SMaxim Mikityanskiy 
/* CONFIG_XDP_SOCKETS disabled: nothing to peek. */
static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}
17190254034SBjörn Töpel 
/* CONFIG_XDP_SOCKETS disabled: nothing to discard. */
static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}
17590254034SBjörn Töpel 
/* CONFIG_XDP_SOCKETS disabled: Tx completion is a no-op. */
static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}
17990254034SBjörn Töpel 
/* CONFIG_XDP_SOCKETS disabled: never any Tx descriptors to consume. */
static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}
18590254034SBjörn Töpel 
/* CONFIG_XDP_SOCKETS disabled: Tx-consume completion is a no-op. */
static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}
18990254034SBjörn Töpel 
/* CONFIG_XDP_SOCKETS disabled: no reuse queue can be allocated. */
static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}
194f5bd9138SJakub Kicinski 
/* CONFIG_XDP_SOCKETS disabled: no reuse queue to swap in. */
static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}
/* CONFIG_XDP_SOCKETS disabled: nothing to free. */
static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}
204f5bd9138SJakub Kicinski 
/* CONFIG_XDP_SOCKETS disabled: no umem can be bound to any queue. */
static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}
2101661d346SJakub Kicinski 
/* CONFIG_XDP_SOCKETS disabled: no umem data to translate. */
static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}
21590254034SBjörn Töpel 
/* CONFIG_XDP_SOCKETS disabled: no DMA address to translate. */
static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}
220f5bd9138SJakub Kicinski 
/* CONFIG_XDP_SOCKETS disabled: no addresses are ever available. */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}
225d57d7642SMaxim Mikityanskiy 
/* CONFIG_XDP_SOCKETS disabled: nothing to peek. */
static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}
230f5bd9138SJakub Kicinski 
/* CONFIG_XDP_SOCKETS disabled: nothing to discard. */
static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}
234f5bd9138SJakub Kicinski 
/* CONFIG_XDP_SOCKETS disabled: nothing to stash for reuse. */
static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}
238f5bd9138SJakub Kicinski 
239c497176cSBjörn Töpel #endif /* CONFIG_XDP_SOCKETS */
240c497176cSBjörn Töpel 
241c0c77d8fSBjörn Töpel #endif /* _LINUX_XDP_SOCK_H */
242