xref: /linux/include/net/xdp_sock_drv.h (revision 2b43470a)
/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#ifdef CONFIG_XDP_SOCKETS

bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_release_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);
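
/* A minimal zero-copy TX sketch (not part of this header; my_xmit_desc() and
 * 'completed' are hypothetical driver-side names) showing how the
 * consume/complete pair above is typically used from a TX clean/poll path:
 *
 *	struct xdp_desc desc;
 *
 *	while (xsk_umem_consume_tx(umem, &desc)) {
 *		dma_addr_t dma = xdp_umem_get_dma(umem, desc.addr);
 *
 *		my_xmit_desc(tx_ring, dma, desc.len);
 *	}
 *	xsk_umem_consume_tx_done(umem);
 *
 *	// once the HW reports 'completed' descriptors as sent:
 *	xsk_umem_complete_tx(umem, completed);
 */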

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	unsigned long page_addr;

	addr = xsk_umem_add_offset_to_addr(addr);
	page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;

	return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK);
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	addr = xsk_umem_add_offset_to_addr(addr);

	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
}

/* Reuse-queue aware version of FILL queue helpers */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (rq->length >= cnt)
		return true;

	return xsk_umem_has_addrs(umem, cnt - rq->length);
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}

static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_release_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
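
/* A minimal RX fill sketch (hypothetical driver code; my_post_rx_buffer() is
 * assumed and headroom handling is omitted for brevity) showing how the
 * reuse-queue aware helpers above pair with xdp_umem_get_dma():
 *
 *	u64 addr;
 *
 *	while (count-- && xsk_umem_peek_addr_rq(umem, &addr)) {
 *		dma_addr_t dma = xdp_umem_get_dma(umem, addr);
 *
 *		my_post_rx_buffer(rx_ring, dma);
 *		xsk_umem_release_addr_rq(umem);
 *	}
 *
 * Addresses that cannot be given to hardware (e.g. on ring teardown) are
 * recycled into the reuse queue with xsk_umem_fq_reuse().
 */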

/* Handle the offset appropriately depending on aligned or unaligned mode.
 * For unaligned mode, we store the offset in the upper 16-bits of the address.
 * For aligned mode, we simply add the offset to the address.
 */
static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
					 u64 offset)
{
	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
	else
		return address + offset;
}
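
/* A worked example (illustrative values only): with address 0x1000 and
 * offset 0x100, aligned mode returns 0x1100, while unaligned mode returns
 * 0x1000 + (0x100 << XSK_UNALIGNED_BUF_OFFSET_SHIFT), i.e. 0x0100000000001000,
 * keeping the base address in the lower 48 bits and the offset in the upper
 * 16 bits.
 */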

static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
{
	return umem->chunk_size_nohr;
}

static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
{
	return XDP_PACKET_HEADROOM + umem->headroom;
}

static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
{
	return umem->chunk_size;
}

static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
{
	return xsk_umem_get_chunk_size(umem) - xsk_umem_get_headroom(umem);
}

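/* Assign the driver's xdp_rxq_info to all buffers in the pool, so that
 * xdp->rxq is valid for every frame received on this queue.
 */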
static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(umem->pool, rxq);
}

static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
				      unsigned long attrs)
{
	xp_dma_unmap(umem->pool, attrs);
}

static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
				   unsigned long attrs)
{
	return xp_dma_map(umem->pool, dev, attrs, umem->pgs, umem->npgs);
}
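
/* A minimal setup sketch (hypothetical driver code; the DMA attributes are
 * driver specific): the pool is mapped once when the zero-copy queue is
 * enabled and unmapped again on teardown:
 *
 *	err = xsk_buff_dma_map(umem, &pdev->dev, DMA_ATTR_SKIP_CPU_SYNC);
 *	if (err)
 *		return err;
 *	...
 *	xsk_buff_dma_unmap(umem, DMA_ATTR_SKIP_CPU_SYNC);
 */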

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
{
	return xp_alloc(umem->pool);
}

static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
{
	return xp_can_alloc(umem->pool, count);
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_free(xskb);
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
{
	return xp_raw_get_dma(umem->pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
{
	return xp_raw_get_data(umem->pool, addr);
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(umem->pool, dma, size);
}
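
/* A minimal RX-path sketch for the xsk_buff API above (hypothetical driver
 * code; my_run_xdp() is assumed): a buffer is allocated from the pool and its
 * DMA address posted to hardware; on completion it is sized, synced for the
 * CPU and either passed on or returned to the pool:
 *
 *	struct xdp_buff *xdp = xsk_buff_alloc(umem);
 *
 *	if (!xdp)
 *		return -ENOMEM;
 *	dma = xsk_buff_xdp_get_dma(xdp);	// write into the RX descriptor
 *	...
 *	// on RX completion, with 'len' bytes received:
 *	xdp->data_end = xdp->data + len;
 *	xsk_buff_dma_sync_for_cpu(xdp);
 *	if (my_run_xdp(rx_ring, xdp) == XDP_DROP)
 *		xsk_buff_free(xdp);
 */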

#else

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_release_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem, struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return false;
}

static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
					 u64 offset)
{
	return 0;
}

static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
{
	return 0;
}

static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
{
	return 0;
}

static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
{
	return 0;
}

static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
{
	return 0;
}

static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
				      unsigned long attrs)
{
}

static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
				   unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
{
	return NULL;
}

static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */