/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#ifdef CONFIG_XDP_SOCKETS

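/* Tx path: a driver pulls descriptors off the Tx ring with
 * xsk_umem_consume_tx(), reports finished transmissions back to the
 * completion ring with xsk_umem_complete_tx(), and calls
 * xsk_umem_consume_tx_done() once per batch so the socket layer can
 * release consumed ring entries and wake user-space writers.
 */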
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
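
/* Returns the buffer pool bound to the given queue id of the device,
 * or NULL if no AF_XDP socket is attached to that queue.
 */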
struct xsk_buff_pool *xdp_get_xsk_pool_from_qid(struct net_device *dev,
						u16 queue_id);
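
/* The need_wakeup flags let the driver tell user space whether it must
 * explicitly kick the kernel (e.g. via sendto() or poll()) to get Rx
 * buffers refilled or Tx descriptors sent. Setting the flag before the
 * driver goes idle and clearing it while processing avoids syscalls in
 * the busy case. Drivers should only touch the flags when
 * xsk_umem_uses_need_wakeup() returns true.
 */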
void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);

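/* A minimal sketch of a zero-copy Tx cycle built from the helpers
 * above. The mydrv_* names are hypothetical stand-ins for the driver's
 * own queue structure and descriptor programming; this is illustrative
 * only, not code from an in-tree driver:
 *
 *	static void mydrv_xsk_tx(struct mydrv_queue *q, int budget)
 *	{
 *		struct xdp_desc desc;
 *		int sent = 0;
 *
 *		while (sent < budget && xsk_umem_consume_tx(q->umem, &desc)) {
 *			dma_addr_t dma = xsk_buff_raw_get_dma(q->umem, desc.addr);
 *
 *			xsk_buff_raw_dma_sync_for_device(q->umem, dma, desc.len);
 *			mydrv_xmit(q, dma, desc.len);
 *			sent++;
 *		}
 *		if (sent)
 *			xsk_umem_consume_tx_done(q->umem);
 *	}
 *
 * Once the hardware signals completion, the driver returns the
 * descriptors to user space with xsk_umem_complete_tx().
 */

/* Headroom reserved in front of every Rx frame: the fixed
 * XDP_PACKET_HEADROOM plus any extra headroom user space configured
 * for the umem.
 */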
static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
{
	return XDP_PACKET_HEADROOM + umem->headroom;
}

static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
{
	return umem->chunk_size;
}

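/* Usable Rx frame size: one chunk minus the total headroom in front of
 * the frame data.
 */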
static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
{
	return xsk_umem_get_chunk_size(umem) - xsk_umem_get_headroom(umem);
}

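/* Associate the driver's Rx queue info with the buffer pool so that
 * buffers allocated from it carry the right rxq in their xdp_buff.
 */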
static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(umem->pool, rxq);
}

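/* Map and unmap the umem pages for DMA. Mapping is done once when the
 * socket is bound to a queue; every buffer handed out afterwards
 * reuses these mappings instead of mapping per packet.
 */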
static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
				      unsigned long attrs)
{
	xp_dma_unmap(umem->pool, attrs);
}

static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
				   unsigned long attrs)
{
	return xp_dma_map(umem->pool, dev, attrs, umem->pgs, umem->npgs);
}

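/* DMA addresses for a buffer owned by the pool:
 * xsk_buff_xdp_get_dma() points at the start of the packet data,
 * xsk_buff_xdp_get_frame_dma() at the start of the frame including
 * headroom.
 */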
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

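/* Rx buffer management: xsk_buff_alloc() hands out a buffer backed by
 * the fill ring, xsk_buff_can_alloc() checks whether at least count
 * buffers are available, and xsk_buff_free() returns a buffer to the
 * pool.
 */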
static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
{
	return xp_alloc(umem->pool);
}

static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
{
	return xp_can_alloc(umem->pool, count);
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_free(xskb);
}

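/* The raw variants take an address straight out of a ring descriptor,
 * with no xdp_buff backing it. They are meant for the Tx path, where
 * the driver only has a descriptor address rather than an xdp_buff.
 */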
static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
{
	return xp_raw_get_dma(umem->pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
{
	return xp_raw_get_data(umem->pool, addr);
}

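/* DMA sync helpers for non-coherent systems: sync a buffer for CPU
 * access before reading a received frame, and for device access before
 * handing a frame to the hardware for transmission.
 */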
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(umem->pool, dma, size);
}

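/* A minimal sketch of the Rx side, again with hypothetical mydrv_*
 * names for the driver-specific parts:
 *
 *	static void mydrv_xsk_refill(struct mydrv_queue *q, u32 count)
 *	{
 *		struct xdp_buff *xdp;
 *		u32 i;
 *
 *		if (!xsk_buff_can_alloc(q->umem, count))
 *			return;
 *		for (i = 0; i < count; i++) {
 *			xdp = xsk_buff_alloc(q->umem);
 *			if (!xdp)
 *				break;
 *			mydrv_post_rx_buffer(q, xsk_buff_xdp_get_dma(xdp));
 *		}
 *	}
 *
 * On completion the driver sets xdp->data_end from the received
 * length, calls xsk_buff_dma_sync_for_cpu(xdp) and runs its XDP
 * program; buffers that are not passed on are returned to the pool
 * with xsk_buff_free().
 */
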
#else

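/* Stubs for the CONFIG_XDP_SOCKETS=n case, so that drivers can call
 * these helpers without sprinkling ifdefs through their code.
 */
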
static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xsk_buff_pool *
xdp_get_xsk_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return false;
}

static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
{
	return 0;
}

static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
{
	return 0;
}

static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
{
	return 0;
}

static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
				      unsigned long attrs)
{
}

static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
				   unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
{
	return NULL;
}

static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */