/* SPDX-License-Identifier: GPL-2.0-only */
/* include/net/xdp.h
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#ifndef __LINUX_NET_XDP_H__
#define __LINUX_NET_XDP_H__

#include <linux/bitfield.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h> /* skb_shared_info */

/**
 * DOC: XDP RX-queue information
 *
 * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
 * level RX-ring queues. It is information that is specific to how
 * the driver has configured a given RX-ring queue.
 *
 * Each xdp_buff frame received in the driver carries a (pointer)
 * reference to this xdp_rxq_info structure. This gives the XDP
 * data path read access to the RX-queue info, both on the kernel
 * side and (a limited subset) on the BPF side.
 *
 * For now, direct access is only safe while running in NAPI/softirq
 * context. Contents are read-mostly and must not be updated during
 * driver NAPI/softirq poll.
 *
 * The driver usage API is a register and unregister API.
 *
 * The struct is not directly tied to the XDP prog. A new XDP prog
 * can be attached as long as it doesn't change the underlying
 * RX-ring. If the RX-ring does change significantly, the NIC driver
 * naturally needs to stop the RX-ring before purging and reallocating
 * memory. In that process the driver MUST call unregister (which
 * also applies for driver shutdown and unload). The register API is
 * also mandatory during RX-ring setup.
 */
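
/* Example (hypothetical driver, a minimal sketch): the register/unregister
 * pairing around RX-ring setup and teardown, using the APIs declared later
 * in this file. The ring/netdev names are illustrative only:
 *
 *	err = xdp_rxq_info_reg(&ring->xdp_rxq, netdev,
 *			       ring->queue_index, napi_id);
 *	if (err)
 *		goto err_free_ring;
 *
 * and on RX-ring teardown (also on driver shutdown/unload):
 *
 *	xdp_rxq_info_unreg(&ring->xdp_rxq);
 */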

enum xdp_mem_type {
	MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
	MEM_TYPE_PAGE_ORDER0,     /* Orig XDP full page model */
	MEM_TYPE_PAGE_POOL,
	MEM_TYPE_XSK_BUFF_POOL,
	MEM_TYPE_MAX,
};

/* XDP flags for ndo_xdp_xmit */
#define XDP_XMIT_FLUSH		(1U << 0)	/* doorbell signal consumer */
#define XDP_XMIT_FLAGS_MASK	XDP_XMIT_FLUSH

struct xdp_mem_info {
	u32 type; /* enum xdp_mem_type, but known size type */
	u32 id;
};

struct page_pool;

struct xdp_rxq_info {
	struct net_device *dev;
	u32 queue_index;
	u32 reg_state;
	struct xdp_mem_info mem;
	unsigned int napi_id;
	u32 frag_size;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */

struct xdp_txq_info {
	struct net_device *dev;
};

enum xdp_buff_flags {
	XDP_FLAGS_HAS_FRAGS		= BIT(0), /* non-linear xdp buff */
	XDP_FLAGS_FRAGS_PF_MEMALLOC	= BIT(1), /* xdp paged memory is under
						   * pressure
						   */
};

struct xdp_buff {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
	struct xdp_rxq_info *rxq;
	struct xdp_txq_info *txq;
	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom */
	u32 flags; /* supported values defined in xdp_buff_flags */
};
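
/* A sketch of the area an xdp_buff describes (not to scale). frame_sz
 * covers everything from data_hard_start up to and including the
 * skb_shared_info; the xdp_data_hard_end() macro below excludes that
 * tailroom reservation:
 *
 *	data_hard_start                           data_hard_end
 *	|                                               |
 *	v                                               v
 *	+----------+------+----------+----------+-----------------+
 *	| headroom | meta |  packet  | tailroom | skb_shared_info |
 *	+----------+------+----------+----------+-----------------+
 *	           ^      ^          ^                            ^
 *	      data_meta  data    data_end    data_hard_start + frame_sz
 *
 * data_meta == data means a zero-length (but valid) metadata area,
 * while data_meta == data + 1 marks metadata as unsupported.
 */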

static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
{
	return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
}

static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp)
{
	xdp->flags |= XDP_FLAGS_HAS_FRAGS;
}

static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
{
	xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
}

static __always_inline bool xdp_buff_is_frag_pfmemalloc(struct xdp_buff *xdp)
{
	return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
}

static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp)
{
	xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC;
}

static __always_inline void
xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
{
	xdp->frame_sz = frame_sz;
	xdp->rxq = rxq;
	xdp->flags = 0;
}

static __always_inline void
xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
		 int headroom, int data_len, const bool meta_valid)
{
	unsigned char *data = hard_start + headroom;

	xdp->data_hard_start = hard_start;
	xdp->data = data;
	xdp->data_end = data + data_len;
	xdp->data_meta = meta_valid ? data : data + 1;
}
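
/* Example (hypothetical driver RX path, a minimal sketch): a driver using
 * one page per packet, with metadata support, would do something like:
 *
 *	struct xdp_buff xdp;
 *
 *	xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_rxq);
 *	xdp_prepare_buff(&xdp, page_address(page), XDP_PACKET_HEADROOM,
 *			 pkt_len, true);
 *
 * Passing meta_valid == false instead sets data_meta = data + 1, which
 * xdp_data_meta_unsupported() later in this file reports as "metadata
 * unsupported".
 */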

/* Reserve memory area at the end of the data area.
 *
 * This macro reserves tailroom in the XDP buffer by limiting the
 * XDP/BPF data access to data_hard_end. Notice the same area (and size)
 * is used for XDP_PASS, when constructing the SKB via build_skb().
 */
#define xdp_data_hard_end(xdp)				\
	((xdp)->data_hard_start + (xdp)->frame_sz -	\
	 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
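
/* For example, bpf_xdp_adjust_tail() can only grow a packet within this
 * reserved area, i.e. by at most xdp_data_hard_end(xdp) - xdp->data_end
 * bytes.
 */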

static inline struct skb_shared_info *
xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
{
	return (struct skb_shared_info *)xdp_data_hard_end(xdp);
}

static __always_inline unsigned int xdp_get_buff_len(struct xdp_buff *xdp)
{
	unsigned int len = xdp->data_end - xdp->data;
	struct skb_shared_info *sinfo;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	len += sinfo->xdp_frags_size;
out:
	return len;
}

struct xdp_frame {
	void *data;
	u16 len;
	u16 headroom;
	u32 metasize; /* uses lower 8-bits */
	/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
	 * while mem info is valid on remote CPU.
	 */
	struct xdp_mem_info mem;
	struct net_device *dev_rx; /* used by cpumap */
	u32 frame_sz;
	u32 flags; /* supported values defined in xdp_buff_flags */
};

static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame)
{
	return !!(frame->flags & XDP_FLAGS_HAS_FRAGS);
}

static __always_inline bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame)
{
	return !!(frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
}

#define XDP_BULK_QUEUE_SIZE	16
struct xdp_frame_bulk {
	int count;
	void *xa;
	void *q[XDP_BULK_QUEUE_SIZE];
};

static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
{
	/* bq->count will be zero'ed when bq->xa gets updated */
	bq->xa = NULL;
}

static inline struct skb_shared_info *
xdp_get_shared_info_from_frame(struct xdp_frame *frame)
{
	void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);

	return (struct skb_shared_info *)(data_hard_start + frame->frame_sz -
					  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
}

struct xdp_cpumap_stats {
	unsigned int redirect;
	unsigned int pass;
	unsigned int drop;
};

/* Clear kernel pointers in xdp_frame */
static inline void xdp_scrub_frame(struct xdp_frame *frame)
{
	frame->data = NULL;
	frame->dev_rx = NULL;
}

static inline void
xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
			   unsigned int size, unsigned int truesize,
			   bool pfmemalloc)
{
	skb_shinfo(skb)->nr_frags = nr_frags;

	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
	skb->pfmemalloc |= pfmemalloc;
}
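
/* Example (a minimal sketch, modelled on __xdp_build_skb_from_frame()):
 * once the frags of a multi-buffer frame back a newly built skb, the skb
 * counters are derived from the shared info:
 *
 *	sinfo = xdp_get_shared_info_from_frame(xdpf);
 *	nr_frags = sinfo->nr_frags;
 *	...
 *	xdp_update_skb_shared_info(skb, nr_frags,
 *				   sinfo->xdp_frags_size,
 *				   nr_frags * xdpf->frame_sz,
 *				   xdp_frame_is_frag_pfmemalloc(xdpf));
 */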

/* Avoids inlining WARN macro in fast-path */
void xdp_warn(const char *msg, const char *func, const int line);
#define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev);
struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev);
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp);
struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf);

static inline
void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
{
	xdp->data_hard_start = frame->data - frame->headroom - sizeof(*frame);
	xdp->data = frame->data;
	xdp->data_end = frame->data + frame->len;
	xdp->data_meta = frame->data - frame->metasize;
	xdp->frame_sz = frame->frame_sz;
	xdp->flags = frame->flags;
}

static inline
int xdp_update_frame_from_buff(struct xdp_buff *xdp,
			       struct xdp_frame *xdp_frame)
{
	int metasize, headroom;

	/* Assure headroom is available for storing info */
	headroom = xdp->data - xdp->data_hard_start;
	metasize = xdp->data - xdp->data_meta;
	metasize = metasize > 0 ? metasize : 0;
	if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
		return -ENOSPC;

	/* Catch if driver didn't reserve tailroom for skb_shared_info */
	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
		XDP_WARN("Driver BUG: missing reserved tailroom");
		return -ENOSPC;
	}

	xdp_frame->data = xdp->data;
	xdp_frame->len = xdp->data_end - xdp->data;
	xdp_frame->headroom = headroom - sizeof(*xdp_frame);
	xdp_frame->metasize = metasize;
	xdp_frame->frame_sz = xdp->frame_sz;
	xdp_frame->flags = xdp->flags;

	return 0;
}

/* Convert xdp_buff to xdp_frame */
static inline
struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
{
	struct xdp_frame *xdp_frame;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
		return xdp_convert_zc_to_xdp_frame(xdp);

	/* Store info in top of packet */
	xdp_frame = xdp->data_hard_start;
	if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0))
		return NULL;

	/* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
	xdp_frame->mem = xdp->rxq->mem;

	return xdp_frame;
}
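
/* Example (hypothetical driver, a minimal sketch): converting a buff for
 * driver-side transmit, e.g. on an XDP_TX verdict. mydrv_xmit_frame() is
 * a stand-in for the driver's own TX routine:
 *
 *	xdpf = xdp_convert_buff_to_frame(&xdp);
 *	if (unlikely(!xdpf))
 *		goto drop;
 *	if (mydrv_xmit_frame(tx_ring, xdpf))
 *		xdp_return_frame_rx_napi(xdpf);
 */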

void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
		  struct xdp_buff *xdp);
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq);
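
/* Example (a minimal sketch): bulk-freeing frames, e.g. on a TX completion
 * path. The bulk queue is kept on the stack, and the bulk APIs rely on RCU
 * for the mem-allocator lookup, so the caller is assumed to hold
 * rcu_read_lock():
 *
 *	struct xdp_frame_bulk bq;
 *
 *	xdp_frame_bulk_init(&bq);
 *	rcu_read_lock();
 *	for (i = 0; i < n; i++)
 *		xdp_return_frame_bulk(frames[i], &bq);
 *	xdp_flush_frame_bulk(&bq);
 *	rcu_read_unlock();
 */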

static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	unsigned int len = xdpf->len;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	len += sinfo->xdp_frags_size;
out:
	return len;
}

int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size);
static inline int
xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		 struct net_device *dev, u32 queue_index,
		 unsigned int napi_id)
{
	return __xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id, 0);
}

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator);
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator);
void xdp_unreg_mem_model(struct xdp_mem_info *mem);
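
/* Example (hypothetical driver, a minimal sketch): attaching a page_pool
 * to an already-registered rxq, so that returned frames can be recycled:
 *
 *	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, ring->page_pool);
 *	if (err) {
 *		xdp_rxq_info_unreg(&ring->xdp_rxq);
 *		return err;
 *	}
 */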

/* Drivers not supporting XDP metadata can use this helper, which
 * rejects any room expansion for metadata as a result.
 */
static __always_inline void
xdp_set_data_meta_invalid(struct xdp_buff *xdp)
{
	xdp->data_meta = xdp->data + 1;
}

static __always_inline bool
xdp_data_meta_unsupported(const struct xdp_buff *xdp)
{
	return unlikely(xdp->data_meta > xdp->data);
}

static inline bool xdp_metalen_invalid(unsigned long metalen)
{
	unsigned long meta_max;

	meta_max = type_max(typeof_member(struct skb_shared_info, meta_len));
	BUILD_BUG_ON(!__builtin_constant_p(meta_max));

	return !IS_ALIGNED(metalen, sizeof(u32)) || metalen > meta_max;
}

struct xdp_attachment_info {
	struct bpf_prog *prog;
	u32 flags;
};

struct netdev_bpf;
void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf);

#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE

/* Define the relationship between xdp-rx-metadata kfunc and
 * various other entities:
 * - xdp_rx_metadata enum
 * - netdev netlink enum (Documentation/netlink/specs/netdev.yaml)
 * - kfunc name
 * - xdp_metadata_ops field
 */
#define XDP_METADATA_KFUNC_xxx	\
	XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_TIMESTAMP, \
			   NETDEV_XDP_RX_METADATA_TIMESTAMP, \
			   bpf_xdp_metadata_rx_timestamp, \
			   xmo_rx_timestamp) \
	XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_HASH, \
			   NETDEV_XDP_RX_METADATA_HASH, \
			   bpf_xdp_metadata_rx_hash, \
			   xmo_rx_hash) \
	XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_VLAN_TAG, \
			   NETDEV_XDP_RX_METADATA_VLAN_TAG, \
			   bpf_xdp_metadata_rx_vlan_tag, \
			   xmo_rx_vlan_tag)

enum xdp_rx_metadata {
#define XDP_METADATA_KFUNC(name, _, __, ___) name,
	XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
	MAX_XDP_METADATA_KFUNC,
};
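
/* For reference, the XDP_METADATA_KFUNC_xxx expansion above yields:
 *
 *	enum xdp_rx_metadata {
 *		XDP_METADATA_KFUNC_RX_TIMESTAMP,
 *		XDP_METADATA_KFUNC_RX_HASH,
 *		XDP_METADATA_KFUNC_RX_VLAN_TAG,
 *		MAX_XDP_METADATA_KFUNC,
 *	};
 */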

enum xdp_rss_hash_type {
	/* First part: Individual bits for L3/L4 types */
	XDP_RSS_L3_IPV4		= BIT(0),
	XDP_RSS_L3_IPV6		= BIT(1),

	/* The fixed (L3) IPv4 and IPv6 headers can both be followed by
	 * variable/dynamic headers, called Options for IPv4 and Extension
	 * Headers for IPv6. The HW RSS type can contain this info.
	 */
	XDP_RSS_L3_DYNHDR	= BIT(2),

	/* When the RSS hash covers L4, drivers MUST set the XDP_RSS_L4 bit in
	 * addition to the protocol-specific bit. This eases interaction with
	 * SKBs and avoids reserving a fixed mask for future L4 protocol bits.
	 */
	XDP_RSS_L4		= BIT(3), /* L4 based hash, proto can be unknown */
	XDP_RSS_L4_TCP		= BIT(4),
	XDP_RSS_L4_UDP		= BIT(5),
	XDP_RSS_L4_SCTP		= BIT(6),
	XDP_RSS_L4_IPSEC	= BIT(7), /* L4 based hash includes the IPsec SPI */
	XDP_RSS_L4_ICMP		= BIT(8),

	/* Second part: RSS hash type combinations used for driver HW mapping */
	XDP_RSS_TYPE_NONE            = 0,
	XDP_RSS_TYPE_L2              = XDP_RSS_TYPE_NONE,

	XDP_RSS_TYPE_L3_IPV4         = XDP_RSS_L3_IPV4,
	XDP_RSS_TYPE_L3_IPV6         = XDP_RSS_L3_IPV6,
	XDP_RSS_TYPE_L3_IPV4_OPT     = XDP_RSS_L3_IPV4 | XDP_RSS_L3_DYNHDR,
	XDP_RSS_TYPE_L3_IPV6_EX      = XDP_RSS_L3_IPV6 | XDP_RSS_L3_DYNHDR,

	XDP_RSS_TYPE_L4_ANY          = XDP_RSS_L4,
	XDP_RSS_TYPE_L4_IPV4_TCP     = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_TCP,
	XDP_RSS_TYPE_L4_IPV4_UDP     = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
	XDP_RSS_TYPE_L4_IPV4_SCTP    = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
	XDP_RSS_TYPE_L4_IPV4_IPSEC   = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
	XDP_RSS_TYPE_L4_IPV4_ICMP    = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_ICMP,

	XDP_RSS_TYPE_L4_IPV6_TCP     = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_TCP,
	XDP_RSS_TYPE_L4_IPV6_UDP     = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
	XDP_RSS_TYPE_L4_IPV6_SCTP    = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
	XDP_RSS_TYPE_L4_IPV6_IPSEC   = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
	XDP_RSS_TYPE_L4_IPV6_ICMP    = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_ICMP,

	XDP_RSS_TYPE_L4_IPV6_TCP_EX  = XDP_RSS_TYPE_L4_IPV6_TCP  | XDP_RSS_L3_DYNHDR,
	XDP_RSS_TYPE_L4_IPV6_UDP_EX  = XDP_RSS_TYPE_L4_IPV6_UDP  | XDP_RSS_L3_DYNHDR,
	XDP_RSS_TYPE_L4_IPV6_SCTP_EX = XDP_RSS_TYPE_L4_IPV6_SCTP | XDP_RSS_L3_DYNHDR,
};

struct xdp_metadata_ops {
	int	(*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp);
	int	(*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash,
			       enum xdp_rss_hash_type *rss_type);
	int	(*xmo_rx_vlan_tag)(const struct xdp_md *ctx, __be16 *vlan_proto,
				   u16 *vlan_tci);
};
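
/* Example (hypothetical driver, a minimal sketch): drivers typically embed
 * the xdp_buff in a driver-private struct so the kfunc hook can recover the
 * HW descriptor from the ctx pointer. All mydrv_* names are illustrative:
 *
 *	static int mydrv_xmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
 *				     enum xdp_rss_hash_type *rss_type)
 *	{
 *		const struct mydrv_xdp_buff *mx = (void *)ctx;
 *
 *		if (!mx->desc->rss_valid)
 *			return -ENODATA;
 *		*hash = le32_to_cpu(mx->desc->rss_hash);
 *		*rss_type = mydrv_to_xdp_rss_type(mx->desc->rss_type);
 *		return 0;
 *	}
 */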

#ifdef CONFIG_NET
u32 bpf_xdp_metadata_kfunc_id(int id);
bool bpf_dev_bound_kfunc_id(u32 btf_id);
void xdp_set_features_flag(struct net_device *dev, xdp_features_t val);
void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg);
void xdp_features_clear_redirect_target(struct net_device *dev);
#else
static inline u32 bpf_xdp_metadata_kfunc_id(int id) { return 0; }
static inline bool bpf_dev_bound_kfunc_id(u32 btf_id) { return false; }

static inline void
xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
{
}

static inline void
xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
{
}

static inline void
xdp_features_clear_redirect_target(struct net_device *dev)
{
}
#endif

static inline void xdp_clear_features_flag(struct net_device *dev)
{
	xdp_set_features_flag(dev, 0);
}

static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
					    struct xdp_buff *xdp)
{
	/* Driver XDP hooks are invoked within a single NAPI poll cycle and thus
	 * under local_bh_disable(), which provides the needed RCU protection
	 * for accessing map entries.
	 */
	u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));

	if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) {
		if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
			act = xdp_master_redirect(xdp);
	}

	return act;
}
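
/* Example (hypothetical driver, a minimal sketch): acting on the verdict
 * inside the NAPI poll loop:
 *
 *	act = bpf_prog_run_xdp(prog, &xdp);
 *	switch (act) {
 *	case XDP_PASS:
 *		... build an skb and hand it to the stack ...
 *		break;
 *	case XDP_TX:
 *		... transmit via the driver's own TX path ...
 *		break;
 *	case XDP_REDIRECT:
 *		if (xdp_do_redirect(netdev, &xdp, prog))
 *			goto drop;
 *		break;
 *	default:
 *		bpf_warn_invalid_xdp_action(netdev, prog, act);
 *		fallthrough;
 *	case XDP_ABORTED:
 *		trace_xdp_exception(netdev, prog, act);
 *		fallthrough;
 *	case XDP_DROP:
 *	drop:
 *		... recycle or free the buffer ...
 *		break;
 *	}
 */
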
#endif /* __LINUX_NET_XDP_H__ */