xref: /linux/include/linux/netdevice.h (revision 5c167270)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		Definitions for the Interfaces handler.
8  *
9  * Version:	@(#)dev.h	1.0.10	08/12/93
10  *
11  * Authors:	Ross Biro
12  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
14  *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
15  *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
16  *		Bjorn Ekwall. <bj0rn@blox.se>
17  *              Pekka Riikonen <priikone@poseidon.pspt.fi>
18  *
19  *		Moved to /usr/include/linux for NET3
20  */
21 #ifndef _LINUX_NETDEVICE_H
22 #define _LINUX_NETDEVICE_H
23 
24 #include <linux/timer.h>
25 #include <linux/bug.h>
26 #include <linux/delay.h>
27 #include <linux/atomic.h>
28 #include <linux/prefetch.h>
29 #include <asm/cache.h>
30 #include <asm/byteorder.h>
31 #include <asm/local.h>
32 
33 #include <linux/percpu.h>
34 #include <linux/rculist.h>
35 #include <linux/workqueue.h>
36 #include <linux/dynamic_queue_limits.h>
37 
38 #include <net/net_namespace.h>
39 #ifdef CONFIG_DCB
40 #include <net/dcbnl.h>
41 #endif
42 #include <net/netprio_cgroup.h>
43 
44 #include <linux/netdev_features.h>
45 #include <linux/neighbour.h>
46 #include <uapi/linux/netdevice.h>
47 #include <uapi/linux/if_bonding.h>
48 #include <uapi/linux/pkt_cls.h>
49 #include <uapi/linux/netdev.h>
50 #include <linux/hashtable.h>
51 #include <linux/rbtree.h>
52 #include <net/net_trackers.h>
53 #include <net/net_debug.h>
54 #include <net/dropreason-core.h>
55 
56 struct netpoll_info;
57 struct device;
58 struct ethtool_ops;
59 struct kernel_hwtstamp_config;
60 struct phy_device;
61 struct dsa_port;
62 struct ip_tunnel_parm_kern;
63 struct macsec_context;
64 struct macsec_ops;
65 struct netdev_name_node;
66 struct sd_flow_limit;
67 struct sfp_bus;
68 /* 802.11 specific */
69 struct wireless_dev;
70 /* 802.15.4 specific */
71 struct wpan_dev;
72 struct mpls_dev;
73 /* UDP Tunnel offloads */
74 struct udp_tunnel_info;
75 struct udp_tunnel_nic_info;
76 struct udp_tunnel_nic;
77 struct bpf_prog;
78 struct xdp_buff;
79 struct xdp_frame;
80 struct xdp_metadata_ops;
81 struct xdp_md;
82 
83 typedef u32 xdp_features_t;
84 
85 void synchronize_net(void);
86 void netdev_set_default_ethtool_ops(struct net_device *dev,
87 				    const struct ethtool_ops *ops);
88 void netdev_sw_irq_coalesce_default_on(struct net_device *dev);
89 
90 /* Backlog congestion levels */
91 #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
92 #define NET_RX_DROP		1	/* packet dropped */
93 
94 #define MAX_NEST_DEV 8
95 
96 /*
97  * Transmit return codes: transmit return codes originate from three different
98  * namespaces:
99  *
100  * - qdisc return codes
101  * - driver transmit return codes
102  * - errno values
103  *
104  * Drivers are allowed to return any one of those in their hard_start_xmit()
105  * function. Real network devices commonly used with qdiscs should only return
106  * the driver transmit return codes though - when qdiscs are used, the actual
107  * transmission happens asynchronously, so the value is not propagated to
108  * higher layers. Virtual network devices transmit synchronously; in this case
109  * the driver transmit return codes are consumed by dev_queue_xmit(), and all
110  * others are propagated to higher layers.
111  */
112 
113 /* qdisc ->enqueue() return codes. */
114 #define NET_XMIT_SUCCESS	0x00
115 #define NET_XMIT_DROP		0x01	/* skb dropped			*/
116 #define NET_XMIT_CN		0x02	/* congestion notification	*/
117 #define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */
118 
119 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
120  * indicates that the device will soon be dropping packets, or already drops
121  * some packets of the same priority; prompting us to send less aggressively. */
122 #define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
123 #define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
124 
125 /* Driver transmit return codes */
126 #define NETDEV_TX_MASK		0xf0
127 
128 enum netdev_tx {
129 	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
130 	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
131 	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy*/
132 };
133 typedef enum netdev_tx netdev_tx_t;
134 
135 /*
136  * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
137  * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
138  */
dev_xmit_complete(int rc)139 static inline bool dev_xmit_complete(int rc)
140 {
141 	/*
142 	 * Positive cases with an skb consumed by a driver:
143 	 * - successful transmission (rc == NETDEV_TX_OK)
144 	 * - error while transmitting (rc < 0)
145 	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
146 	 */
147 	if (likely(rc < NET_XMIT_MASK))
148 		return true;
149 
150 	return false;
151 }
152 
153 /*
154  *	Compute the worst-case header length according to the protocols
155  *	used.
156  */
157 
158 #if defined(CONFIG_HYPERV_NET)
159 # define LL_MAX_HEADER 128
160 #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
161 # if defined(CONFIG_MAC80211_MESH)
162 #  define LL_MAX_HEADER 128
163 # else
164 #  define LL_MAX_HEADER 96
165 # endif
166 #else
167 # define LL_MAX_HEADER 32
168 #endif
169 
170 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
171     !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
172 #define MAX_HEADER LL_MAX_HEADER
173 #else
174 #define MAX_HEADER (LL_MAX_HEADER + 48)
175 #endif
176 
177 /*
178  *	Old network device statistics. Fields are native words
179  *	(unsigned long) so they can be read and written atomically.
180  */
181 
182 #define NET_DEV_STAT(FIELD)			\
183 	union {					\
184 		unsigned long FIELD;		\
185 		atomic_long_t __##FIELD;	\
186 	}
187 
188 struct net_device_stats {
189 	NET_DEV_STAT(rx_packets);
190 	NET_DEV_STAT(tx_packets);
191 	NET_DEV_STAT(rx_bytes);
192 	NET_DEV_STAT(tx_bytes);
193 	NET_DEV_STAT(rx_errors);
194 	NET_DEV_STAT(tx_errors);
195 	NET_DEV_STAT(rx_dropped);
196 	NET_DEV_STAT(tx_dropped);
197 	NET_DEV_STAT(multicast);
198 	NET_DEV_STAT(collisions);
199 	NET_DEV_STAT(rx_length_errors);
200 	NET_DEV_STAT(rx_over_errors);
201 	NET_DEV_STAT(rx_crc_errors);
202 	NET_DEV_STAT(rx_frame_errors);
203 	NET_DEV_STAT(rx_fifo_errors);
204 	NET_DEV_STAT(rx_missed_errors);
205 	NET_DEV_STAT(tx_aborted_errors);
206 	NET_DEV_STAT(tx_carrier_errors);
207 	NET_DEV_STAT(tx_fifo_errors);
208 	NET_DEV_STAT(tx_heartbeat_errors);
209 	NET_DEV_STAT(tx_window_errors);
210 	NET_DEV_STAT(rx_compressed);
211 	NET_DEV_STAT(tx_compressed);
212 };
213 #undef NET_DEV_STAT
214 
215 /* per-cpu stats, allocated on demand.
216  * Try to fit them in a single cache line, for dev_get_stats() sake.
217  */
218 struct net_device_core_stats {
219 	unsigned long	rx_dropped;
220 	unsigned long	tx_dropped;
221 	unsigned long	rx_nohandler;
222 	unsigned long	rx_otherhost_dropped;
223 } __aligned(4 * sizeof(unsigned long));
224 
225 #include <linux/cache.h>
226 #include <linux/skbuff.h>
227 
228 struct neighbour;
229 struct neigh_parms;
230 struct sk_buff;
231 
232 struct netdev_hw_addr {
233 	struct list_head	list;
234 	struct rb_node		node;
235 	unsigned char		addr[MAX_ADDR_LEN];
236 	unsigned char		type;
237 #define NETDEV_HW_ADDR_T_LAN		1
238 #define NETDEV_HW_ADDR_T_SAN		2
239 #define NETDEV_HW_ADDR_T_UNICAST	3
240 #define NETDEV_HW_ADDR_T_MULTICAST	4
241 	bool			global_use;
242 	int			sync_cnt;
243 	int			refcount;
244 	int			synced;
245 	struct rcu_head		rcu_head;
246 };
247 
248 struct netdev_hw_addr_list {
249 	struct list_head	list;
250 	int			count;
251 
252 	/* Auxiliary tree for faster lookup on addition and deletion */
253 	struct rb_root		tree;
254 };
255 
256 #define netdev_hw_addr_list_count(l) ((l)->count)
257 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
258 #define netdev_hw_addr_list_for_each(ha, l) \
259 	list_for_each_entry(ha, &(l)->list, list)
260 
261 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
262 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
263 #define netdev_for_each_uc_addr(ha, dev) \
264 	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
265 #define netdev_for_each_synced_uc_addr(_ha, _dev) \
266 	netdev_for_each_uc_addr((_ha), (_dev)) \
267 		if ((_ha)->sync_cnt)
268 
269 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
270 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
271 #define netdev_for_each_mc_addr(ha, dev) \
272 	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
273 #define netdev_for_each_synced_mc_addr(_ha, _dev) \
274 	netdev_for_each_mc_addr((_ha), (_dev)) \
275 		if ((_ha)->sync_cnt)
276 
277 struct hh_cache {
278 	unsigned int	hh_len;
279 	seqlock_t	hh_lock;
280 
281 	/* cached hardware header; allow for machine alignment needs.        */
282 #define HH_DATA_MOD	16
283 #define HH_DATA_OFF(__len) \
284 	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
285 #define HH_DATA_ALIGN(__len) \
286 	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
287 	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
288 };
289 
290 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
291  * Alternative is:
292  *   dev->hard_header_len ? (dev->hard_header_len +
293  *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
294  *
295  * We could use other alignment values, but we must maintain the
296  * relationship HH alignment <= LL alignment.
297  */
298 #define LL_RESERVED_SPACE(dev) \
299 	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
300 	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
301 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
302 	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
303 	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
304 
305 struct header_ops {
306 	int	(*create) (struct sk_buff *skb, struct net_device *dev,
307 			   unsigned short type, const void *daddr,
308 			   const void *saddr, unsigned int len);
309 	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
310 	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
311 	void	(*cache_update)(struct hh_cache *hh,
312 				const struct net_device *dev,
313 				const unsigned char *haddr);
314 	bool	(*validate)(const char *ll_header, unsigned int len);
315 	__be16	(*parse_protocol)(const struct sk_buff *skb);
316 };
317 
318 /* These flag bits are private to the generic network queueing
319  * layer; they may not be explicitly referenced by any other
320  * code.
321  */
322 
323 enum netdev_state_t {
324 	__LINK_STATE_START,
325 	__LINK_STATE_PRESENT,
326 	__LINK_STATE_NOCARRIER,
327 	__LINK_STATE_LINKWATCH_PENDING,
328 	__LINK_STATE_DORMANT,
329 	__LINK_STATE_TESTING,
330 };
331 
332 struct gro_list {
333 	struct list_head	list;
334 	int			count;
335 };
336 
337 /*
338  * size of gro hash buckets, must less than bit number of
339  * napi_struct::gro_bitmask
340  */
341 #define GRO_HASH_BUCKETS	8
342 
343 /*
344  * Structure for NAPI scheduling similar to tasklet but with weighting
345  */
346 struct napi_struct {
347 	/* The poll_list must only be managed by the entity which
348 	 * changes the state of the NAPI_STATE_SCHED bit.  This means
349 	 * whoever atomically sets that bit can add this napi_struct
350 	 * to the per-CPU poll_list, and whoever clears that bit
351 	 * can remove from the list right before clearing the bit.
352 	 */
353 	struct list_head	poll_list;
354 
355 	unsigned long		state;
356 	int			weight;
357 	int			defer_hard_irqs_count;
358 	unsigned long		gro_bitmask;
359 	int			(*poll)(struct napi_struct *, int);
360 #ifdef CONFIG_NETPOLL
361 	/* CPU actively polling if netpoll is configured */
362 	int			poll_owner;
363 #endif
364 	/* CPU on which NAPI has been scheduled for processing */
365 	int			list_owner;
366 	struct net_device	*dev;
367 	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
368 	struct sk_buff		*skb;
369 	struct list_head	rx_list; /* Pending GRO_NORMAL skbs */
370 	int			rx_count; /* length of rx_list */
371 	unsigned int		napi_id;
372 	struct hrtimer		timer;
373 	struct task_struct	*thread;
374 	/* control-path-only fields follow */
375 	struct list_head	dev_list;
376 	struct hlist_node	napi_hash_node;
377 	int			irq;
378 };
379 
380 enum {
381 	NAPI_STATE_SCHED,		/* Poll is scheduled */
382 	NAPI_STATE_MISSED,		/* reschedule a napi */
383 	NAPI_STATE_DISABLE,		/* Disable pending */
384 	NAPI_STATE_NPSVC,		/* Netpoll - don't dequeue from poll_list */
385 	NAPI_STATE_LISTED,		/* NAPI added to system lists */
386 	NAPI_STATE_NO_BUSY_POLL,	/* Do not add in napi_hash, no busy polling */
387 	NAPI_STATE_IN_BUSY_POLL,	/* sk_busy_loop() owns this NAPI */
388 	NAPI_STATE_PREFER_BUSY_POLL,	/* prefer busy-polling over softirq processing*/
389 	NAPI_STATE_THREADED,		/* The poll is performed inside its own thread*/
390 	NAPI_STATE_SCHED_THREADED,	/* Napi is currently scheduled in threaded mode */
391 };
392 
393 enum {
394 	NAPIF_STATE_SCHED		= BIT(NAPI_STATE_SCHED),
395 	NAPIF_STATE_MISSED		= BIT(NAPI_STATE_MISSED),
396 	NAPIF_STATE_DISABLE		= BIT(NAPI_STATE_DISABLE),
397 	NAPIF_STATE_NPSVC		= BIT(NAPI_STATE_NPSVC),
398 	NAPIF_STATE_LISTED		= BIT(NAPI_STATE_LISTED),
399 	NAPIF_STATE_NO_BUSY_POLL	= BIT(NAPI_STATE_NO_BUSY_POLL),
400 	NAPIF_STATE_IN_BUSY_POLL	= BIT(NAPI_STATE_IN_BUSY_POLL),
401 	NAPIF_STATE_PREFER_BUSY_POLL	= BIT(NAPI_STATE_PREFER_BUSY_POLL),
402 	NAPIF_STATE_THREADED		= BIT(NAPI_STATE_THREADED),
403 	NAPIF_STATE_SCHED_THREADED	= BIT(NAPI_STATE_SCHED_THREADED),
404 };
405 
406 enum gro_result {
407 	GRO_MERGED,
408 	GRO_MERGED_FREE,
409 	GRO_HELD,
410 	GRO_NORMAL,
411 	GRO_CONSUMED,
412 };
413 typedef enum gro_result gro_result_t;
414 
415 /*
416  * enum rx_handler_result - Possible return values for rx_handlers.
417  * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
418  * further.
419  * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
420  * case skb->dev was changed by rx_handler.
421  * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
422  * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
423  *
424  * rx_handlers are functions called from inside __netif_receive_skb(), to do
425  * special processing of the skb, prior to delivery to protocol handlers.
426  *
427  * Currently, a net_device can only have a single rx_handler registered. Trying
428  * to register a second rx_handler will return -EBUSY.
429  *
430  * To register a rx_handler on a net_device, use netdev_rx_handler_register().
431  * To unregister a rx_handler on a net_device, use
432  * netdev_rx_handler_unregister().
433  *
434  * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
435  * do with the skb.
436  *
437  * If the rx_handler consumed the skb in some way, it should return
438  * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
439  * the skb to be delivered in some other way.
440  *
441  * If the rx_handler changed skb->dev, to divert the skb to another
442  * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
443  * new device will be called if it exists.
444  *
445  * If the rx_handler decides the skb should be ignored, it should return
446  * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
447  * are registered on exact device (ptype->dev == skb->dev).
448  *
449  * If the rx_handler didn't change skb->dev, but wants the skb to be normally
450  * delivered, it should return RX_HANDLER_PASS.
451  *
452  * A device without a registered rx_handler will behave as if rx_handler
453  * returned RX_HANDLER_PASS.
454  */
455 
456 enum rx_handler_result {
457 	RX_HANDLER_CONSUMED,
458 	RX_HANDLER_ANOTHER,
459 	RX_HANDLER_EXACT,
460 	RX_HANDLER_PASS,
461 };
462 typedef enum rx_handler_result rx_handler_result_t;
463 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
464 
465 void __napi_schedule(struct napi_struct *n);
466 void __napi_schedule_irqoff(struct napi_struct *n);
467 
napi_disable_pending(struct napi_struct * n)468 static inline bool napi_disable_pending(struct napi_struct *n)
469 {
470 	return test_bit(NAPI_STATE_DISABLE, &n->state);
471 }
472 
napi_prefer_busy_poll(struct napi_struct * n)473 static inline bool napi_prefer_busy_poll(struct napi_struct *n)
474 {
475 	return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
476 }
477 
478 /**
479  * napi_is_scheduled - test if NAPI is scheduled
480  * @n: NAPI context
481  *
482  * This check is "best-effort". With no locking implemented,
483  * a NAPI can be scheduled or terminate right after this check
484  * and produce not precise results.
485  *
486  * NAPI_STATE_SCHED is an internal state, napi_is_scheduled
487  * should not be used normally and napi_schedule should be
488  * used instead.
489  *
490  * Use only if the driver really needs to check if a NAPI
491  * is scheduled for example in the context of delayed timer
492  * that can be skipped if a NAPI is already scheduled.
493  *
494  * Return True if NAPI is scheduled, False otherwise.
495  */
napi_is_scheduled(struct napi_struct * n)496 static inline bool napi_is_scheduled(struct napi_struct *n)
497 {
498 	return test_bit(NAPI_STATE_SCHED, &n->state);
499 }
500 
501 bool napi_schedule_prep(struct napi_struct *n);
502 
503 /**
504  *	napi_schedule - schedule NAPI poll
505  *	@n: NAPI context
506  *
507  * Schedule NAPI poll routine to be called if it is not already
508  * running.
509  * Return true if we schedule a NAPI or false if not.
510  * Refer to napi_schedule_prep() for additional reason on why
511  * a NAPI might not be scheduled.
512  */
napi_schedule(struct napi_struct * n)513 static inline bool napi_schedule(struct napi_struct *n)
514 {
515 	if (napi_schedule_prep(n)) {
516 		__napi_schedule(n);
517 		return true;
518 	}
519 
520 	return false;
521 }
522 
523 /**
524  *	napi_schedule_irqoff - schedule NAPI poll
525  *	@n: NAPI context
526  *
527  * Variant of napi_schedule(), assuming hard irqs are masked.
528  */
napi_schedule_irqoff(struct napi_struct * n)529 static inline void napi_schedule_irqoff(struct napi_struct *n)
530 {
531 	if (napi_schedule_prep(n))
532 		__napi_schedule_irqoff(n);
533 }
534 
535 /**
536  * napi_complete_done - NAPI processing complete
537  * @n: NAPI context
538  * @work_done: number of packets processed
539  *
540  * Mark NAPI processing as complete. Should only be called if poll budget
541  * has not been completely consumed.
542  * Prefer over napi_complete().
543  * Return false if device should avoid rearming interrupts.
544  */
545 bool napi_complete_done(struct napi_struct *n, int work_done);
546 
napi_complete(struct napi_struct * n)547 static inline bool napi_complete(struct napi_struct *n)
548 {
549 	return napi_complete_done(n, 0);
550 }
551 
552 int dev_set_threaded(struct net_device *dev, bool threaded);
553 
554 /**
555  *	napi_disable - prevent NAPI from scheduling
556  *	@n: NAPI context
557  *
558  * Stop NAPI from being scheduled on this context.
559  * Waits till any outstanding processing completes.
560  */
561 void napi_disable(struct napi_struct *n);
562 
563 void napi_enable(struct napi_struct *n);
564 
565 /**
566  *	napi_synchronize - wait until NAPI is not running
567  *	@n: NAPI context
568  *
569  * Wait until NAPI is done being scheduled on this context.
570  * Waits till any outstanding processing completes but
571  * does not disable future activations.
572  */
napi_synchronize(const struct napi_struct * n)573 static inline void napi_synchronize(const struct napi_struct *n)
574 {
575 	if (IS_ENABLED(CONFIG_SMP))
576 		while (test_bit(NAPI_STATE_SCHED, &n->state))
577 			msleep(1);
578 	else
579 		barrier();
580 }
581 
582 /**
583  *	napi_if_scheduled_mark_missed - if napi is running, set the
584  *	NAPIF_STATE_MISSED
585  *	@n: NAPI context
586  *
587  * If napi is running, set the NAPIF_STATE_MISSED, and return true if
588  * NAPI is scheduled.
589  **/
napi_if_scheduled_mark_missed(struct napi_struct * n)590 static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
591 {
592 	unsigned long val, new;
593 
594 	val = READ_ONCE(n->state);
595 	do {
596 		if (val & NAPIF_STATE_DISABLE)
597 			return true;
598 
599 		if (!(val & NAPIF_STATE_SCHED))
600 			return false;
601 
602 		new = val | NAPIF_STATE_MISSED;
603 	} while (!try_cmpxchg(&n->state, &val, new));
604 
605 	return true;
606 }
607 
608 enum netdev_queue_state_t {
609 	__QUEUE_STATE_DRV_XOFF,
610 	__QUEUE_STATE_STACK_XOFF,
611 	__QUEUE_STATE_FROZEN,
612 };
613 
614 #define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
615 #define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
616 #define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)
617 
618 #define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
619 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
620 					QUEUE_STATE_FROZEN)
621 #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
622 					QUEUE_STATE_FROZEN)
623 
624 /*
625  * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
626  * netif_tx_* functions below are used to manipulate this flag.  The
627  * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
628  * queue independently.  The netif_xmit_*stopped functions below are called
629  * to check if the queue has been stopped by the driver or stack (either
630  * of the XOFF bits are set in the state).  Drivers should not need to call
631  * netif_xmit*stopped functions, they should only be using netif_tx_*.
632  */
633 
634 struct netdev_queue {
635 /*
636  * read-mostly part
637  */
638 	struct net_device	*dev;
639 	netdevice_tracker	dev_tracker;
640 
641 	struct Qdisc __rcu	*qdisc;
642 	struct Qdisc __rcu	*qdisc_sleeping;
643 #ifdef CONFIG_SYSFS
644 	struct kobject		kobj;
645 #endif
646 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
647 	int			numa_node;
648 #endif
649 	unsigned long		tx_maxrate;
650 	/*
651 	 * Number of TX timeouts for this queue
652 	 * (/sys/class/net/DEV/Q/trans_timeout)
653 	 */
654 	atomic_long_t		trans_timeout;
655 
656 	/* Subordinate device that the queue has been assigned to */
657 	struct net_device	*sb_dev;
658 #ifdef CONFIG_XDP_SOCKETS
659 	struct xsk_buff_pool    *pool;
660 #endif
661 	/* NAPI instance for the queue
662 	 * Readers and writers must hold RTNL
663 	 */
664 	struct napi_struct      *napi;
665 /*
666  * write-mostly part
667  */
668 	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
669 	int			xmit_lock_owner;
670 	/*
671 	 * Time (in jiffies) of last Tx
672 	 */
673 	unsigned long		trans_start;
674 
675 	unsigned long		state;
676 
677 #ifdef CONFIG_BQL
678 	struct dql		dql;
679 #endif
680 } ____cacheline_aligned_in_smp;
681 
682 extern int sysctl_fb_tunnels_only_for_init_net;
683 extern int sysctl_devconf_inherit_init_net;
684 
685 /*
686  * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
687  *                                     == 1 : For initns only
688  *                                     == 2 : For none.
689  */
net_has_fallback_tunnels(const struct net * net)690 static inline bool net_has_fallback_tunnels(const struct net *net)
691 {
692 #if IS_ENABLED(CONFIG_SYSCTL)
693 	int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);
694 
695 	return !fb_tunnels_only_for_init_net ||
696 		(net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
697 #else
698 	return true;
699 #endif
700 }
701 
net_inherit_devconf(void)702 static inline int net_inherit_devconf(void)
703 {
704 #if IS_ENABLED(CONFIG_SYSCTL)
705 	return READ_ONCE(sysctl_devconf_inherit_init_net);
706 #else
707 	return 0;
708 #endif
709 }
710 
netdev_queue_numa_node_read(const struct netdev_queue * q)711 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
712 {
713 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
714 	return q->numa_node;
715 #else
716 	return NUMA_NO_NODE;
717 #endif
718 }
719 
netdev_queue_numa_node_write(struct netdev_queue * q,int node)720 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
721 {
722 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
723 	q->numa_node = node;
724 #endif
725 }
726 
727 #ifdef CONFIG_RFS_ACCEL
728 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
729 			 u16 filter_id);
730 #endif
731 
732 /* XPS map type and offset of the xps map within net_device->xps_maps[]. */
733 enum xps_map_type {
734 	XPS_CPUS = 0,
735 	XPS_RXQS,
736 	XPS_MAPS_MAX,
737 };
738 
739 #ifdef CONFIG_XPS
740 /*
741  * This structure holds an XPS map which can be of variable length.  The
742  * map is an array of queues.
743  */
744 struct xps_map {
745 	unsigned int len;
746 	unsigned int alloc_len;
747 	struct rcu_head rcu;
748 	u16 queues[];
749 };
750 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
751 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
752        - sizeof(struct xps_map)) / sizeof(u16))
753 
754 /*
755  * This structure holds all XPS maps for device.  Maps are indexed by CPU.
756  *
757  * We keep track of the number of cpus/rxqs used when the struct is allocated,
758  * in nr_ids. This will help not accessing out-of-bound memory.
759  *
760  * We keep track of the number of traffic classes used when the struct is
761  * allocated, in num_tc. This will be used to navigate the maps, to ensure we're
762  * not crossing its upper bound, as the original dev->num_tc can be updated in
763  * the meantime.
764  */
765 struct xps_dev_maps {
766 	struct rcu_head rcu;
767 	unsigned int nr_ids;
768 	s16 num_tc;
769 	struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
770 };
771 
772 #define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
773 	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
774 
775 #define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
776 	(_rxqs * (_tcs) * sizeof(struct xps_map *)))
777 
778 #endif /* CONFIG_XPS */
779 
780 #define TC_MAX_QUEUE	16
781 #define TC_BITMASK	15
782 /* HW offloaded queuing disciplines txq count and offset maps */
783 struct netdev_tc_txq {
784 	u16 count;
785 	u16 offset;
786 };
787 
788 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
789 /*
790  * This structure is to hold information about the device
791  * configured to run FCoE protocol stack.
792  */
793 struct netdev_fcoe_hbainfo {
794 	char	manufacturer[64];
795 	char	serial_number[64];
796 	char	hardware_version[64];
797 	char	driver_version[64];
798 	char	optionrom_version[64];
799 	char	firmware_version[64];
800 	char	model[256];
801 	char	model_description[256];
802 };
803 #endif
804 
805 #define MAX_PHYS_ITEM_ID_LEN 32
806 
807 /* This structure holds a unique identifier to identify some
808  * physical item (port for example) used by a netdevice.
809  */
810 struct netdev_phys_item_id {
811 	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
812 	unsigned char id_len;
813 };
814 
netdev_phys_item_id_same(struct netdev_phys_item_id * a,struct netdev_phys_item_id * b)815 static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
816 					    struct netdev_phys_item_id *b)
817 {
818 	return a->id_len == b->id_len &&
819 	       memcmp(a->id, b->id, a->id_len) == 0;
820 }
821 
822 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
823 				       struct sk_buff *skb,
824 				       struct net_device *sb_dev);
825 
826 enum net_device_path_type {
827 	DEV_PATH_ETHERNET = 0,
828 	DEV_PATH_VLAN,
829 	DEV_PATH_BRIDGE,
830 	DEV_PATH_PPPOE,
831 	DEV_PATH_DSA,
832 	DEV_PATH_MTK_WDMA,
833 };
834 
835 struct net_device_path {
836 	enum net_device_path_type	type;
837 	const struct net_device		*dev;
838 	union {
839 		struct {
840 			u16		id;
841 			__be16		proto;
842 			u8		h_dest[ETH_ALEN];
843 		} encap;
844 		struct {
845 			enum {
846 				DEV_PATH_BR_VLAN_KEEP,
847 				DEV_PATH_BR_VLAN_TAG,
848 				DEV_PATH_BR_VLAN_UNTAG,
849 				DEV_PATH_BR_VLAN_UNTAG_HW,
850 			}		vlan_mode;
851 			u16		vlan_id;
852 			__be16		vlan_proto;
853 		} bridge;
854 		struct {
855 			int port;
856 			u16 proto;
857 		} dsa;
858 		struct {
859 			u8 wdma_idx;
860 			u8 queue;
861 			u16 wcid;
862 			u8 bss;
863 			u8 amsdu;
864 		} mtk_wdma;
865 	};
866 };
867 
868 #define NET_DEVICE_PATH_STACK_MAX	5
869 #define NET_DEVICE_PATH_VLAN_MAX	2
870 
871 struct net_device_path_stack {
872 	int			num_paths;
873 	struct net_device_path	path[NET_DEVICE_PATH_STACK_MAX];
874 };
875 
876 struct net_device_path_ctx {
877 	const struct net_device *dev;
878 	u8			daddr[ETH_ALEN];
879 
880 	int			num_vlans;
881 	struct {
882 		u16		id;
883 		__be16		proto;
884 	} vlan[NET_DEVICE_PATH_VLAN_MAX];
885 };
886 
887 enum tc_setup_type {
888 	TC_QUERY_CAPS,
889 	TC_SETUP_QDISC_MQPRIO,
890 	TC_SETUP_CLSU32,
891 	TC_SETUP_CLSFLOWER,
892 	TC_SETUP_CLSMATCHALL,
893 	TC_SETUP_CLSBPF,
894 	TC_SETUP_BLOCK,
895 	TC_SETUP_QDISC_CBS,
896 	TC_SETUP_QDISC_RED,
897 	TC_SETUP_QDISC_PRIO,
898 	TC_SETUP_QDISC_MQ,
899 	TC_SETUP_QDISC_ETF,
900 	TC_SETUP_ROOT_QDISC,
901 	TC_SETUP_QDISC_GRED,
902 	TC_SETUP_QDISC_TAPRIO,
903 	TC_SETUP_FT,
904 	TC_SETUP_QDISC_ETS,
905 	TC_SETUP_QDISC_TBF,
906 	TC_SETUP_QDISC_FIFO,
907 	TC_SETUP_QDISC_HTB,
908 	TC_SETUP_ACT,
909 };
910 
911 /* These structures hold the attributes of bpf state that are being passed
912  * to the netdevice through the bpf op.
913  */
914 enum bpf_netdev_command {
915 	/* Set or clear a bpf program used in the earliest stages of packet
916 	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
917 	 * is responsible for calling bpf_prog_put on any old progs that are
918 	 * stored. In case of error, the callee need not release the new prog
919 	 * reference, but on success it takes ownership and must bpf_prog_put
920 	 * when it is no longer used.
921 	 */
922 	XDP_SETUP_PROG,
923 	XDP_SETUP_PROG_HW,
924 	/* BPF program for offload callbacks, invoked at program load time. */
925 	BPF_OFFLOAD_MAP_ALLOC,
926 	BPF_OFFLOAD_MAP_FREE,
927 	XDP_SETUP_XSK_POOL,
928 };
929 
930 struct bpf_prog_offload_ops;
931 struct netlink_ext_ack;
932 struct xdp_umem;
933 struct xdp_dev_bulk_queue;
934 struct bpf_xdp_link;
935 
936 enum bpf_xdp_mode {
937 	XDP_MODE_SKB = 0,
938 	XDP_MODE_DRV = 1,
939 	XDP_MODE_HW = 2,
940 	__MAX_XDP_MODE
941 };
942 
943 struct bpf_xdp_entity {
944 	struct bpf_prog *prog;
945 	struct bpf_xdp_link *link;
946 };
947 
948 struct netdev_bpf {
949 	enum bpf_netdev_command command;
950 	union {
951 		/* XDP_SETUP_PROG */
952 		struct {
953 			u32 flags;
954 			struct bpf_prog *prog;
955 			struct netlink_ext_ack *extack;
956 		};
957 		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
958 		struct {
959 			struct bpf_offloaded_map *offmap;
960 		};
961 		/* XDP_SETUP_XSK_POOL */
962 		struct {
963 			struct xsk_buff_pool *pool;
964 			u16 queue_id;
965 		} xsk;
966 	};
967 };
968 
969 /* Flags for ndo_xsk_wakeup. */
970 #define XDP_WAKEUP_RX (1 << 0)
971 #define XDP_WAKEUP_TX (1 << 1)
972 
973 #ifdef CONFIG_XFRM_OFFLOAD
974 struct xfrmdev_ops {
975 	int	(*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack);
976 	void	(*xdo_dev_state_delete) (struct xfrm_state *x);
977 	void	(*xdo_dev_state_free) (struct xfrm_state *x);
978 	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
979 				       struct xfrm_state *x);
980 	void	(*xdo_dev_state_advance_esn) (struct xfrm_state *x);
981 	void	(*xdo_dev_state_update_stats) (struct xfrm_state *x);
982 	int	(*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
983 	void	(*xdo_dev_policy_delete) (struct xfrm_policy *x);
984 	void	(*xdo_dev_policy_free) (struct xfrm_policy *x);
985 };
986 #endif
987 
988 struct dev_ifalias {
989 	struct rcu_head rcuhead;
990 	char ifalias[];
991 };
992 
993 struct devlink;
994 struct tlsdev_ops;
995 
996 struct netdev_net_notifier {
997 	struct list_head list;
998 	struct notifier_block *nb;
999 };
1000 
1001 /*
1002  * This structure defines the management hooks for network devices.
1003  * The following hooks can be defined; unless noted otherwise, they are
1004  * optional and can be filled with a null pointer.
1005  *
1006  * int (*ndo_init)(struct net_device *dev);
1007  *     This function is called once when a network device is registered.
1008  *     The network device can use this for any late stage initialization
1009  *     or semantic validation. It can fail with an error code which will
1010  *     be propagated back to register_netdev.
1011  *
1012  * void (*ndo_uninit)(struct net_device *dev);
1013  *     This function is called when device is unregistered or when registration
1014  *     fails. It is not called if init fails.
1015  *
1016  * int (*ndo_open)(struct net_device *dev);
1017  *     This function is called when a network device transitions to the up
1018  *     state.
1019  *
1020  * int (*ndo_stop)(struct net_device *dev);
1021  *     This function is called when a network device transitions to the down
1022  *     state.
1023  *
1024  * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1025  *                               struct net_device *dev);
1026  *	Called when a packet needs to be transmitted.
1027  *	Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
1028  *	the queue before that can happen; it's for obsolete devices and weird
1029  *	corner cases, but the stack really does a non-trivial amount
1030  *	of useless work if you return NETDEV_TX_BUSY.
1031  *	Required; cannot be NULL.
1032  *
1033  * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1034  *					   struct net_device *dev
1035  *					   netdev_features_t features);
1036  *	Called by core transmit path to determine if device is capable of
1037  *	performing offload operations on a given packet. This is to give
1038  *	the device an opportunity to implement any restrictions that cannot
1039  *	be otherwise expressed by feature flags. The check is called with
1040  *	the set of features that the stack has calculated and it returns
1041  *	those the driver believes to be appropriate.
1042  *
1043  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
1044  *                         struct net_device *sb_dev);
1045  *	Called to decide which queue to use when device supports multiple
1046  *	transmit queues.
1047  *
1048  * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
1049  *	This function is called to allow device receiver to make
1050  *	changes to configuration when multicast or promiscuous is enabled.
1051  *
1052  * void (*ndo_set_rx_mode)(struct net_device *dev);
1053  *	This function is called device changes address list filtering.
1054  *	If driver handles unicast address filtering, it should set
1055  *	IFF_UNICAST_FLT in its priv_flags.
1056  *
1057  * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
1058  *	This function  is called when the Media Access Control address
1059  *	needs to be changed. If this interface is not defined, the
1060  *	MAC address can not be changed.
1061  *
1062  * int (*ndo_validate_addr)(struct net_device *dev);
1063  *	Test if Media Access Control address is valid for the device.
1064  *
1065  * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
1066  *	Old-style ioctl entry point. This is used internally by the
1067  *	appletalk and ieee802154 subsystems but is no longer called by
1068  *	the device ioctl handler.
1069  *
1070  * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
1071  *	Used by the bonding driver for its device specific ioctls:
1072  *	SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE,
1073  *	SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY
1074  *
1075  * * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
1076  *	Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG,
1077  *	SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP.
1078  *
1079  * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
1080  *	Used to set network devices bus interface parameters. This interface
1081  *	is retained for legacy reasons; new devices should use the bus
1082  *	interface (PCI) for low level management.
1083  *
1084  * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
1085  *	Called when a user wants to change the Maximum Transfer Unit
1086  *	of a device.
1087  *
1088  * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
1089  *	Callback used when the transmitter has not made any progress
1090  *	for dev->watchdog ticks.
1091  *
1092  * void (*ndo_get_stats64)(struct net_device *dev,
1093  *                         struct rtnl_link_stats64 *storage);
1094  * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1095  *	Called when a user wants to get the network device usage
1096  *	statistics. Drivers must do one of the following:
1097  *	1. Define @ndo_get_stats64 to fill in a zero-initialised
1098  *	   rtnl_link_stats64 structure passed by the caller.
1099  *	2. Define @ndo_get_stats to update a net_device_stats structure
1100  *	   (which should normally be dev->stats) and return a pointer to
1101  *	   it. The structure may be changed asynchronously only if each
1102  *	   field is written atomically.
1103  *	3. Update dev->stats asynchronously and atomically, and define
1104  *	   neither operation.
1105  *
1106  * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
1107  *	Return true if this device supports offload stats of this attr_id.
1108  *
1109  * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
1110  *	void *attr_data)
1111  *	Get statistics for offload operations by attr_id. Write it into the
1112  *	attr_data pointer.
1113  *
1114  * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
1115  *	If device supports VLAN filtering this function is called when a
1116  *	VLAN id is registered.
1117  *
1118  * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
1119  *	If device supports VLAN filtering this function is called when a
1120  *	VLAN id is unregistered.
1121  *
1122  * void (*ndo_poll_controller)(struct net_device *dev);
1123  *
1124  *	SR-IOV management functions.
1125  * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
1126  * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
1127  *			  u8 qos, __be16 proto);
1128  * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
1129  *			  int max_tx_rate);
1130  * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
1131  * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
1132  * int (*ndo_get_vf_config)(struct net_device *dev,
1133  *			    int vf, struct ifla_vf_info *ivf);
1134  * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
1135  * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
1136  *			  struct nlattr *port[]);
1137  *
1138  *      Enable or disable the VF ability to query its RSS Redirection Table and
1139  *      Hash Key. This is needed since on some devices VF share this information
1140  *      with PF and querying it may introduce a theoretical security risk.
1141  * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
1142  * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
1143  * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
1144  *		       void *type_data);
1145  *	Called to setup any 'tc' scheduler, classifier or action on @dev.
1146  *	This is always called from the stack with the rtnl lock held and netif
1147  *	tx queues stopped. This allows the netdevice to perform queue
1148  *	management safely.
1149  *
1150  *	Fiber Channel over Ethernet (FCoE) offload functions.
1151  * int (*ndo_fcoe_enable)(struct net_device *dev);
1152  *	Called when the FCoE protocol stack wants to start using LLD for FCoE
1153  *	so the underlying device can perform whatever needed configuration or
1154  *	initialization to support acceleration of FCoE traffic.
1155  *
1156  * int (*ndo_fcoe_disable)(struct net_device *dev);
1157  *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
1158  *	so the underlying device can perform whatever needed clean-ups to
1159  *	stop supporting acceleration of FCoE traffic.
1160  *
1161  * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
1162  *			     struct scatterlist *sgl, unsigned int sgc);
1163  *	Called when the FCoE Initiator wants to initialize an I/O that
1164  *	is a possible candidate for Direct Data Placement (DDP). The LLD can
1165  *	perform necessary setup and returns 1 to indicate the device is set up
1166  *	successfully to perform DDP on this I/O, otherwise this returns 0.
1167  *
1168  * int (*ndo_fcoe_ddp_done)(struct net_device *dev,  u16 xid);
1169  *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
1170  *	indicated by the FC exchange id 'xid', so the underlying device can
1171  *	clean up and reuse resources for later DDP requests.
1172  *
1173  * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
1174  *			      struct scatterlist *sgl, unsigned int sgc);
1175  *	Called when the FCoE Target wants to initialize an I/O that
1176  *	is a possible candidate for Direct Data Placement (DDP). The LLD can
1177  *	perform necessary setup and returns 1 to indicate the device is set up
1178  *	successfully to perform DDP on this I/O, otherwise this returns 0.
1179  *
1180  * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1181  *			       struct netdev_fcoe_hbainfo *hbainfo);
1182  *	Called when the FCoE Protocol stack wants information on the underlying
1183  *	device. This information is utilized by the FCoE protocol stack to
1184  *	register attributes with Fiber Channel management service as per the
1185  *	FC-GS Fabric Device Management Information(FDMI) specification.
1186  *
1187  * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
1188  *	Called when the underlying device wants to override default World Wide
1189  *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
1190  *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
1191  *	protocol stack to use.
1192  *
1193  *	RFS acceleration.
1194  * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
1195  *			    u16 rxq_index, u32 flow_id);
1196  *	Set hardware filter for RFS.  rxq_index is the target queue index;
1197  *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
1198  *	Return the filter ID on success, or a negative error code.
1199  *
1200  *	Slave management functions (for bridge, bonding, etc).
1201  * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
1202  *	Called to make another netdev an underling.
1203  *
1204  * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
1205  *	Called to release previously enslaved netdev.
1206  *
1207  * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
1208  *					    struct sk_buff *skb,
1209  *					    bool all_slaves);
1210  *	Get the xmit slave of master device. If all_slaves is true, function
1211  *	assume all the slaves can transmit.
1212  *
1213  *      Feature/offload setting functions.
1214  * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1215  *		netdev_features_t features);
1216  *	Adjusts the requested feature flags according to device-specific
1217  *	constraints, and returns the resulting flags. Must not modify
1218  *	the device state.
1219  *
1220  * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
1221  *	Called to update device configuration to new features. Passed
1222  *	feature set might be less than what was returned by ndo_fix_features()).
1223  *	Must return >0 or -errno if it changed dev->features itself.
1224  *
1225  * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
1226  *		      struct net_device *dev,
1227  *		      const unsigned char *addr, u16 vid, u16 flags,
1228  *		      struct netlink_ext_ack *extack);
1229  *	Adds an FDB entry to dev for addr.
1230  * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
1231  *		      struct net_device *dev,
1232  *		      const unsigned char *addr, u16 vid)
1233  *	Deletes the FDB entry from dev coresponding to addr.
1234  * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev,
1235  *			   struct netlink_ext_ack *extack);
1236  * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
1237  *		       struct net_device *dev, struct net_device *filter_dev,
1238  *		       int *idx)
1239  *	Used to add FDB entries to dump requests. Implementers should add
1240  *	entries to skb and update idx with the number of entries.
1241  *
1242  * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[],
1243  *		      u16 nlmsg_flags, struct netlink_ext_ack *extack);
1244  *	Adds an MDB entry to dev.
1245  * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
1246  *		      struct netlink_ext_ack *extack);
1247  *	Deletes the MDB entry from dev.
1248  * int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[],
1249  *			   struct netlink_ext_ack *extack);
1250  *	Bulk deletes MDB entries from dev.
1251  * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
1252  *		       struct netlink_callback *cb);
1253  *	Dumps MDB entries from dev. The first argument (marker) in the netlink
1254  *	callback is used by core rtnetlink code.
1255  *
1256  * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
1257  *			     u16 flags, struct netlink_ext_ack *extack)
1258  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
1259  *			     struct net_device *dev, u32 filter_mask,
1260  *			     int nlflags)
1261  * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
1262  *			     u16 flags);
1263  *
1264  * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
1265  *	Called to change device carrier. Soft-devices (like dummy, team, etc)
1266  *	which do not represent real hardware may define this to allow their
1267  *	userspace components to manage their virtual carrier state. Devices
1268  *	that determine carrier state from physical hardware properties (eg
1269  *	network cables) or protocol-dependent mechanisms (eg
1270  *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
1271  *
1272  * int (*ndo_get_phys_port_id)(struct net_device *dev,
1273  *			       struct netdev_phys_item_id *ppid);
1274  *	Called to get ID of physical port of this device. If driver does
1275  *	not implement this, it is assumed that the hw is not able to have
1276  *	multiple net devices on single physical port.
1277  *
1278  * int (*ndo_get_port_parent_id)(struct net_device *dev,
1279  *				 struct netdev_phys_item_id *ppid)
1280  *	Called to get the parent ID of the physical port of this device.
1281  *
1282  * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1283  *				 struct net_device *dev)
1284  *	Called by upper layer devices to accelerate switching or other
1285  *	station functionality into hardware. 'pdev is the lowerdev
1286  *	to use for the offload and 'dev' is the net device that will
1287  *	back the offload. Returns a pointer to the private structure
1288  *	the upper layer will maintain.
1289  * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
1290  *	Called by upper layer device to delete the station created
1291  *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
1292  *	the station and priv is the structure returned by the add
1293  *	operation.
1294  * int (*ndo_set_tx_maxrate)(struct net_device *dev,
1295  *			     int queue_index, u32 maxrate);
1296  *	Called when a user wants to set a max-rate limitation of specific
1297  *	TX queue.
1298  * int (*ndo_get_iflink)(const struct net_device *dev);
1299  *	Called to get the iflink value of this device.
1300  * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
1301  *	This function is used to get egress tunnel information for given skb.
1302  *	This is useful for retrieving outer tunnel header parameters while
1303  *	sampling packet.
1304  * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
1305  *	This function is used to specify the headroom that the skb must
1306  *	consider when allocation skb during packet reception. Setting
1307  *	appropriate rx headroom value allows avoiding skb head copy on
1308  *	forward. Setting a negative value resets the rx headroom to the
1309  *	default value.
1310  * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
1311  *	This function is used to set or query state related to XDP on the
1312  *	netdevice and manage BPF offload. See definition of
1313  *	enum bpf_netdev_command for details.
1314  * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
1315  *			u32 flags);
1316  *	This function is used to submit @n XDP packets for transmit on a
1317  *	netdevice. Returns number of frames successfully transmitted, frames
1318  *	that got dropped are freed/returned via xdp_return_frame().
1319  *	Returns negative number, means general error invoking ndo, meaning
1320  *	no frames were xmit'ed and core-caller will free all frames.
1321  * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
1322  *					        struct xdp_buff *xdp);
1323  *      Get the xmit slave of master device based on the xdp_buff.
1324  * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
1325  *      This function is used to wake up the softirq, ksoftirqd or kthread
1326  *	responsible for sending and/or receiving packets on a specific
1327  *	queue id bound to an AF_XDP socket. The flags field specifies if
1328  *	only RX, only Tx, or both should be woken up using the flags
1329  *	XDP_WAKEUP_RX and XDP_WAKEUP_TX.
1330  * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm_kern *p,
1331  *			 int cmd);
1332  *	Add, change, delete or get information on an IPv4 tunnel.
1333  * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
1334  *	If a device is paired with a peer device, return the peer instance.
1335  *	The caller must be under RCU read context.
1336  * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
1337  *     Get the forwarding path to reach the real device from the HW destination address
1338  * ktime_t (*ndo_get_tstamp)(struct net_device *dev,
1339  *			     const struct skb_shared_hwtstamps *hwtstamps,
1340  *			     bool cycles);
1341  *	Get hardware timestamp based on normal/adjustable time or free running
1342  *	cycle counter. This function is required if physical clock supports a
1343  *	free running cycle counter.
1344  *
1345  * int (*ndo_hwtstamp_get)(struct net_device *dev,
1346  *			   struct kernel_hwtstamp_config *kernel_config);
1347  *	Get the currently configured hardware timestamping parameters for the
1348  *	NIC device.
1349  *
1350  * int (*ndo_hwtstamp_set)(struct net_device *dev,
1351  *			   struct kernel_hwtstamp_config *kernel_config,
1352  *			   struct netlink_ext_ack *extack);
1353  *	Change the hardware timestamping parameters for NIC device.
1354  */
1355 struct net_device_ops {
1356 	int			(*ndo_init)(struct net_device *dev);
1357 	void			(*ndo_uninit)(struct net_device *dev);
1358 	int			(*ndo_open)(struct net_device *dev);
1359 	int			(*ndo_stop)(struct net_device *dev);
1360 	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
1361 						  struct net_device *dev);
1362 	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
1363 						      struct net_device *dev,
1364 						      netdev_features_t features);
1365 	u16			(*ndo_select_queue)(struct net_device *dev,
1366 						    struct sk_buff *skb,
1367 						    struct net_device *sb_dev);
1368 	void			(*ndo_change_rx_flags)(struct net_device *dev,
1369 						       int flags);
1370 	void			(*ndo_set_rx_mode)(struct net_device *dev);
1371 	int			(*ndo_set_mac_address)(struct net_device *dev,
1372 						       void *addr);
1373 	int			(*ndo_validate_addr)(struct net_device *dev);
1374 	int			(*ndo_do_ioctl)(struct net_device *dev,
1375 					        struct ifreq *ifr, int cmd);
1376 	int			(*ndo_eth_ioctl)(struct net_device *dev,
1377 						 struct ifreq *ifr, int cmd);
1378 	int			(*ndo_siocbond)(struct net_device *dev,
1379 						struct ifreq *ifr, int cmd);
1380 	int			(*ndo_siocwandev)(struct net_device *dev,
1381 						  struct if_settings *ifs);
1382 	int			(*ndo_siocdevprivate)(struct net_device *dev,
1383 						      struct ifreq *ifr,
1384 						      void __user *data, int cmd);
1385 	int			(*ndo_set_config)(struct net_device *dev,
1386 					          struct ifmap *map);
1387 	int			(*ndo_change_mtu)(struct net_device *dev,
1388 						  int new_mtu);
1389 	int			(*ndo_neigh_setup)(struct net_device *dev,
1390 						   struct neigh_parms *);
1391 	void			(*ndo_tx_timeout) (struct net_device *dev,
1392 						   unsigned int txqueue);
1393 
1394 	void			(*ndo_get_stats64)(struct net_device *dev,
1395 						   struct rtnl_link_stats64 *storage);
1396 	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
1397 	int			(*ndo_get_offload_stats)(int attr_id,
1398 							 const struct net_device *dev,
1399 							 void *attr_data);
1400 	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1401 
1402 	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
1403 						       __be16 proto, u16 vid);
1404 	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
1405 						        __be16 proto, u16 vid);
1406 #ifdef CONFIG_NET_POLL_CONTROLLER
1407 	void                    (*ndo_poll_controller)(struct net_device *dev);
1408 	int			(*ndo_netpoll_setup)(struct net_device *dev,
1409 						     struct netpoll_info *info);
1410 	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
1411 #endif
1412 	int			(*ndo_set_vf_mac)(struct net_device *dev,
1413 						  int queue, u8 *mac);
1414 	int			(*ndo_set_vf_vlan)(struct net_device *dev,
1415 						   int queue, u16 vlan,
1416 						   u8 qos, __be16 proto);
1417 	int			(*ndo_set_vf_rate)(struct net_device *dev,
1418 						   int vf, int min_tx_rate,
1419 						   int max_tx_rate);
1420 	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
1421 						       int vf, bool setting);
1422 	int			(*ndo_set_vf_trust)(struct net_device *dev,
1423 						    int vf, bool setting);
1424 	int			(*ndo_get_vf_config)(struct net_device *dev,
1425 						     int vf,
1426 						     struct ifla_vf_info *ivf);
1427 	int			(*ndo_set_vf_link_state)(struct net_device *dev,
1428 							 int vf, int link_state);
1429 	int			(*ndo_get_vf_stats)(struct net_device *dev,
1430 						    int vf,
1431 						    struct ifla_vf_stats
1432 						    *vf_stats);
1433 	int			(*ndo_set_vf_port)(struct net_device *dev,
1434 						   int vf,
1435 						   struct nlattr *port[]);
1436 	int			(*ndo_get_vf_port)(struct net_device *dev,
1437 						   int vf, struct sk_buff *skb);
1438 	int			(*ndo_get_vf_guid)(struct net_device *dev,
1439 						   int vf,
1440 						   struct ifla_vf_guid *node_guid,
1441 						   struct ifla_vf_guid *port_guid);
1442 	int			(*ndo_set_vf_guid)(struct net_device *dev,
1443 						   int vf, u64 guid,
1444 						   int guid_type);
1445 	int			(*ndo_set_vf_rss_query_en)(
1446 						   struct net_device *dev,
1447 						   int vf, bool setting);
1448 	int			(*ndo_setup_tc)(struct net_device *dev,
1449 						enum tc_setup_type type,
1450 						void *type_data);
1451 #if IS_ENABLED(CONFIG_FCOE)
1452 	int			(*ndo_fcoe_enable)(struct net_device *dev);
1453 	int			(*ndo_fcoe_disable)(struct net_device *dev);
1454 	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
1455 						      u16 xid,
1456 						      struct scatterlist *sgl,
1457 						      unsigned int sgc);
1458 	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
1459 						     u16 xid);
1460 	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
1461 						       u16 xid,
1462 						       struct scatterlist *sgl,
1463 						       unsigned int sgc);
1464 	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1465 							struct netdev_fcoe_hbainfo *hbainfo);
1466 #endif
1467 
1468 #if IS_ENABLED(CONFIG_LIBFCOE)
1469 #define NETDEV_FCOE_WWNN 0
1470 #define NETDEV_FCOE_WWPN 1
1471 	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
1472 						    u64 *wwn, int type);
1473 #endif
1474 
1475 #ifdef CONFIG_RFS_ACCEL
1476 	int			(*ndo_rx_flow_steer)(struct net_device *dev,
1477 						     const struct sk_buff *skb,
1478 						     u16 rxq_index,
1479 						     u32 flow_id);
1480 #endif
1481 	int			(*ndo_add_slave)(struct net_device *dev,
1482 						 struct net_device *slave_dev,
1483 						 struct netlink_ext_ack *extack);
1484 	int			(*ndo_del_slave)(struct net_device *dev,
1485 						 struct net_device *slave_dev);
1486 	struct net_device*	(*ndo_get_xmit_slave)(struct net_device *dev,
1487 						      struct sk_buff *skb,
1488 						      bool all_slaves);
1489 	struct net_device*	(*ndo_sk_get_lower_dev)(struct net_device *dev,
1490 							struct sock *sk);
1491 	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
1492 						    netdev_features_t features);
1493 	int			(*ndo_set_features)(struct net_device *dev,
1494 						    netdev_features_t features);
1495 	int			(*ndo_neigh_construct)(struct net_device *dev,
1496 						       struct neighbour *n);
1497 	void			(*ndo_neigh_destroy)(struct net_device *dev,
1498 						     struct neighbour *n);
1499 
1500 	int			(*ndo_fdb_add)(struct ndmsg *ndm,
1501 					       struct nlattr *tb[],
1502 					       struct net_device *dev,
1503 					       const unsigned char *addr,
1504 					       u16 vid,
1505 					       u16 flags,
1506 					       struct netlink_ext_ack *extack);
1507 	int			(*ndo_fdb_del)(struct ndmsg *ndm,
1508 					       struct nlattr *tb[],
1509 					       struct net_device *dev,
1510 					       const unsigned char *addr,
1511 					       u16 vid, struct netlink_ext_ack *extack);
1512 	int			(*ndo_fdb_del_bulk)(struct nlmsghdr *nlh,
1513 						    struct net_device *dev,
1514 						    struct netlink_ext_ack *extack);
1515 	int			(*ndo_fdb_dump)(struct sk_buff *skb,
1516 						struct netlink_callback *cb,
1517 						struct net_device *dev,
1518 						struct net_device *filter_dev,
1519 						int *idx);
1520 	int			(*ndo_fdb_get)(struct sk_buff *skb,
1521 					       struct nlattr *tb[],
1522 					       struct net_device *dev,
1523 					       const unsigned char *addr,
1524 					       u16 vid, u32 portid, u32 seq,
1525 					       struct netlink_ext_ack *extack);
1526 	int			(*ndo_mdb_add)(struct net_device *dev,
1527 					       struct nlattr *tb[],
1528 					       u16 nlmsg_flags,
1529 					       struct netlink_ext_ack *extack);
1530 	int			(*ndo_mdb_del)(struct net_device *dev,
1531 					       struct nlattr *tb[],
1532 					       struct netlink_ext_ack *extack);
1533 	int			(*ndo_mdb_del_bulk)(struct net_device *dev,
1534 						    struct nlattr *tb[],
1535 						    struct netlink_ext_ack *extack);
1536 	int			(*ndo_mdb_dump)(struct net_device *dev,
1537 						struct sk_buff *skb,
1538 						struct netlink_callback *cb);
1539 	int			(*ndo_mdb_get)(struct net_device *dev,
1540 					       struct nlattr *tb[], u32 portid,
1541 					       u32 seq,
1542 					       struct netlink_ext_ack *extack);
1543 	int			(*ndo_bridge_setlink)(struct net_device *dev,
1544 						      struct nlmsghdr *nlh,
1545 						      u16 flags,
1546 						      struct netlink_ext_ack *extack);
1547 	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
1548 						      u32 pid, u32 seq,
1549 						      struct net_device *dev,
1550 						      u32 filter_mask,
1551 						      int nlflags);
1552 	int			(*ndo_bridge_dellink)(struct net_device *dev,
1553 						      struct nlmsghdr *nlh,
1554 						      u16 flags);
1555 	int			(*ndo_change_carrier)(struct net_device *dev,
1556 						      bool new_carrier);
1557 	int			(*ndo_get_phys_port_id)(struct net_device *dev,
1558 							struct netdev_phys_item_id *ppid);
1559 	int			(*ndo_get_port_parent_id)(struct net_device *dev,
1560 							  struct netdev_phys_item_id *ppid);
1561 	int			(*ndo_get_phys_port_name)(struct net_device *dev,
1562 							  char *name, size_t len);
1563 	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
1564 							struct net_device *dev);
1565 	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
1566 							void *priv);
1567 
1568 	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
1569 						      int queue_index,
1570 						      u32 maxrate);
1571 	int			(*ndo_get_iflink)(const struct net_device *dev);
1572 	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
1573 						       struct sk_buff *skb);
1574 	void			(*ndo_set_rx_headroom)(struct net_device *dev,
1575 						       int needed_headroom);
1576 	int			(*ndo_bpf)(struct net_device *dev,
1577 					   struct netdev_bpf *bpf);
1578 	int			(*ndo_xdp_xmit)(struct net_device *dev, int n,
1579 						struct xdp_frame **xdp,
1580 						u32 flags);
1581 	struct net_device *	(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
1582 							  struct xdp_buff *xdp);
1583 	int			(*ndo_xsk_wakeup)(struct net_device *dev,
1584 						  u32 queue_id, u32 flags);
1585 	int			(*ndo_tunnel_ctl)(struct net_device *dev,
1586 						  struct ip_tunnel_parm_kern *p,
1587 						  int cmd);
1588 	struct net_device *	(*ndo_get_peer_dev)(struct net_device *dev);
1589 	int                     (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
1590                                                          struct net_device_path *path);
1591 	ktime_t			(*ndo_get_tstamp)(struct net_device *dev,
1592 						  const struct skb_shared_hwtstamps *hwtstamps,
1593 						  bool cycles);
1594 	int			(*ndo_hwtstamp_get)(struct net_device *dev,
1595 						    struct kernel_hwtstamp_config *kernel_config);
1596 	int			(*ndo_hwtstamp_set)(struct net_device *dev,
1597 						    struct kernel_hwtstamp_config *kernel_config,
1598 						    struct netlink_ext_ack *extack);
1599 };
1600 
1601 /**
1602  * enum netdev_priv_flags - &struct net_device priv_flags
1603  *
1604  * These are the &struct net_device, they are only set internally
1605  * by drivers and used in the kernel. These flags are invisible to
1606  * userspace; this means that the order of these flags can change
1607  * during any kernel release.
1608  *
1609  * You should have a pretty good reason to be extending these flags.
1610  *
1611  * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1612  * @IFF_EBRIDGE: Ethernet bridging device
1613  * @IFF_BONDING: bonding master or slave
1614  * @IFF_ISATAP: ISATAP interface (RFC4214)
1615  * @IFF_WAN_HDLC: WAN HDLC device
1616  * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1617  *	release skb->dst
1618  * @IFF_DONT_BRIDGE: disallow bridging this ether dev
1619  * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1620  * @IFF_MACVLAN_PORT: device used as macvlan port
1621  * @IFF_BRIDGE_PORT: device used as bridge port
1622  * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
1623  * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
1624  * @IFF_UNICAST_FLT: Supports unicast filtering
1625  * @IFF_TEAM_PORT: device used as team port
1626  * @IFF_SUPP_NOFCS: device supports sending custom FCS
1627  * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1628  *	change when it's running
1629  * @IFF_MACVLAN: Macvlan device
1630  * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
1631  *	underlying stacked devices
1632  * @IFF_L3MDEV_MASTER: device is an L3 master device
1633  * @IFF_NO_QUEUE: device can run without qdisc attached
1634  * @IFF_OPENVSWITCH: device is a Open vSwitch master
1635  * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
1636  * @IFF_TEAM: device is a team device
1637  * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
1638  * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
1639  *	entity (i.e. the master device for bridged veth)
1640  * @IFF_MACSEC: device is a MACsec device
1641  * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
1642  * @IFF_FAILOVER: device is a failover master device
1643  * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
1644  * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
1645  * @IFF_NO_ADDRCONF: prevent ipv6 addrconf
1646  * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
1647  *	skb_headlen(skb) == 0 (data starts from frag0)
1648  * @IFF_CHANGE_PROTO_DOWN: device supports setting carrier via IFLA_PROTO_DOWN
1649  * @IFF_SEE_ALL_HWTSTAMP_REQUESTS: device wants to see calls to
1650  *	ndo_hwtstamp_set() for all timestamp requests regardless of source,
1651  *	even if those aren't HWTSTAMP_SOURCE_NETDEV.
1652  */
1653 enum netdev_priv_flags {
1654 	IFF_802_1Q_VLAN			= 1<<0,
1655 	IFF_EBRIDGE			= 1<<1,
1656 	IFF_BONDING			= 1<<2,
1657 	IFF_ISATAP			= 1<<3,
1658 	IFF_WAN_HDLC			= 1<<4,
1659 	IFF_XMIT_DST_RELEASE		= 1<<5,
1660 	IFF_DONT_BRIDGE			= 1<<6,
1661 	IFF_DISABLE_NETPOLL		= 1<<7,
1662 	IFF_MACVLAN_PORT		= 1<<8,
1663 	IFF_BRIDGE_PORT			= 1<<9,
1664 	IFF_OVS_DATAPATH		= 1<<10,
1665 	IFF_TX_SKB_SHARING		= 1<<11,
1666 	IFF_UNICAST_FLT			= 1<<12,
1667 	IFF_TEAM_PORT			= 1<<13,
1668 	IFF_SUPP_NOFCS			= 1<<14,
1669 	IFF_LIVE_ADDR_CHANGE		= 1<<15,
1670 	IFF_MACVLAN			= 1<<16,
1671 	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
1672 	IFF_L3MDEV_MASTER		= 1<<18,
1673 	IFF_NO_QUEUE			= 1<<19,
1674 	IFF_OPENVSWITCH			= 1<<20,
1675 	IFF_L3MDEV_SLAVE		= 1<<21,
1676 	IFF_TEAM			= 1<<22,
1677 	IFF_RXFH_CONFIGURED		= 1<<23,
1678 	IFF_PHONY_HEADROOM		= 1<<24,
1679 	IFF_MACSEC			= 1<<25,
1680 	IFF_NO_RX_HANDLER		= 1<<26,
1681 	IFF_FAILOVER			= 1<<27,
1682 	IFF_FAILOVER_SLAVE		= 1<<28,
1683 	IFF_L3MDEV_RX_HANDLER		= 1<<29,
1684 	IFF_NO_ADDRCONF			= BIT_ULL(30),
1685 	IFF_TX_SKB_NO_LINEAR		= BIT_ULL(31),
1686 	IFF_CHANGE_PROTO_DOWN		= BIT_ULL(32),
1687 	IFF_SEE_ALL_HWTSTAMP_REQUESTS	= BIT_ULL(33),
1688 };
1689 
1690 #define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
1691 #define IFF_EBRIDGE			IFF_EBRIDGE
1692 #define IFF_BONDING			IFF_BONDING
1693 #define IFF_ISATAP			IFF_ISATAP
1694 #define IFF_WAN_HDLC			IFF_WAN_HDLC
1695 #define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
1696 #define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
1697 #define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
1698 #define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
1699 #define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
1700 #define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
1701 #define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
1702 #define IFF_UNICAST_FLT			IFF_UNICAST_FLT
1703 #define IFF_TEAM_PORT			IFF_TEAM_PORT
1704 #define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
1705 #define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
1706 #define IFF_MACVLAN			IFF_MACVLAN
1707 #define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
1708 #define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
1709 #define IFF_NO_QUEUE			IFF_NO_QUEUE
1710 #define IFF_OPENVSWITCH			IFF_OPENVSWITCH
1711 #define IFF_L3MDEV_SLAVE		IFF_L3MDEV_SLAVE
1712 #define IFF_TEAM			IFF_TEAM
1713 #define IFF_RXFH_CONFIGURED		IFF_RXFH_CONFIGURED
1714 #define IFF_PHONY_HEADROOM		IFF_PHONY_HEADROOM
1715 #define IFF_MACSEC			IFF_MACSEC
1716 #define IFF_NO_RX_HANDLER		IFF_NO_RX_HANDLER
1717 #define IFF_FAILOVER			IFF_FAILOVER
1718 #define IFF_FAILOVER_SLAVE		IFF_FAILOVER_SLAVE
1719 #define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
1720 #define IFF_TX_SKB_NO_LINEAR		IFF_TX_SKB_NO_LINEAR
1721 
1722 /* Specifies the type of the struct net_device::ml_priv pointer */
1723 enum netdev_ml_priv_type {
1724 	ML_PRIV_NONE,
1725 	ML_PRIV_CAN,
1726 };
1727 
1728 enum netdev_stat_type {
1729 	NETDEV_PCPU_STAT_NONE,
1730 	NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */
1731 	NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */
1732 	NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */
1733 };
1734 
1735 enum netdev_reg_state {
1736 	NETREG_UNINITIALIZED = 0,
1737 	NETREG_REGISTERED,	/* completed register_netdevice */
1738 	NETREG_UNREGISTERING,	/* called unregister_netdevice */
1739 	NETREG_UNREGISTERED,	/* completed unregister todo */
1740 	NETREG_RELEASED,	/* called free_netdev */
1741 	NETREG_DUMMY,		/* dummy device for NAPI poll */
1742 };
1743 
1744 /**
1745  *	struct net_device - The DEVICE structure.
1746  *
1747  *	Actually, this whole structure is a big mistake.  It mixes I/O
1748  *	data with strictly "high-level" data, and it has to know about
1749  *	almost every data structure used in the INET module.
1750  *
1751  *	@name:	This is the first field of the "visible" part of this structure
1752  *		(i.e. as seen by users in the "Space.c" file).  It is the name
1753  *		of the interface.
1754  *
1755  *	@name_node:	Name hashlist node
1756  *	@ifalias:	SNMP alias
1757  *	@mem_end:	Shared memory end
1758  *	@mem_start:	Shared memory start
1759  *	@base_addr:	Device I/O address
1760  *	@irq:		Device IRQ number
1761  *
1762  *	@state:		Generic network queuing layer state, see netdev_state_t
1763  *	@dev_list:	The global list of network devices
1764  *	@napi_list:	List entry used for polling NAPI devices
1765  *	@unreg_list:	List entry  when we are unregistering the
1766  *			device; see the function unregister_netdev
1767  *	@close_list:	List entry used when we are closing the device
1768  *	@ptype_all:     Device-specific packet handlers for all protocols
1769  *	@ptype_specific: Device-specific, protocol-specific packet handlers
1770  *
1771  *	@adj_list:	Directly linked devices, like slaves for bonding
1772  *	@features:	Currently active device features
1773  *	@hw_features:	User-changeable features
1774  *
1775  *	@wanted_features:	User-requested features
1776  *	@vlan_features:		Mask of features inheritable by VLAN devices
1777  *
1778  *	@hw_enc_features:	Mask of features inherited by encapsulating devices
1779  *				This field indicates what encapsulation
1780  *				offloads the hardware is capable of doing,
1781  *				and drivers will need to set them appropriately.
1782  *
1783  *	@mpls_features:	Mask of features inheritable by MPLS
1784  *	@gso_partial_features: value(s) from NETIF_F_GSO\*
1785  *
1786  *	@ifindex:	interface index
1787  *	@group:		The group the device belongs to
1788  *
1789  *	@stats:		Statistics struct, which was left as a legacy, use
1790  *			rtnl_link_stats64 instead
1791  *
1792  *	@core_stats:	core networking counters,
1793  *			do not use this in drivers
1794  *	@carrier_up_count:	Number of times the carrier has been up
1795  *	@carrier_down_count:	Number of times the carrier has been down
1796  *
1797  *	@wireless_handlers:	List of functions to handle Wireless Extensions,
1798  *				instead of ioctl,
1799  *				see <net/iw_handler.h> for details.
1800  *	@wireless_data:	Instance data managed by the core of wireless extensions
1801  *
1802  *	@netdev_ops:	Includes several pointers to callbacks,
1803  *			if one wants to override the ndo_*() functions
1804  *	@xdp_metadata_ops:	Includes pointers to XDP metadata callbacks.
1805  *	@xsk_tx_metadata_ops:	Includes pointers to AF_XDP TX metadata callbacks.
1806  *	@ethtool_ops:	Management operations
1807  *	@l3mdev_ops:	Layer 3 master device operations
1808  *	@ndisc_ops:	Includes callbacks for different IPv6 neighbour
1809  *			discovery handling. Necessary for e.g. 6LoWPAN.
1810  *	@xfrmdev_ops:	Transformation offload operations
1811  *	@tlsdev_ops:	Transport Layer Security offload operations
1812  *	@header_ops:	Includes callbacks for creating,parsing,caching,etc
1813  *			of Layer 2 headers.
1814  *
1815  *	@flags:		Interface flags (a la BSD)
1816  *	@xdp_features:	XDP capability supported by the device
1817  *	@priv_flags:	Like 'flags' but invisible to userspace,
1818  *			see if.h for the definitions
1819  *	@gflags:	Global flags ( kept as legacy )
1820  *	@padded:	How much padding added by alloc_netdev()
1821  *	@operstate:	RFC2863 operstate
1822  *	@link_mode:	Mapping policy to operstate
1823  *	@if_port:	Selectable AUI, TP, ...
1824  *	@dma:		DMA channel
1825  *	@mtu:		Interface MTU value
1826  *	@min_mtu:	Interface Minimum MTU value
1827  *	@max_mtu:	Interface Maximum MTU value
1828  *	@type:		Interface hardware type
1829  *	@hard_header_len: Maximum hardware header length.
1830  *	@min_header_len:  Minimum hardware header length
1831  *
1832  *	@needed_headroom: Extra headroom the hardware may need, but not in all
1833  *			  cases can this be guaranteed
1834  *	@needed_tailroom: Extra tailroom the hardware may need, but not in all
1835  *			  cases can this be guaranteed. Some cases also use
1836  *			  LL_MAX_HEADER instead to allocate the skb
1837  *
1838  *	interface address info:
1839  *
1840  * 	@perm_addr:		Permanent hw address
1841  * 	@addr_assign_type:	Hw address assignment type
1842  * 	@addr_len:		Hardware address length
1843  *	@upper_level:		Maximum depth level of upper devices.
1844  *	@lower_level:		Maximum depth level of lower devices.
1845  *	@neigh_priv_len:	Used in neigh_alloc()
1846  * 	@dev_id:		Used to differentiate devices that share
1847  * 				the same link layer address
1848  * 	@dev_port:		Used to differentiate devices that share
1849  * 				the same function
1850  *	@addr_list_lock:	XXX: need comments on this one
1851  *	@name_assign_type:	network interface name assignment type
1852  *	@uc_promisc:		Counter that indicates promiscuous mode
1853  *				has been enabled due to the need to listen to
1854  *				additional unicast addresses in a device that
1855  *				does not implement ndo_set_rx_mode()
1856  *	@uc:			unicast mac addresses
1857  *	@mc:			multicast mac addresses
1858  *	@dev_addrs:		list of device hw addresses
1859  *	@queues_kset:		Group of all Kobjects in the Tx and RX queues
1860  *	@promiscuity:		Number of times the NIC is told to work in
1861  *				promiscuous mode; if it becomes 0 the NIC will
1862  *				exit promiscuous mode
1863  *	@allmulti:		Counter, enables or disables allmulticast mode
1864  *
1865  *	@vlan_info:	VLAN info
1866  *	@dsa_ptr:	dsa specific data
1867  *	@tipc_ptr:	TIPC specific data
1868  *	@atalk_ptr:	AppleTalk link
1869  *	@ip_ptr:	IPv4 specific data
1870  *	@ip6_ptr:	IPv6 specific data
1871  *	@ax25_ptr:	AX.25 specific data
1872  *	@ieee80211_ptr:	IEEE 802.11 specific data, assign before registering
1873  *	@ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
1874  *			 device struct
1875  *	@mpls_ptr:	mpls_dev struct pointer
1876  *	@mctp_ptr:	MCTP specific data
1877  *
1878  *	@dev_addr:	Hw address (before bcast,
1879  *			because most packets are unicast)
1880  *
1881  *	@_rx:			Array of RX queues
1882  *	@num_rx_queues:		Number of RX queues
1883  *				allocated at register_netdev() time
1884  *	@real_num_rx_queues: 	Number of RX queues currently active in device
1885  *	@xdp_prog:		XDP sockets filter program pointer
1886  *	@gro_flush_timeout:	timeout for GRO layer in NAPI
1887  *	@napi_defer_hard_irqs:	If not zero, provides a counter that would
1888  *				allow to avoid NIC hard IRQ, on busy queues.
1889  *
1890  *	@rx_handler:		handler for received packets
1891  *	@rx_handler_data: 	XXX: need comments on this one
1892  *	@tcx_ingress:		BPF & clsact qdisc specific data for ingress processing
1893  *	@ingress_queue:		XXX: need comments on this one
1894  *	@nf_hooks_ingress:	netfilter hooks executed for ingress packets
1895  *	@broadcast:		hw bcast address
1896  *
1897  *	@rx_cpu_rmap:	CPU reverse-mapping for RX completion interrupts,
1898  *			indexed by RX queue number. Assigned by driver.
1899  *			This must only be set if the ndo_rx_flow_steer
1900  *			operation is defined
1901  *	@index_hlist:		Device index hash chain
1902  *
1903  *	@_tx:			Array of TX queues
1904  *	@num_tx_queues:		Number of TX queues allocated at alloc_netdev_mq() time
1905  *	@real_num_tx_queues: 	Number of TX queues currently active in device
1906  *	@qdisc:			Root qdisc from userspace point of view
1907  *	@tx_queue_len:		Max frames per queue allowed
1908  *	@tx_global_lock: 	XXX: need comments on this one
1909  *	@xdp_bulkq:		XDP device bulk queue
1910  *	@xps_maps:		all CPUs/RXQs maps for XPS device
1911  *
1912  *	@xps_maps:	XXX: need comments on this one
1913  *	@tcx_egress:		BPF & clsact qdisc specific data for egress processing
1914  *	@nf_hooks_egress:	netfilter hooks executed for egress packets
1915  *	@qdisc_hash:		qdisc hash table
1916  *	@watchdog_timeo:	Represents the timeout that is used by
1917  *				the watchdog (see dev_watchdog())
1918  *	@watchdog_timer:	List of timers
1919  *
1920  *	@proto_down_reason:	reason a netdev interface is held down
1921  *	@pcpu_refcnt:		Number of references to this device
1922  *	@dev_refcnt:		Number of references to this device
1923  *	@refcnt_tracker:	Tracker directory for tracked references to this device
1924  *	@todo_list:		Delayed register/unregister
1925  *	@link_watch_list:	XXX: need comments on this one
1926  *
1927  *	@reg_state:		Register/unregister state machine
1928  *	@dismantle:		Device is going to be freed
1929  *	@rtnl_link_state:	This enum represents the phases of creating
1930  *				a new link
1931  *
1932  *	@needs_free_netdev:	Should unregister perform free_netdev?
1933  *	@priv_destructor:	Called from unregister
1934  *	@npinfo:		XXX: need comments on this one
1935  * 	@nd_net:		Network namespace this network device is inside
1936  *
1937  * 	@ml_priv:	Mid-layer private
1938  *	@ml_priv_type:  Mid-layer private type
1939  *
1940  *	@pcpu_stat_type:	Type of device statistics which the core should
1941  *				allocate/free: none, lstats, tstats, dstats. none
1942  *				means the driver is handling statistics allocation/
1943  *				freeing internally.
1944  *	@lstats:		Loopback statistics: packets, bytes
1945  *	@tstats:		Tunnel statistics: RX/TX packets, RX/TX bytes
1946  *	@dstats:		Dummy statistics: RX/TX/drop packets, RX/TX bytes
1947  *
1948  *	@garp_port:	GARP
1949  *	@mrp_port:	MRP
1950  *
1951  *	@dm_private:	Drop monitor private
1952  *
1953  *	@dev:		Class/net/name entry
1954  *	@sysfs_groups:	Space for optional device, statistics and wireless
1955  *			sysfs groups
1956  *
1957  *	@sysfs_rx_queue_group:	Space for optional per-rx queue attributes
1958  *	@rtnl_link_ops:	Rtnl_link_ops
1959  *	@stat_ops:	Optional ops for queue-aware statistics
1960  *	@queue_mgmt_ops:	Optional ops for queue management
1961  *
1962  *	@gso_max_size:	Maximum size of generic segmentation offload
1963  *	@tso_max_size:	Device (as in HW) limit on the max TSO request size
1964  *	@gso_max_segs:	Maximum number of segments that can be passed to the
1965  *			NIC for GSO
1966  *	@tso_max_segs:	Device (as in HW) limit on the max TSO segment count
1967  * 	@gso_ipv4_max_size:	Maximum size of generic segmentation offload,
1968  * 				for IPv4.
1969  *
1970  *	@dcbnl_ops:	Data Center Bridging netlink ops
1971  *	@num_tc:	Number of traffic classes in the net device
1972  *	@tc_to_txq:	XXX: need comments on this one
1973  *	@prio_tc_map:	XXX: need comments on this one
1974  *
1975  *	@fcoe_ddp_xid:	Max exchange id for FCoE LRO by ddp
1976  *
1977  *	@priomap:	XXX: need comments on this one
1978  *	@phydev:	Physical device may attach itself
1979  *			for hardware timestamping
1980  *	@sfp_bus:	attached &struct sfp_bus structure.
1981  *
1982  *	@qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
1983  *
1984  *	@proto_down:	protocol port state information can be sent to the
1985  *			switch driver and used to set the phys state of the
1986  *			switch port.
1987  *
1988  *	@wol_enabled:	Wake-on-LAN is enabled
1989  *
1990  *	@threaded:	napi threaded mode is enabled
1991  *
1992  *	@net_notifier_list:	List of per-net netdev notifier block
1993  *				that follow this device when it is moved
1994  *				to another network namespace.
1995  *
1996  *	@macsec_ops:    MACsec offloading ops
1997  *
1998  *	@udp_tunnel_nic_info:	static structure describing the UDP tunnel
1999  *				offload capabilities of the device
2000  *	@udp_tunnel_nic:	UDP tunnel offload state
2001  *	@xdp_state:		stores info on attached XDP BPF programs
2002  *
2003  *	@nested_level:	Used as a parameter of spin_lock_nested() of
2004  *			dev->addr_list_lock.
2005  *	@unlink_list:	As netif_addr_lock() can be called recursively,
2006  *			keep a list of interfaces to be deleted.
2007  *	@gro_max_size:	Maximum size of aggregated packet in generic
2008  *			receive offload (GRO)
2009  * 	@gro_ipv4_max_size:	Maximum size of aggregated packet in generic
2010  * 				receive offload (GRO), for IPv4.
2011  *	@xdp_zc_max_segs:	Maximum number of segments supported by AF_XDP
2012  *				zero copy driver
2013  *
2014  *	@dev_addr_shadow:	Copy of @dev_addr to catch direct writes.
2015  *	@linkwatch_dev_tracker:	refcount tracker used by linkwatch.
2016  *	@watchdog_dev_tracker:	refcount tracker used by watchdog.
2017  *	@dev_registered_tracker:	tracker for reference held while
2018  *					registered
2019  *	@offload_xstats_l3:	L3 HW stats for this netdevice.
2020  *
2021  *	@devlink_port:	Pointer to related devlink port structure.
2022  *			Assigned by a driver before netdev registration using
2023  *			SET_NETDEV_DEVLINK_PORT macro. This pointer is static
2024  *			during the time netdevice is registered.
2025  *
2026  *	@dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem,
2027  *		   where the clock is recovered.
2028  *
2029  *	FIXME: cleanup struct net_device such that network protocol info
2030  *	moves out.
2031  */
2032 
2033 struct net_device {
2034 	/* Cacheline organization can be found documented in
2035 	 * Documentation/networking/net_cachelines/net_device.rst.
2036 	 * Please update the document when adding new fields.
2037 	 */
2038 
2039 	/* TX read-mostly hotpath */
2040 	__cacheline_group_begin(net_device_read_tx);
2041 	unsigned long long	priv_flags;
2042 	const struct net_device_ops *netdev_ops;
2043 	const struct header_ops *header_ops;
2044 	struct netdev_queue	*_tx;
2045 	netdev_features_t	gso_partial_features;
2046 	unsigned int		real_num_tx_queues;
2047 	unsigned int		gso_max_size;
2048 	unsigned int		gso_ipv4_max_size;
2049 	u16			gso_max_segs;
2050 	s16			num_tc;
2051 	/* Note : dev->mtu is often read without holding a lock.
2052 	 * Writers usually hold RTNL.
2053 	 * It is recommended to use READ_ONCE() to annotate the reads,
2054 	 * and to use WRITE_ONCE() to annotate the writes.
2055 	 */
2056 	unsigned int		mtu;
2057 	unsigned short		needed_headroom;
2058 	struct netdev_tc_txq	tc_to_txq[TC_MAX_QUEUE];
2059 #ifdef CONFIG_XPS
2060 	struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
2061 #endif
2062 #ifdef CONFIG_NETFILTER_EGRESS
2063 	struct nf_hook_entries __rcu *nf_hooks_egress;
2064 #endif
2065 #ifdef CONFIG_NET_XGRESS
2066 	struct bpf_mprog_entry __rcu *tcx_egress;
2067 #endif
2068 	__cacheline_group_end(net_device_read_tx);
2069 
2070 	/* TXRX read-mostly hotpath */
2071 	__cacheline_group_begin(net_device_read_txrx);
2072 	union {
2073 		struct pcpu_lstats __percpu		*lstats;
2074 		struct pcpu_sw_netstats __percpu	*tstats;
2075 		struct pcpu_dstats __percpu		*dstats;
2076 	};
2077 	unsigned long		state;
2078 	unsigned int		flags;
2079 	unsigned short		hard_header_len;
2080 	netdev_features_t	features;
2081 	struct inet6_dev __rcu	*ip6_ptr;
2082 	__cacheline_group_end(net_device_read_txrx);
2083 
2084 	/* RX read-mostly hotpath */
2085 	__cacheline_group_begin(net_device_read_rx);
2086 	struct bpf_prog __rcu	*xdp_prog;
2087 	struct list_head	ptype_specific;
2088 	int			ifindex;
2089 	unsigned int		real_num_rx_queues;
2090 	struct netdev_rx_queue	*_rx;
2091 	unsigned long		gro_flush_timeout;
2092 	int			napi_defer_hard_irqs;
2093 	unsigned int		gro_max_size;
2094 	unsigned int		gro_ipv4_max_size;
2095 	rx_handler_func_t __rcu	*rx_handler;
2096 	void __rcu		*rx_handler_data;
2097 	possible_net_t			nd_net;
2098 #ifdef CONFIG_NETPOLL
2099 	struct netpoll_info __rcu	*npinfo;
2100 #endif
2101 #ifdef CONFIG_NET_XGRESS
2102 	struct bpf_mprog_entry __rcu *tcx_ingress;
2103 #endif
2104 	__cacheline_group_end(net_device_read_rx);
2105 
2106 	char			name[IFNAMSIZ];
2107 	struct netdev_name_node	*name_node;
2108 	struct dev_ifalias	__rcu *ifalias;
2109 	/*
2110 	 *	I/O specific fields
2111 	 *	FIXME: Merge these and struct ifmap into one
2112 	 */
2113 	unsigned long		mem_end;
2114 	unsigned long		mem_start;
2115 	unsigned long		base_addr;
2116 
2117 	/*
2118 	 *	Some hardware also needs these fields (state,dev_list,
2119 	 *	napi_list,unreg_list,close_list) but they are not
2120 	 *	part of the usual set specified in Space.c.
2121 	 */
2122 
2123 
2124 	struct list_head	dev_list;
2125 	struct list_head	napi_list;
2126 	struct list_head	unreg_list;
2127 	struct list_head	close_list;
2128 	struct list_head	ptype_all;
2129 
2130 	struct {
2131 		struct list_head upper;
2132 		struct list_head lower;
2133 	} adj_list;
2134 
2135 	/* Read-mostly cache-line for fast-path access */
2136 	xdp_features_t		xdp_features;
2137 	const struct xdp_metadata_ops *xdp_metadata_ops;
2138 	const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops;
2139 	unsigned short		gflags;
2140 
2141 	unsigned short		needed_tailroom;
2142 
2143 	netdev_features_t	hw_features;
2144 	netdev_features_t	wanted_features;
2145 	netdev_features_t	vlan_features;
2146 	netdev_features_t	hw_enc_features;
2147 	netdev_features_t	mpls_features;
2148 
2149 	unsigned int		min_mtu;
2150 	unsigned int		max_mtu;
2151 	unsigned short		type;
2152 	unsigned char		min_header_len;
2153 	unsigned char		name_assign_type;
2154 
2155 	int			group;
2156 
2157 	struct net_device_stats	stats; /* not used by modern drivers */
2158 
2159 	struct net_device_core_stats __percpu *core_stats;
2160 
2161 	/* Stats to monitor link on/off, flapping */
2162 	atomic_t		carrier_up_count;
2163 	atomic_t		carrier_down_count;
2164 
2165 #ifdef CONFIG_WIRELESS_EXT
2166 	const struct iw_handler_def *wireless_handlers;
2167 	struct iw_public_data	*wireless_data;
2168 #endif
2169 	const struct ethtool_ops *ethtool_ops;
2170 #ifdef CONFIG_NET_L3_MASTER_DEV
2171 	const struct l3mdev_ops	*l3mdev_ops;
2172 #endif
2173 #if IS_ENABLED(CONFIG_IPV6)
2174 	const struct ndisc_ops *ndisc_ops;
2175 #endif
2176 
2177 #ifdef CONFIG_XFRM_OFFLOAD
2178 	const struct xfrmdev_ops *xfrmdev_ops;
2179 #endif
2180 
2181 #if IS_ENABLED(CONFIG_TLS_DEVICE)
2182 	const struct tlsdev_ops *tlsdev_ops;
2183 #endif
2184 
2185 	unsigned int		operstate;
2186 	unsigned char		link_mode;
2187 
2188 	unsigned char		if_port;
2189 	unsigned char		dma;
2190 
2191 	/* Interface address info. */
2192 	unsigned char		perm_addr[MAX_ADDR_LEN];
2193 	unsigned char		addr_assign_type;
2194 	unsigned char		addr_len;
2195 	unsigned char		upper_level;
2196 	unsigned char		lower_level;
2197 
2198 	unsigned short		neigh_priv_len;
2199 	unsigned short          dev_id;
2200 	unsigned short          dev_port;
2201 	unsigned short		padded;
2202 
2203 	spinlock_t		addr_list_lock;
2204 	int			irq;
2205 
2206 	struct netdev_hw_addr_list	uc;
2207 	struct netdev_hw_addr_list	mc;
2208 	struct netdev_hw_addr_list	dev_addrs;
2209 
2210 #ifdef CONFIG_SYSFS
2211 	struct kset		*queues_kset;
2212 #endif
2213 #ifdef CONFIG_LOCKDEP
2214 	struct list_head	unlink_list;
2215 #endif
2216 	unsigned int		promiscuity;
2217 	unsigned int		allmulti;
2218 	bool			uc_promisc;
2219 #ifdef CONFIG_LOCKDEP
2220 	unsigned char		nested_level;
2221 #endif
2222 
2223 
2224 	/* Protocol-specific pointers */
2225 	struct in_device __rcu	*ip_ptr;
2226 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2227 	struct vlan_info __rcu	*vlan_info;
2228 #endif
2229 #if IS_ENABLED(CONFIG_NET_DSA)
2230 	struct dsa_port		*dsa_ptr;
2231 #endif
2232 #if IS_ENABLED(CONFIG_TIPC)
2233 	struct tipc_bearer __rcu *tipc_ptr;
2234 #endif
2235 #if IS_ENABLED(CONFIG_ATALK)
2236 	void 			*atalk_ptr;
2237 #endif
2238 #if IS_ENABLED(CONFIG_AX25)
2239 	void			*ax25_ptr;
2240 #endif
2241 #if IS_ENABLED(CONFIG_CFG80211)
2242 	struct wireless_dev	*ieee80211_ptr;
2243 #endif
2244 #if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
2245 	struct wpan_dev		*ieee802154_ptr;
2246 #endif
2247 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
2248 	struct mpls_dev __rcu	*mpls_ptr;
2249 #endif
2250 #if IS_ENABLED(CONFIG_MCTP)
2251 	struct mctp_dev __rcu	*mctp_ptr;
2252 #endif
2253 
2254 /*
2255  * Cache lines mostly used on receive path (including eth_type_trans())
2256  */
2257 	/* Interface address info used in eth_type_trans() */
2258 	const unsigned char	*dev_addr;
2259 
2260 	unsigned int		num_rx_queues;
2261 #define GRO_LEGACY_MAX_SIZE	65536u
2262 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
2263  * and shinfo->gso_segs is a 16bit field.
2264  */
2265 #define GRO_MAX_SIZE		(8 * 65535u)
2266 	unsigned int		xdp_zc_max_segs;
2267 	struct netdev_queue __rcu *ingress_queue;
2268 #ifdef CONFIG_NETFILTER_INGRESS
2269 	struct nf_hook_entries __rcu *nf_hooks_ingress;
2270 #endif
2271 
2272 	unsigned char		broadcast[MAX_ADDR_LEN];
2273 #ifdef CONFIG_RFS_ACCEL
2274 	struct cpu_rmap		*rx_cpu_rmap;
2275 #endif
2276 	struct hlist_node	index_hlist;
2277 
2278 /*
2279  * Cache lines mostly used on transmit path
2280  */
2281 	unsigned int		num_tx_queues;
2282 	struct Qdisc __rcu	*qdisc;
2283 	unsigned int		tx_queue_len;
2284 	spinlock_t		tx_global_lock;
2285 
2286 	struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
2287 
2288 #ifdef CONFIG_NET_SCHED
2289 	DECLARE_HASHTABLE	(qdisc_hash, 4);
2290 #endif
2291 	/* These may be needed for future network-power-down code. */
2292 	struct timer_list	watchdog_timer;
2293 	int			watchdog_timeo;
2294 
2295 	u32                     proto_down_reason;
2296 
2297 	struct list_head	todo_list;
2298 
2299 #ifdef CONFIG_PCPU_DEV_REFCNT
2300 	int __percpu		*pcpu_refcnt;
2301 #else
2302 	refcount_t		dev_refcnt;
2303 #endif
2304 	struct ref_tracker_dir	refcnt_tracker;
2305 
2306 	struct list_head	link_watch_list;
2307 
2308 	u8 reg_state;
2309 
2310 	bool dismantle;
2311 
2312 	enum {
2313 		RTNL_LINK_INITIALIZED,
2314 		RTNL_LINK_INITIALIZING,
2315 	} rtnl_link_state:16;
2316 
2317 	bool needs_free_netdev;
2318 	void (*priv_destructor)(struct net_device *dev);
2319 
2320 	/* mid-layer private */
2321 	void				*ml_priv;
2322 	enum netdev_ml_priv_type	ml_priv_type;
2323 
2324 	enum netdev_stat_type		pcpu_stat_type:8;
2325 
2326 #if IS_ENABLED(CONFIG_GARP)
2327 	struct garp_port __rcu	*garp_port;
2328 #endif
2329 #if IS_ENABLED(CONFIG_MRP)
2330 	struct mrp_port __rcu	*mrp_port;
2331 #endif
2332 #if IS_ENABLED(CONFIG_NET_DROP_MONITOR)
2333 	struct dm_hw_stat_delta __rcu *dm_private;
2334 #endif
2335 	struct device		dev;
2336 	const struct attribute_group *sysfs_groups[4];
2337 	const struct attribute_group *sysfs_rx_queue_group;
2338 
2339 	const struct rtnl_link_ops *rtnl_link_ops;
2340 
2341 	const struct netdev_stat_ops *stat_ops;
2342 
2343 	const struct netdev_queue_mgmt_ops *queue_mgmt_ops;
2344 
2345 	/* for setting kernel sock attribute on TCP connection setup */
2346 #define GSO_MAX_SEGS		65535u
2347 #define GSO_LEGACY_MAX_SIZE	65536u
2348 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
2349  * and shinfo->gso_segs is a 16bit field.
2350  */
2351 #define GSO_MAX_SIZE		(8 * GSO_MAX_SEGS)
2352 
2353 #define TSO_LEGACY_MAX_SIZE	65536
2354 #define TSO_MAX_SIZE		UINT_MAX
2355 	unsigned int		tso_max_size;
2356 #define TSO_MAX_SEGS		U16_MAX
2357 	u16			tso_max_segs;
2358 
2359 #ifdef CONFIG_DCB
2360 	const struct dcbnl_rtnl_ops *dcbnl_ops;
2361 #endif
2362 	u8			prio_tc_map[TC_BITMASK + 1];
2363 
2364 #if IS_ENABLED(CONFIG_FCOE)
2365 	unsigned int		fcoe_ddp_xid;
2366 #endif
2367 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2368 	struct netprio_map __rcu *priomap;
2369 #endif
2370 	struct phy_device	*phydev;
2371 	struct sfp_bus		*sfp_bus;
2372 	struct lock_class_key	*qdisc_tx_busylock;
2373 	bool			proto_down;
2374 	bool			threaded;
2375 	unsigned		wol_enabled:1;
2376 
2377 	struct list_head	net_notifier_list;
2378 
2379 #if IS_ENABLED(CONFIG_MACSEC)
2380 	/* MACsec management functions */
2381 	const struct macsec_ops *macsec_ops;
2382 #endif
2383 	const struct udp_tunnel_nic_info	*udp_tunnel_nic_info;
2384 	struct udp_tunnel_nic	*udp_tunnel_nic;
2385 
2386 	/* protected by rtnl_lock */
2387 	struct bpf_xdp_entity	xdp_state[__MAX_XDP_MODE];
2388 
2389 	u8 dev_addr_shadow[MAX_ADDR_LEN];
2390 	netdevice_tracker	linkwatch_dev_tracker;
2391 	netdevice_tracker	watchdog_dev_tracker;
2392 	netdevice_tracker	dev_registered_tracker;
2393 	struct rtnl_hw_stats64	*offload_xstats_l3;
2394 
2395 	struct devlink_port	*devlink_port;
2396 
2397 #if IS_ENABLED(CONFIG_DPLL)
2398 	struct dpll_pin	__rcu	*dpll_pin;
2399 #endif
2400 #if IS_ENABLED(CONFIG_PAGE_POOL)
2401 	/** @page_pools: page pools created for this netdevice */
2402 	struct hlist_head	page_pools;
2403 #endif
2404 };
2405 #define to_net_dev(d) container_of(d, struct net_device, dev)
2406 
2407 /*
2408  * Driver should use this to assign devlink port instance to a netdevice
2409  * before it registers the netdevice. Therefore devlink_port is static
2410  * during the netdev lifetime after it is registered.
2411  */
2412 #define SET_NETDEV_DEVLINK_PORT(dev, port)			\
2413 ({								\
2414 	WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED);	\
2415 	((dev)->devlink_port = (port));				\
2416 })
2417 
netif_elide_gro(const struct net_device * dev)2418 static inline bool netif_elide_gro(const struct net_device *dev)
2419 {
2420 	if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
2421 		return true;
2422 	return false;
2423 }
2424 
2425 #define	NETDEV_ALIGN		32
2426 
2427 static inline
netdev_get_prio_tc_map(const struct net_device * dev,u32 prio)2428 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
2429 {
2430 	return dev->prio_tc_map[prio & TC_BITMASK];
2431 }
2432 
2433 static inline
netdev_set_prio_tc_map(struct net_device * dev,u8 prio,u8 tc)2434 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
2435 {
2436 	if (tc >= dev->num_tc)
2437 		return -EINVAL;
2438 
2439 	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
2440 	return 0;
2441 }
2442 
2443 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
2444 void netdev_reset_tc(struct net_device *dev);
2445 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
2446 int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
2447 
2448 static inline
netdev_get_num_tc(struct net_device * dev)2449 int netdev_get_num_tc(struct net_device *dev)
2450 {
2451 	return dev->num_tc;
2452 }
2453 
net_prefetch(void * p)2454 static inline void net_prefetch(void *p)
2455 {
2456 	prefetch(p);
2457 #if L1_CACHE_BYTES < 128
2458 	prefetch((u8 *)p + L1_CACHE_BYTES);
2459 #endif
2460 }
2461 
net_prefetchw(void * p)2462 static inline void net_prefetchw(void *p)
2463 {
2464 	prefetchw(p);
2465 #if L1_CACHE_BYTES < 128
2466 	prefetchw((u8 *)p + L1_CACHE_BYTES);
2467 #endif
2468 }
2469 
2470 void netdev_unbind_sb_channel(struct net_device *dev,
2471 			      struct net_device *sb_dev);
2472 int netdev_bind_sb_channel_queue(struct net_device *dev,
2473 				 struct net_device *sb_dev,
2474 				 u8 tc, u16 count, u16 offset);
2475 int netdev_set_sb_channel(struct net_device *dev, u16 channel);
netdev_get_sb_channel(struct net_device * dev)2476 static inline int netdev_get_sb_channel(struct net_device *dev)
2477 {
2478 	return max_t(int, -dev->num_tc, 0);
2479 }
2480 
2481 static inline
netdev_get_tx_queue(const struct net_device * dev,unsigned int index)2482 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
2483 					 unsigned int index)
2484 {
2485 	DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues);
2486 	return &dev->_tx[index];
2487 }
2488 
skb_get_tx_queue(const struct net_device * dev,const struct sk_buff * skb)2489 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
2490 						    const struct sk_buff *skb)
2491 {
2492 	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2493 }
2494 
netdev_for_each_tx_queue(struct net_device * dev,void (* f)(struct net_device *,struct netdev_queue *,void *),void * arg)2495 static inline void netdev_for_each_tx_queue(struct net_device *dev,
2496 					    void (*f)(struct net_device *,
2497 						      struct netdev_queue *,
2498 						      void *),
2499 					    void *arg)
2500 {
2501 	unsigned int i;
2502 
2503 	for (i = 0; i < dev->num_tx_queues; i++)
2504 		f(dev, &dev->_tx[i], arg);
2505 }
2506 
2507 #define netdev_lockdep_set_classes(dev)				\
2508 {								\
2509 	static struct lock_class_key qdisc_tx_busylock_key;	\
2510 	static struct lock_class_key qdisc_xmit_lock_key;	\
2511 	static struct lock_class_key dev_addr_list_lock_key;	\
2512 	unsigned int i;						\
2513 								\
2514 	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
2515 	lockdep_set_class(&(dev)->addr_list_lock,		\
2516 			  &dev_addr_list_lock_key);		\
2517 	for (i = 0; i < (dev)->num_tx_queues; i++)		\
2518 		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
2519 				  &qdisc_xmit_lock_key);	\
2520 }
2521 
2522 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2523 		     struct net_device *sb_dev);
2524 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2525 					 struct sk_buff *skb,
2526 					 struct net_device *sb_dev);
2527 
2528 /* returns the headroom that the master device needs to take in account
2529  * when forwarding to this dev
2530  */
netdev_get_fwd_headroom(struct net_device * dev)2531 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2532 {
2533 	return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2534 }
2535 
netdev_set_rx_headroom(struct net_device * dev,int new_hr)2536 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2537 {
2538 	if (dev->netdev_ops->ndo_set_rx_headroom)
2539 		dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2540 }
2541 
2542 /* set the device rx headroom to the dev's default */
netdev_reset_rx_headroom(struct net_device * dev)2543 static inline void netdev_reset_rx_headroom(struct net_device *dev)
2544 {
2545 	netdev_set_rx_headroom(dev, -1);
2546 }
2547 
netdev_get_ml_priv(struct net_device * dev,enum netdev_ml_priv_type type)2548 static inline void *netdev_get_ml_priv(struct net_device *dev,
2549 				       enum netdev_ml_priv_type type)
2550 {
2551 	if (dev->ml_priv_type != type)
2552 		return NULL;
2553 
2554 	return dev->ml_priv;
2555 }
2556 
netdev_set_ml_priv(struct net_device * dev,void * ml_priv,enum netdev_ml_priv_type type)2557 static inline void netdev_set_ml_priv(struct net_device *dev,
2558 				      void *ml_priv,
2559 				      enum netdev_ml_priv_type type)
2560 {
2561 	WARN(dev->ml_priv_type && dev->ml_priv_type != type,
2562 	     "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
2563 	     dev->ml_priv_type, type);
2564 	WARN(!dev->ml_priv_type && dev->ml_priv,
2565 	     "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
2566 
2567 	dev->ml_priv = ml_priv;
2568 	dev->ml_priv_type = type;
2569 }
2570 
2571 /*
2572  * Net namespace inlines
2573  */
2574 static inline
dev_net(const struct net_device * dev)2575 struct net *dev_net(const struct net_device *dev)
2576 {
2577 	return read_pnet(&dev->nd_net);
2578 }
2579 
2580 static inline
dev_net_set(struct net_device * dev,struct net * net)2581 void dev_net_set(struct net_device *dev, struct net *net)
2582 {
2583 	write_pnet(&dev->nd_net, net);
2584 }
2585 
2586 /**
2587  *	netdev_priv - access network device private data
2588  *	@dev: network device
2589  *
2590  * Get network device private data
2591  */
netdev_priv(const struct net_device * dev)2592 static inline void *netdev_priv(const struct net_device *dev)
2593 {
2594 	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
2595 }
2596 
2597 /* Set the sysfs physical device reference for the network logical device
2598  * if set prior to registration will cause a symlink during initialization.
2599  */
2600 #define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
2601 
2602 /* Set the sysfs device type for the network logical device to allow
2603  * fine-grained identification of different network device types. For
2604  * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
2605  */
2606 #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
2607 
2608 void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
2609 			  enum netdev_queue_type type,
2610 			  struct napi_struct *napi);
2611 
netif_napi_set_irq(struct napi_struct * napi,int irq)2612 static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
2613 {
2614 	napi->irq = irq;
2615 }
2616 
2617 /* Default NAPI poll() weight
2618  * Device drivers are strongly advised to not use bigger value
2619  */
2620 #define NAPI_POLL_WEIGHT 64
2621 
2622 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
2623 			   int (*poll)(struct napi_struct *, int), int weight);
2624 
2625 /**
2626  * netif_napi_add() - initialize a NAPI context
2627  * @dev:  network device
2628  * @napi: NAPI context
2629  * @poll: polling function
2630  *
2631  * netif_napi_add() must be used to initialize a NAPI context prior to calling
2632  * *any* of the other NAPI-related functions.
2633  */
2634 static inline void
netif_napi_add(struct net_device * dev,struct napi_struct * napi,int (* poll)(struct napi_struct *,int))2635 netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2636 	       int (*poll)(struct napi_struct *, int))
2637 {
2638 	netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
2639 }
2640 
2641 static inline void
netif_napi_add_tx_weight(struct net_device * dev,struct napi_struct * napi,int (* poll)(struct napi_struct *,int),int weight)2642 netif_napi_add_tx_weight(struct net_device *dev,
2643 			 struct napi_struct *napi,
2644 			 int (*poll)(struct napi_struct *, int),
2645 			 int weight)
2646 {
2647 	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2648 	netif_napi_add_weight(dev, napi, poll, weight);
2649 }
2650 
2651 /**
2652  * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
2653  * @dev:  network device
2654  * @napi: NAPI context
2655  * @poll: polling function
2656  *
2657  * This variant of netif_napi_add() should be used from drivers using NAPI
2658  * to exclusively poll a TX queue.
2659  * This will avoid we add it into napi_hash[], thus polluting this hash table.
2660  */
netif_napi_add_tx(struct net_device * dev,struct napi_struct * napi,int (* poll)(struct napi_struct *,int))2661 static inline void netif_napi_add_tx(struct net_device *dev,
2662 				     struct napi_struct *napi,
2663 				     int (*poll)(struct napi_struct *, int))
2664 {
2665 	netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
2666 }
2667 
2668 /**
2669  *  __netif_napi_del - remove a NAPI context
2670  *  @napi: NAPI context
2671  *
2672  * Warning: caller must observe RCU grace period before freeing memory
2673  * containing @napi. Drivers might want to call this helper to combine
2674  * all the needed RCU grace periods into a single one.
2675  */
2676 void __netif_napi_del(struct napi_struct *napi);
2677 
2678 /**
2679  *  netif_napi_del - remove a NAPI context
2680  *  @napi: NAPI context
2681  *
2682  *  netif_napi_del() removes a NAPI context from the network device NAPI list
2683  */
netif_napi_del(struct napi_struct * napi)2684 static inline void netif_napi_del(struct napi_struct *napi)
2685 {
2686 	__netif_napi_del(napi);
2687 	synchronize_net();
2688 }
2689 
2690 struct packet_type {
2691 	__be16			type;	/* This is really htons(ether_type). */
2692 	bool			ignore_outgoing;
2693 	struct net_device	*dev;	/* NULL is wildcarded here	     */
2694 	netdevice_tracker	dev_tracker;
2695 	int			(*func) (struct sk_buff *,
2696 					 struct net_device *,
2697 					 struct packet_type *,
2698 					 struct net_device *);
2699 	void			(*list_func) (struct list_head *,
2700 					      struct packet_type *,
2701 					      struct net_device *);
2702 	bool			(*id_match)(struct packet_type *ptype,
2703 					    struct sock *sk);
2704 	struct net		*af_packet_net;
2705 	void			*af_packet_priv;
2706 	struct list_head	list;
2707 };
2708 
2709 struct offload_callbacks {
2710 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
2711 						netdev_features_t features);
2712 	struct sk_buff		*(*gro_receive)(struct list_head *head,
2713 						struct sk_buff *skb);
2714 	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
2715 };
2716 
2717 struct packet_offload {
2718 	__be16			 type;	/* This is really htons(ether_type). */
2719 	u16			 priority;
2720 	struct offload_callbacks callbacks;
2721 	struct list_head	 list;
2722 };
2723 
2724 /* often modified stats are per-CPU, other are shared (netdev->stats) */
2725 struct pcpu_sw_netstats {
2726 	u64_stats_t		rx_packets;
2727 	u64_stats_t		rx_bytes;
2728 	u64_stats_t		tx_packets;
2729 	u64_stats_t		tx_bytes;
2730 	struct u64_stats_sync   syncp;
2731 } __aligned(4 * sizeof(u64));
2732 
2733 struct pcpu_dstats {
2734 	u64			rx_packets;
2735 	u64			rx_bytes;
2736 	u64			rx_drops;
2737 	u64			tx_packets;
2738 	u64			tx_bytes;
2739 	u64			tx_drops;
2740 	struct u64_stats_sync	syncp;
2741 } __aligned(8 * sizeof(u64));
2742 
2743 struct pcpu_lstats {
2744 	u64_stats_t packets;
2745 	u64_stats_t bytes;
2746 	struct u64_stats_sync syncp;
2747 } __aligned(2 * sizeof(u64));
2748 
2749 void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
2750 
dev_sw_netstats_rx_add(struct net_device * dev,unsigned int len)2751 static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
2752 {
2753 	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
2754 
2755 	u64_stats_update_begin(&tstats->syncp);
2756 	u64_stats_add(&tstats->rx_bytes, len);
2757 	u64_stats_inc(&tstats->rx_packets);
2758 	u64_stats_update_end(&tstats->syncp);
2759 }
2760 
dev_sw_netstats_tx_add(struct net_device * dev,unsigned int packets,unsigned int len)2761 static inline void dev_sw_netstats_tx_add(struct net_device *dev,
2762 					  unsigned int packets,
2763 					  unsigned int len)
2764 {
2765 	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
2766 
2767 	u64_stats_update_begin(&tstats->syncp);
2768 	u64_stats_add(&tstats->tx_bytes, len);
2769 	u64_stats_add(&tstats->tx_packets, packets);
2770 	u64_stats_update_end(&tstats->syncp);
2771 }
2772 
dev_lstats_add(struct net_device * dev,unsigned int len)2773 static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
2774 {
2775 	struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
2776 
2777 	u64_stats_update_begin(&lstats->syncp);
2778 	u64_stats_add(&lstats->bytes, len);
2779 	u64_stats_inc(&lstats->packets);
2780 	u64_stats_update_end(&lstats->syncp);
2781 }
2782 
2783 #define __netdev_alloc_pcpu_stats(type, gfp)				\
2784 ({									\
2785 	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
2786 	if (pcpu_stats)	{						\
2787 		int __cpu;						\
2788 		for_each_possible_cpu(__cpu) {				\
2789 			typeof(type) *stat;				\
2790 			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
2791 			u64_stats_init(&stat->syncp);			\
2792 		}							\
2793 	}								\
2794 	pcpu_stats;							\
2795 })
2796 
2797 #define netdev_alloc_pcpu_stats(type)					\
2798 	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
2799 
2800 #define devm_netdev_alloc_pcpu_stats(dev, type)				\
2801 ({									\
2802 	typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
2803 	if (pcpu_stats) {						\
2804 		int __cpu;						\
2805 		for_each_possible_cpu(__cpu) {				\
2806 			typeof(type) *stat;				\
2807 			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
2808 			u64_stats_init(&stat->syncp);			\
2809 		}							\
2810 	}								\
2811 	pcpu_stats;							\
2812 })
2813 
2814 enum netdev_lag_tx_type {
2815 	NETDEV_LAG_TX_TYPE_UNKNOWN,
2816 	NETDEV_LAG_TX_TYPE_RANDOM,
2817 	NETDEV_LAG_TX_TYPE_BROADCAST,
2818 	NETDEV_LAG_TX_TYPE_ROUNDROBIN,
2819 	NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
2820 	NETDEV_LAG_TX_TYPE_HASH,
2821 };
2822 
2823 enum netdev_lag_hash {
2824 	NETDEV_LAG_HASH_NONE,
2825 	NETDEV_LAG_HASH_L2,
2826 	NETDEV_LAG_HASH_L34,
2827 	NETDEV_LAG_HASH_L23,
2828 	NETDEV_LAG_HASH_E23,
2829 	NETDEV_LAG_HASH_E34,
2830 	NETDEV_LAG_HASH_VLAN_SRCMAC,
2831 	NETDEV_LAG_HASH_UNKNOWN,
2832 };
2833 
2834 struct netdev_lag_upper_info {
2835 	enum netdev_lag_tx_type tx_type;
2836 	enum netdev_lag_hash hash_type;
2837 };
2838 
2839 struct netdev_lag_lower_state_info {
2840 	u8 link_up : 1,
2841 	   tx_enabled : 1;
2842 };
2843 
2844 #include <linux/notifier.h>
2845 
2846 /* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
2847  * and the rtnetlink notification exclusion list in rtnetlink_event() when
2848  * adding new types.
2849  */
2850 enum netdev_cmd {
2851 	NETDEV_UP	= 1,	/* For now you can't veto a device up/down */
2852 	NETDEV_DOWN,
2853 	NETDEV_REBOOT,		/* Tell a protocol stack a network interface
2854 				   detected a hardware crash and restarted
2855 				   - we can use this eg to kick tcp sessions
2856 				   once done */
2857 	NETDEV_CHANGE,		/* Notify device state change */
2858 	NETDEV_REGISTER,
2859 	NETDEV_UNREGISTER,
2860 	NETDEV_CHANGEMTU,	/* notify after mtu change happened */
2861 	NETDEV_CHANGEADDR,	/* notify after the address change */
2862 	NETDEV_PRE_CHANGEADDR,	/* notify before the address change */
2863 	NETDEV_GOING_DOWN,
2864 	NETDEV_CHANGENAME,
2865 	NETDEV_FEAT_CHANGE,
2866 	NETDEV_BONDING_FAILOVER,
2867 	NETDEV_PRE_UP,
2868 	NETDEV_PRE_TYPE_CHANGE,
2869 	NETDEV_POST_TYPE_CHANGE,
2870 	NETDEV_POST_INIT,
2871 	NETDEV_PRE_UNINIT,
2872 	NETDEV_RELEASE,
2873 	NETDEV_NOTIFY_PEERS,
2874 	NETDEV_JOIN,
2875 	NETDEV_CHANGEUPPER,
2876 	NETDEV_RESEND_IGMP,
2877 	NETDEV_PRECHANGEMTU,	/* notify before mtu change happened */
2878 	NETDEV_CHANGEINFODATA,
2879 	NETDEV_BONDING_INFO,
2880 	NETDEV_PRECHANGEUPPER,
2881 	NETDEV_CHANGELOWERSTATE,
2882 	NETDEV_UDP_TUNNEL_PUSH_INFO,
2883 	NETDEV_UDP_TUNNEL_DROP_INFO,
2884 	NETDEV_CHANGE_TX_QUEUE_LEN,
2885 	NETDEV_CVLAN_FILTER_PUSH_INFO,
2886 	NETDEV_CVLAN_FILTER_DROP_INFO,
2887 	NETDEV_SVLAN_FILTER_PUSH_INFO,
2888 	NETDEV_SVLAN_FILTER_DROP_INFO,
2889 	NETDEV_OFFLOAD_XSTATS_ENABLE,
2890 	NETDEV_OFFLOAD_XSTATS_DISABLE,
2891 	NETDEV_OFFLOAD_XSTATS_REPORT_USED,
2892 	NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
2893 	NETDEV_XDP_FEAT_CHANGE,
2894 };
2895 const char *netdev_cmd_to_name(enum netdev_cmd cmd);
2896 
2897 int register_netdevice_notifier(struct notifier_block *nb);
2898 int unregister_netdevice_notifier(struct notifier_block *nb);
2899 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
2900 int unregister_netdevice_notifier_net(struct net *net,
2901 				      struct notifier_block *nb);
2902 int register_netdevice_notifier_dev_net(struct net_device *dev,
2903 					struct notifier_block *nb,
2904 					struct netdev_net_notifier *nn);
2905 int unregister_netdevice_notifier_dev_net(struct net_device *dev,
2906 					  struct notifier_block *nb,
2907 					  struct netdev_net_notifier *nn);
2908 
2909 struct netdev_notifier_info {
2910 	struct net_device	*dev;
2911 	struct netlink_ext_ack	*extack;
2912 };
2913 
2914 struct netdev_notifier_info_ext {
2915 	struct netdev_notifier_info info; /* must be first */
2916 	union {
2917 		u32 mtu;
2918 	} ext;
2919 };
2920 
2921 struct netdev_notifier_change_info {
2922 	struct netdev_notifier_info info; /* must be first */
2923 	unsigned int flags_changed;
2924 };
2925 
2926 struct netdev_notifier_changeupper_info {
2927 	struct netdev_notifier_info info; /* must be first */
2928 	struct net_device *upper_dev; /* new upper dev */
2929 	bool master; /* is upper dev master */
2930 	bool linking; /* is the notification for link or unlink */
2931 	void *upper_info; /* upper dev info */
2932 };
2933 
2934 struct netdev_notifier_changelowerstate_info {
2935 	struct netdev_notifier_info info; /* must be first */
2936 	void *lower_state_info; /* is lower dev state */
2937 };
2938 
2939 struct netdev_notifier_pre_changeaddr_info {
2940 	struct netdev_notifier_info info; /* must be first */
2941 	const unsigned char *dev_addr;
2942 };
2943 
2944 enum netdev_offload_xstats_type {
2945 	NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1,
2946 };
2947 
2948 struct netdev_notifier_offload_xstats_info {
2949 	struct netdev_notifier_info info; /* must be first */
2950 	enum netdev_offload_xstats_type type;
2951 
2952 	union {
2953 		/* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */
2954 		struct netdev_notifier_offload_xstats_rd *report_delta;
2955 		/* NETDEV_OFFLOAD_XSTATS_REPORT_USED */
2956 		struct netdev_notifier_offload_xstats_ru *report_used;
2957 	};
2958 };
2959 
2960 int netdev_offload_xstats_enable(struct net_device *dev,
2961 				 enum netdev_offload_xstats_type type,
2962 				 struct netlink_ext_ack *extack);
2963 int netdev_offload_xstats_disable(struct net_device *dev,
2964 				  enum netdev_offload_xstats_type type);
2965 bool netdev_offload_xstats_enabled(const struct net_device *dev,
2966 				   enum netdev_offload_xstats_type type);
2967 int netdev_offload_xstats_get(struct net_device *dev,
2968 			      enum netdev_offload_xstats_type type,
2969 			      struct rtnl_hw_stats64 *stats, bool *used,
2970 			      struct netlink_ext_ack *extack);
2971 void
2972 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd,
2973 				   const struct rtnl_hw_stats64 *stats);
2974 void
2975 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru);
2976 void netdev_offload_xstats_push_delta(struct net_device *dev,
2977 				      enum netdev_offload_xstats_type type,
2978 				      const struct rtnl_hw_stats64 *stats);
2979 
netdev_notifier_info_init(struct netdev_notifier_info * info,struct net_device * dev)2980 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
2981 					     struct net_device *dev)
2982 {
2983 	info->dev = dev;
2984 	info->extack = NULL;
2985 }
2986 
2987 static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info * info)2988 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
2989 {
2990 	return info->dev;
2991 }
2992 
2993 static inline struct netlink_ext_ack *
netdev_notifier_info_to_extack(const struct netdev_notifier_info * info)2994 netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
2995 {
2996 	return info->extack;
2997 }
2998 
2999 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
3000 int call_netdevice_notifiers_info(unsigned long val,
3001 				  struct netdev_notifier_info *info);
3002 
3003 #define for_each_netdev(net, d)		\
3004 		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
3005 #define for_each_netdev_reverse(net, d)	\
3006 		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
3007 #define for_each_netdev_rcu(net, d)		\
3008 		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
3009 #define for_each_netdev_safe(net, d, n)	\
3010 		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
3011 #define for_each_netdev_continue(net, d)		\
3012 		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
3013 #define for_each_netdev_continue_reverse(net, d)		\
3014 		list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
3015 						     dev_list)
3016 #define for_each_netdev_continue_rcu(net, d)		\
3017 	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
3018 #define for_each_netdev_in_bond_rcu(bond, slave)	\
3019 		for_each_netdev_rcu(&init_net, slave)	\
3020 			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
3021 #define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
3022 
3023 #define for_each_netdev_dump(net, d, ifindex)				\
3024 	xa_for_each_start(&(net)->dev_by_index, (ifindex), (d), (ifindex))
3025 
next_net_device(struct net_device * dev)3026 static inline struct net_device *next_net_device(struct net_device *dev)
3027 {
3028 	struct list_head *lh;
3029 	struct net *net;
3030 
3031 	net = dev_net(dev);
3032 	lh = dev->dev_list.next;
3033 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
3034 }
3035 
next_net_device_rcu(struct net_device * dev)3036 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
3037 {
3038 	struct list_head *lh;
3039 	struct net *net;
3040 
3041 	net = dev_net(dev);
3042 	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
3043 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
3044 }
3045 
first_net_device(struct net * net)3046 static inline struct net_device *first_net_device(struct net *net)
3047 {
3048 	return list_empty(&net->dev_base_head) ? NULL :
3049 		net_device_entry(net->dev_base_head.next);
3050 }
3051 
first_net_device_rcu(struct net * net)3052 static inline struct net_device *first_net_device_rcu(struct net *net)
3053 {
3054 	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
3055 
3056 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
3057 }
3058 
3059 int netdev_boot_setup_check(struct net_device *dev);
3060 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
3061 				       const char *hwaddr);
3062 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
3063 void dev_add_pack(struct packet_type *pt);
3064 void dev_remove_pack(struct packet_type *pt);
3065 void __dev_remove_pack(struct packet_type *pt);
3066 void dev_add_offload(struct packet_offload *po);
3067 void dev_remove_offload(struct packet_offload *po);
3068 
3069 int dev_get_iflink(const struct net_device *dev);
3070 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
3071 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
3072 			  struct net_device_path_stack *stack);
3073 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
3074 				      unsigned short mask);
3075 struct net_device *dev_get_by_name(struct net *net, const char *name);
3076 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
3077 struct net_device *__dev_get_by_name(struct net *net, const char *name);
3078 bool netdev_name_in_use(struct net *net, const char *name);
3079 int dev_alloc_name(struct net_device *dev, const char *name);
3080 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
3081 void dev_close(struct net_device *dev);
3082 void dev_close_many(struct list_head *head, bool unlink);
3083 void dev_disable_lro(struct net_device *dev);
3084 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
3085 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
3086 		     struct net_device *sb_dev);
3087 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
3088 		       struct net_device *sb_dev);
3089 
3090 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
3091 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
3092 
dev_queue_xmit(struct sk_buff * skb)3093 static inline int dev_queue_xmit(struct sk_buff *skb)
3094 {
3095 	return __dev_queue_xmit(skb, NULL);
3096 }
3097 
dev_queue_xmit_accel(struct sk_buff * skb,struct net_device * sb_dev)3098 static inline int dev_queue_xmit_accel(struct sk_buff *skb,
3099 				       struct net_device *sb_dev)
3100 {
3101 	return __dev_queue_xmit(skb, sb_dev);
3102 }
3103 
dev_direct_xmit(struct sk_buff * skb,u16 queue_id)3104 static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
3105 {
3106 	int ret;
3107 
3108 	ret = __dev_direct_xmit(skb, queue_id);
3109 	if (!dev_xmit_complete(ret))
3110 		kfree_skb(skb);
3111 	return ret;
3112 }
3113 
3114 int register_netdevice(struct net_device *dev);
3115 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
3116 void unregister_netdevice_many(struct list_head *head);
unregister_netdevice(struct net_device * dev)3117 static inline void unregister_netdevice(struct net_device *dev)
3118 {
3119 	unregister_netdevice_queue(dev, NULL);
3120 }
3121 
3122 int netdev_refcnt_read(const struct net_device *dev);
3123 void free_netdev(struct net_device *dev);
3124 void netdev_freemem(struct net_device *dev);
3125 void init_dummy_netdev(struct net_device *dev);
3126 
3127 struct net_device *netdev_get_xmit_slave(struct net_device *dev,
3128 					 struct sk_buff *skb,
3129 					 bool all_slaves);
3130 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
3131 					    struct sock *sk);
3132 struct net_device *dev_get_by_index(struct net *net, int ifindex);
3133 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
3134 struct net_device *netdev_get_by_index(struct net *net, int ifindex,
3135 				       netdevice_tracker *tracker, gfp_t gfp);
3136 struct net_device *netdev_get_by_name(struct net *net, const char *name,
3137 				      netdevice_tracker *tracker, gfp_t gfp);
3138 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
3139 struct net_device *dev_get_by_napi_id(unsigned int napi_id);
3140 void netdev_copy_name(struct net_device *dev, char *name);
3141 
dev_hard_header(struct sk_buff * skb,struct net_device * dev,unsigned short type,const void * daddr,const void * saddr,unsigned int len)3142 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
3143 				  unsigned short type,
3144 				  const void *daddr, const void *saddr,
3145 				  unsigned int len)
3146 {
3147 	if (!dev->header_ops || !dev->header_ops->create)
3148 		return 0;
3149 
3150 	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
3151 }
3152 
dev_parse_header(const struct sk_buff * skb,unsigned char * haddr)3153 static inline int dev_parse_header(const struct sk_buff *skb,
3154 				   unsigned char *haddr)
3155 {
3156 	const struct net_device *dev = skb->dev;
3157 
3158 	if (!dev->header_ops || !dev->header_ops->parse)
3159 		return 0;
3160 	return dev->header_ops->parse(skb, haddr);
3161 }
3162 
dev_parse_header_protocol(const struct sk_buff * skb)3163 static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
3164 {
3165 	const struct net_device *dev = skb->dev;
3166 
3167 	if (!dev->header_ops || !dev->header_ops->parse_protocol)
3168 		return 0;
3169 	return dev->header_ops->parse_protocol(skb);
3170 }
3171 
3172 /* ll_header must have at least hard_header_len allocated */
dev_validate_header(const struct net_device * dev,char * ll_header,int len)3173 static inline bool dev_validate_header(const struct net_device *dev,
3174 				       char *ll_header, int len)
3175 {
3176 	if (likely(len >= dev->hard_header_len))
3177 		return true;
3178 	if (len < dev->min_header_len)
3179 		return false;
3180 
3181 	if (capable(CAP_SYS_RAWIO)) {
3182 		memset(ll_header + len, 0, dev->hard_header_len - len);
3183 		return true;
3184 	}
3185 
3186 	if (dev->header_ops && dev->header_ops->validate)
3187 		return dev->header_ops->validate(ll_header, len);
3188 
3189 	return false;
3190 }
3191 
dev_has_header(const struct net_device * dev)3192 static inline bool dev_has_header(const struct net_device *dev)
3193 {
3194 	return dev->header_ops && dev->header_ops->create;
3195 }
3196 
3197 /*
3198  * Incoming packets are placed on per-CPU queues
3199  */
3200 struct softnet_data {
3201 	struct list_head	poll_list;
3202 	struct sk_buff_head	process_queue;
3203 
3204 	/* stats */
3205 	unsigned int		processed;
3206 	unsigned int		time_squeeze;
3207 #ifdef CONFIG_RPS
3208 	struct softnet_data	*rps_ipi_list;
3209 #endif
3210 
3211 	unsigned int		received_rps;
3212 	bool			in_net_rx_action;
3213 	bool			in_napi_threaded_poll;
3214 
3215 #ifdef CONFIG_NET_FLOW_LIMIT
3216 	struct sd_flow_limit __rcu *flow_limit;
3217 #endif
3218 	struct Qdisc		*output_queue;
3219 	struct Qdisc		**output_queue_tailp;
3220 	struct sk_buff		*completion_queue;
3221 #ifdef CONFIG_XFRM_OFFLOAD
3222 	struct sk_buff_head	xfrm_backlog;
3223 #endif
3224 	/* written and read only by owning cpu: */
3225 	struct {
3226 		u16 recursion;
3227 		u8  more;
3228 #ifdef CONFIG_NET_EGRESS
3229 		u8  skip_txqueue;
3230 #endif
3231 	} xmit;
3232 #ifdef CONFIG_RPS
3233 	/* input_queue_head should be written by cpu owning this struct,
3234 	 * and only read by other cpus. Worth using a cache line.
3235 	 */
3236 	unsigned int		input_queue_head ____cacheline_aligned_in_smp;
3237 
3238 	/* Elements below can be accessed between CPUs for RPS/RFS */
3239 	call_single_data_t	csd ____cacheline_aligned_in_smp;
3240 	struct softnet_data	*rps_ipi_next;
3241 	unsigned int		cpu;
3242 	unsigned int		input_queue_tail;
3243 #endif
3244 	struct sk_buff_head	input_pkt_queue;
3245 	struct napi_struct	backlog;
3246 
3247 	atomic_t		dropped ____cacheline_aligned_in_smp;
3248 
3249 	/* Another possibly contended cache line */
3250 	spinlock_t		defer_lock ____cacheline_aligned_in_smp;
3251 	int			defer_count;
3252 	int			defer_ipi_scheduled;
3253 	struct sk_buff		*defer_list;
3254 	call_single_data_t	defer_csd;
3255 };
3256 
3257 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
3258 
dev_recursion_level(void)3259 static inline int dev_recursion_level(void)
3260 {
3261 	return this_cpu_read(softnet_data.xmit.recursion);
3262 }
3263 
3264 void __netif_schedule(struct Qdisc *q);
3265 void netif_schedule_queue(struct netdev_queue *txq);
3266 
netif_tx_schedule_all(struct net_device * dev)3267 static inline void netif_tx_schedule_all(struct net_device *dev)
3268 {
3269 	unsigned int i;
3270 
3271 	for (i = 0; i < dev->num_tx_queues; i++)
3272 		netif_schedule_queue(netdev_get_tx_queue(dev, i));
3273 }
3274 
netif_tx_start_queue(struct netdev_queue * dev_queue)3275 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
3276 {
3277 	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3278 }
3279 
3280 /**
3281  *	netif_start_queue - allow transmit
3282  *	@dev: network device
3283  *
3284  *	Allow upper layers to call the device hard_start_xmit routine.
3285  */
netif_start_queue(struct net_device * dev)3286 static inline void netif_start_queue(struct net_device *dev)
3287 {
3288 	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
3289 }
3290 
netif_tx_start_all_queues(struct net_device * dev)3291 static inline void netif_tx_start_all_queues(struct net_device *dev)
3292 {
3293 	unsigned int i;
3294 
3295 	for (i = 0; i < dev->num_tx_queues; i++) {
3296 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3297 		netif_tx_start_queue(txq);
3298 	}
3299 }
3300 
3301 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
3302 
3303 /**
3304  *	netif_wake_queue - restart transmit
3305  *	@dev: network device
3306  *
3307  *	Allow upper layers to call the device hard_start_xmit routine.
3308  *	Used for flow control when transmit resources are available.
3309  */
netif_wake_queue(struct net_device * dev)3310 static inline void netif_wake_queue(struct net_device *dev)
3311 {
3312 	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
3313 }
3314 
netif_tx_wake_all_queues(struct net_device * dev)3315 static inline void netif_tx_wake_all_queues(struct net_device *dev)
3316 {
3317 	unsigned int i;
3318 
3319 	for (i = 0; i < dev->num_tx_queues; i++) {
3320 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3321 		netif_tx_wake_queue(txq);
3322 	}
3323 }
3324 
netif_tx_stop_queue(struct netdev_queue * dev_queue)3325 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
3326 {
3327 	/* Must be an atomic op see netif_txq_try_stop() */
3328 	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3329 }
3330 
3331 /**
3332  *	netif_stop_queue - stop transmitted packets
3333  *	@dev: network device
3334  *
3335  *	Stop upper layers calling the device hard_start_xmit routine.
3336  *	Used for flow control when transmit resources are unavailable.
3337  */
netif_stop_queue(struct net_device * dev)3338 static inline void netif_stop_queue(struct net_device *dev)
3339 {
3340 	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
3341 }
3342 
3343 void netif_tx_stop_all_queues(struct net_device *dev);
3344 
netif_tx_queue_stopped(const struct netdev_queue * dev_queue)3345 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
3346 {
3347 	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3348 }
3349 
3350 /**
3351  *	netif_queue_stopped - test if transmit queue is flowblocked
3352  *	@dev: network device
3353  *
3354  *	Test if transmit queue on device is currently unable to send.
3355  */
netif_queue_stopped(const struct net_device * dev)3356 static inline bool netif_queue_stopped(const struct net_device *dev)
3357 {
3358 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
3359 }
3360 
netif_xmit_stopped(const struct netdev_queue * dev_queue)3361 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
3362 {
3363 	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
3364 }
3365 
3366 static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue * dev_queue)3367 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
3368 {
3369 	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
3370 }
3371 
3372 static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue * dev_queue)3373 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
3374 {
3375 	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
3376 }
3377 
3378 /**
3379  *	netdev_queue_set_dql_min_limit - set dql minimum limit
3380  *	@dev_queue: pointer to transmit queue
3381  *	@min_limit: dql minimum limit
3382  *
3383  * Forces xmit_more() to return true until the minimum threshold
3384  * defined by @min_limit is reached (or until the tx queue is
3385  * empty). Warning: to be use with care, misuse will impact the
3386  * latency.
3387  */
netdev_queue_set_dql_min_limit(struct netdev_queue * dev_queue,unsigned int min_limit)3388 static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue,
3389 						  unsigned int min_limit)
3390 {
3391 #ifdef CONFIG_BQL
3392 	dev_queue->dql.min_limit = min_limit;
3393 #endif
3394 }
3395 
netdev_queue_dql_avail(const struct netdev_queue * txq)3396 static inline int netdev_queue_dql_avail(const struct netdev_queue *txq)
3397 {
3398 #ifdef CONFIG_BQL
3399 	/* Non-BQL migrated drivers will return 0, too. */
3400 	return dql_avail(&txq->dql);
3401 #else
3402 	return 0;
3403 #endif
3404 }
3405 
3406 /**
3407  *	netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
3408  *	@dev_queue: pointer to transmit queue
3409  *
3410  * BQL enabled drivers might use this helper in their ndo_start_xmit(),
3411  * to give appropriate hint to the CPU.
3412  */
netdev_txq_bql_enqueue_prefetchw(struct netdev_queue * dev_queue)3413 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
3414 {
3415 #ifdef CONFIG_BQL
3416 	prefetchw(&dev_queue->dql.num_queued);
3417 #endif
3418 }
3419 
3420 /**
3421  *	netdev_txq_bql_complete_prefetchw - prefetch bql data for write
3422  *	@dev_queue: pointer to transmit queue
3423  *
3424  * BQL enabled drivers might use this helper in their TX completion path,
3425  * to give appropriate hint to the CPU.
3426  */
netdev_txq_bql_complete_prefetchw(struct netdev_queue * dev_queue)3427 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
3428 {
3429 #ifdef CONFIG_BQL
3430 	prefetchw(&dev_queue->dql.limit);
3431 #endif
3432 }
3433 
3434 /**
3435  *	netdev_tx_sent_queue - report the number of bytes queued to a given tx queue
3436  *	@dev_queue: network device queue
3437  *	@bytes: number of bytes queued to the device queue
3438  *
3439  *	Report the number of bytes queued for sending/completion to the network
3440  *	device hardware queue. @bytes should be a good approximation and should
3441  *	exactly match netdev_completed_queue() @bytes.
3442  *	This is typically called once per packet, from ndo_start_xmit().
3443  */
netdev_tx_sent_queue(struct netdev_queue * dev_queue,unsigned int bytes)3444 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3445 					unsigned int bytes)
3446 {
3447 #ifdef CONFIG_BQL
3448 	dql_queued(&dev_queue->dql, bytes);
3449 
3450 	if (likely(dql_avail(&dev_queue->dql) >= 0))
3451 		return;
3452 
3453 	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3454 
3455 	/*
3456 	 * The XOFF flag must be set before checking the dql_avail below,
3457 	 * because in netdev_tx_completed_queue we update the dql_completed
3458 	 * before checking the XOFF flag.
3459 	 */
3460 	smp_mb();
3461 
3462 	/* check again in case another CPU has just made room avail */
3463 	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
3464 		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3465 #endif
3466 }
3467 
3468 /* Variant of netdev_tx_sent_queue() for drivers that are aware
3469  * that they should not test BQL status themselves.
3470  * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
3471  * skb of a batch.
3472  * Returns true if the doorbell must be used to kick the NIC.
3473  */
__netdev_tx_sent_queue(struct netdev_queue * dev_queue,unsigned int bytes,bool xmit_more)3474 static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3475 					  unsigned int bytes,
3476 					  bool xmit_more)
3477 {
3478 	if (xmit_more) {
3479 #ifdef CONFIG_BQL
3480 		dql_queued(&dev_queue->dql, bytes);
3481 #endif
3482 		return netif_tx_queue_stopped(dev_queue);
3483 	}
3484 	netdev_tx_sent_queue(dev_queue, bytes);
3485 	return true;
3486 }
3487 
3488 /**
3489  *	netdev_sent_queue - report the number of bytes queued to hardware
3490  *	@dev: network device
3491  *	@bytes: number of bytes queued to the hardware device queue
3492  *
3493  *	Report the number of bytes queued for sending/completion to the network
3494  *	device hardware queue#0. @bytes should be a good approximation and should
3495  *	exactly match netdev_completed_queue() @bytes.
3496  *	This is typically called once per packet, from ndo_start_xmit().
3497  */
netdev_sent_queue(struct net_device * dev,unsigned int bytes)3498 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
3499 {
3500 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
3501 }
3502 
__netdev_sent_queue(struct net_device * dev,unsigned int bytes,bool xmit_more)3503 static inline bool __netdev_sent_queue(struct net_device *dev,
3504 				       unsigned int bytes,
3505 				       bool xmit_more)
3506 {
3507 	return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
3508 				      xmit_more);
3509 }
3510 
3511 /**
3512  *	netdev_tx_completed_queue - report number of packets/bytes at TX completion.
3513  *	@dev_queue: network device queue
3514  *	@pkts: number of packets (currently ignored)
3515  *	@bytes: number of bytes dequeued from the device queue
3516  *
3517  *	Must be called at most once per TX completion round (and not per
3518  *	individual packet), so that BQL can adjust its limits appropriately.
3519  */
netdev_tx_completed_queue(struct netdev_queue * dev_queue,unsigned int pkts,unsigned int bytes)3520 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
3521 					     unsigned int pkts, unsigned int bytes)
3522 {
3523 #ifdef CONFIG_BQL
3524 	if (unlikely(!bytes))
3525 		return;
3526 
3527 	dql_completed(&dev_queue->dql, bytes);
3528 
3529 	/*
3530 	 * Without the memory barrier there is a small possiblity that
3531 	 * netdev_tx_sent_queue will miss the update and cause the queue to
3532 	 * be stopped forever
3533 	 */
3534 	smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */
3535 
3536 	if (unlikely(dql_avail(&dev_queue->dql) < 0))
3537 		return;
3538 
3539 	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
3540 		netif_schedule_queue(dev_queue);
3541 #endif
3542 }
3543 
3544 /**
3545  * 	netdev_completed_queue - report bytes and packets completed by device
3546  * 	@dev: network device
3547  * 	@pkts: actual number of packets sent over the medium
3548  * 	@bytes: actual number of bytes sent over the medium
3549  *
3550  * 	Report the number of bytes and packets transmitted by the network device
3551  * 	hardware queue over the physical medium, @bytes must exactly match the
3552  * 	@bytes amount passed to netdev_sent_queue()
3553  */
netdev_completed_queue(struct net_device * dev,unsigned int pkts,unsigned int bytes)3554 static inline void netdev_completed_queue(struct net_device *dev,
3555 					  unsigned int pkts, unsigned int bytes)
3556 {
3557 	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
3558 }
3559 
netdev_tx_reset_queue(struct netdev_queue * q)3560 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
3561 {
3562 #ifdef CONFIG_BQL
3563 	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
3564 	dql_reset(&q->dql);
3565 #endif
3566 }
3567 
3568 /**
3569  * 	netdev_reset_queue - reset the packets and bytes count of a network device
3570  * 	@dev_queue: network device
3571  *
3572  * 	Reset the bytes and packet count of a network device and clear the
3573  * 	software flow control OFF bit for this network device
3574  */
netdev_reset_queue(struct net_device * dev_queue)3575 static inline void netdev_reset_queue(struct net_device *dev_queue)
3576 {
3577 	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
3578 }
3579 
3580 /**
3581  * 	netdev_cap_txqueue - check if selected tx queue exceeds device queues
3582  * 	@dev: network device
3583  * 	@queue_index: given tx queue index
3584  *
3585  * 	Returns 0 if given tx queue index >= number of device tx queues,
3586  * 	otherwise returns the originally passed tx queue index.
3587  */
netdev_cap_txqueue(struct net_device * dev,u16 queue_index)3588 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3589 {
3590 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3591 		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3592 				     dev->name, queue_index,
3593 				     dev->real_num_tx_queues);
3594 		return 0;
3595 	}
3596 
3597 	return queue_index;
3598 }
3599 
3600 /**
3601  *	netif_running - test if up
3602  *	@dev: network device
3603  *
3604  *	Test if the device has been brought up.
3605  */
netif_running(const struct net_device * dev)3606 static inline bool netif_running(const struct net_device *dev)
3607 {
3608 	return test_bit(__LINK_STATE_START, &dev->state);
3609 }
3610 
3611 /*
3612  * Routines to manage the subqueues on a device.  We only need start,
3613  * stop, and a check if it's stopped.  All other device management is
3614  * done at the overall netdevice level.
3615  * Also test the device if we're multiqueue.
3616  */
3617 
3618 /**
3619  *	netif_start_subqueue - allow sending packets on subqueue
3620  *	@dev: network device
3621  *	@queue_index: sub queue index
3622  *
3623  * Start individual transmit queue of a device with multiple transmit queues.
3624  */
netif_start_subqueue(struct net_device * dev,u16 queue_index)3625 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
3626 {
3627 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3628 
3629 	netif_tx_start_queue(txq);
3630 }
3631 
3632 /**
3633  *	netif_stop_subqueue - stop sending packets on subqueue
3634  *	@dev: network device
3635  *	@queue_index: sub queue index
3636  *
3637  * Stop individual transmit queue of a device with multiple transmit queues.
3638  */
netif_stop_subqueue(struct net_device * dev,u16 queue_index)3639 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
3640 {
3641 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3642 	netif_tx_stop_queue(txq);
3643 }
3644 
3645 /**
3646  *	__netif_subqueue_stopped - test status of subqueue
3647  *	@dev: network device
3648  *	@queue_index: sub queue index
3649  *
3650  * Check individual transmit queue of a device with multiple transmit queues.
3651  */
__netif_subqueue_stopped(const struct net_device * dev,u16 queue_index)3652 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
3653 					    u16 queue_index)
3654 {
3655 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3656 
3657 	return netif_tx_queue_stopped(txq);
3658 }
3659 
3660 /**
3661  *	netif_subqueue_stopped - test status of subqueue
3662  *	@dev: network device
3663  *	@skb: sub queue buffer pointer
3664  *
3665  * Check individual transmit queue of a device with multiple transmit queues.
3666  */
netif_subqueue_stopped(const struct net_device * dev,struct sk_buff * skb)3667 static inline bool netif_subqueue_stopped(const struct net_device *dev,
3668 					  struct sk_buff *skb)
3669 {
3670 	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3671 }
3672 
3673 /**
3674  *	netif_wake_subqueue - allow sending packets on subqueue
3675  *	@dev: network device
3676  *	@queue_index: sub queue index
3677  *
3678  * Resume individual transmit queue of a device with multiple transmit queues.
3679  */
netif_wake_subqueue(struct net_device * dev,u16 queue_index)3680 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
3681 {
3682 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3683 
3684 	netif_tx_wake_queue(txq);
3685 }
3686 
3687 #ifdef CONFIG_XPS
3688 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
3689 			u16 index);
3690 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
3691 			  u16 index, enum xps_map_type type);
3692 
3693 /**
3694  *	netif_attr_test_mask - Test a CPU or Rx queue set in a mask
3695  *	@j: CPU/Rx queue index
3696  *	@mask: bitmask of all cpus/rx queues
3697  *	@nr_bits: number of bits in the bitmask
3698  *
3699  * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
3700  */
netif_attr_test_mask(unsigned long j,const unsigned long * mask,unsigned int nr_bits)3701 static inline bool netif_attr_test_mask(unsigned long j,
3702 					const unsigned long *mask,
3703 					unsigned int nr_bits)
3704 {
3705 	cpu_max_bits_warn(j, nr_bits);
3706 	return test_bit(j, mask);
3707 }
3708 
3709 /**
3710  *	netif_attr_test_online - Test for online CPU/Rx queue
3711  *	@j: CPU/Rx queue index
3712  *	@online_mask: bitmask for CPUs/Rx queues that are online
3713  *	@nr_bits: number of bits in the bitmask
3714  *
3715  * Returns true if a CPU/Rx queue is online.
3716  */
netif_attr_test_online(unsigned long j,const unsigned long * online_mask,unsigned int nr_bits)3717 static inline bool netif_attr_test_online(unsigned long j,
3718 					  const unsigned long *online_mask,
3719 					  unsigned int nr_bits)
3720 {
3721 	cpu_max_bits_warn(j, nr_bits);
3722 
3723 	if (online_mask)
3724 		return test_bit(j, online_mask);
3725 
3726 	return (j < nr_bits);
3727 }
3728 
3729 /**
3730  *	netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
3731  *	@n: CPU/Rx queue index
3732  *	@srcp: the cpumask/Rx queue mask pointer
3733  *	@nr_bits: number of bits in the bitmask
3734  *
3735  * Returns >= nr_bits if no further CPUs/Rx queues set.
3736  */
netif_attrmask_next(int n,const unsigned long * srcp,unsigned int nr_bits)3737 static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
3738 					       unsigned int nr_bits)
3739 {
3740 	/* -1 is a legal arg here. */
3741 	if (n != -1)
3742 		cpu_max_bits_warn(n, nr_bits);
3743 
3744 	if (srcp)
3745 		return find_next_bit(srcp, nr_bits, n + 1);
3746 
3747 	return n + 1;
3748 }
3749 
3750 /**
3751  *	netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
3752  *	@n: CPU/Rx queue index
3753  *	@src1p: the first CPUs/Rx queues mask pointer
3754  *	@src2p: the second CPUs/Rx queues mask pointer
3755  *	@nr_bits: number of bits in the bitmask
3756  *
3757  * Returns >= nr_bits if no further CPUs/Rx queues set in both.
3758  */
netif_attrmask_next_and(int n,const unsigned long * src1p,const unsigned long * src2p,unsigned int nr_bits)3759 static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
3760 					  const unsigned long *src2p,
3761 					  unsigned int nr_bits)
3762 {
3763 	/* -1 is a legal arg here. */
3764 	if (n != -1)
3765 		cpu_max_bits_warn(n, nr_bits);
3766 
3767 	if (src1p && src2p)
3768 		return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
3769 	else if (src1p)
3770 		return find_next_bit(src1p, nr_bits, n + 1);
3771 	else if (src2p)
3772 		return find_next_bit(src2p, nr_bits, n + 1);
3773 
3774 	return n + 1;
3775 }
3776 #else
netif_set_xps_queue(struct net_device * dev,const struct cpumask * mask,u16 index)3777 static inline int netif_set_xps_queue(struct net_device *dev,
3778 				      const struct cpumask *mask,
3779 				      u16 index)
3780 {
3781 	return 0;
3782 }
3783 
__netif_set_xps_queue(struct net_device * dev,const unsigned long * mask,u16 index,enum xps_map_type type)3784 static inline int __netif_set_xps_queue(struct net_device *dev,
3785 					const unsigned long *mask,
3786 					u16 index, enum xps_map_type type)
3787 {
3788 	return 0;
3789 }
3790 #endif
3791 
3792 /**
3793  *	netif_is_multiqueue - test if device has multiple transmit queues
3794  *	@dev: network device
3795  *
3796  * Check if device has multiple transmit queues
3797  */
netif_is_multiqueue(const struct net_device * dev)3798 static inline bool netif_is_multiqueue(const struct net_device *dev)
3799 {
3800 	return dev->num_tx_queues > 1;
3801 }
3802 
3803 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3804 
3805 #ifdef CONFIG_SYSFS
3806 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
3807 #else
netif_set_real_num_rx_queues(struct net_device * dev,unsigned int rxqs)3808 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
3809 						unsigned int rxqs)
3810 {
3811 	dev->real_num_rx_queues = rxqs;
3812 	return 0;
3813 }
3814 #endif
3815 int netif_set_real_num_queues(struct net_device *dev,
3816 			      unsigned int txq, unsigned int rxq);
3817 
3818 int netif_get_num_default_rss_queues(void);
3819 
3820 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason);
3821 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason);
3822 
3823 /*
3824  * It is not allowed to call kfree_skb() or consume_skb() from hardware
3825  * interrupt context or with hardware interrupts being disabled.
3826  * (in_hardirq() || irqs_disabled())
3827  *
3828  * We provide four helpers that can be used in following contexts :
3829  *
3830  * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
3831  *  replacing kfree_skb(skb)
3832  *
3833  * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
3834  *  Typically used in place of consume_skb(skb) in TX completion path
3835  *
3836  * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
3837  *  replacing kfree_skb(skb)
3838  *
3839  * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
3840  *  and consumed a packet. Used in place of consume_skb(skb)
3841  */
dev_kfree_skb_irq(struct sk_buff * skb)3842 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
3843 {
3844 	dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
3845 }
3846 
dev_consume_skb_irq(struct sk_buff * skb)3847 static inline void dev_consume_skb_irq(struct sk_buff *skb)
3848 {
3849 	dev_kfree_skb_irq_reason(skb, SKB_CONSUMED);
3850 }
3851 
dev_kfree_skb_any(struct sk_buff * skb)3852 static inline void dev_kfree_skb_any(struct sk_buff *skb)
3853 {
3854 	dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
3855 }
3856 
dev_consume_skb_any(struct sk_buff * skb)3857 static inline void dev_consume_skb_any(struct sk_buff *skb)
3858 {
3859 	dev_kfree_skb_any_reason(skb, SKB_CONSUMED);
3860 }
3861 
3862 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
3863 			     struct bpf_prog *xdp_prog);
3864 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
3865 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb);
3866 int netif_rx(struct sk_buff *skb);
3867 int __netif_rx(struct sk_buff *skb);
3868 
3869 int netif_receive_skb(struct sk_buff *skb);
3870 int netif_receive_skb_core(struct sk_buff *skb);
3871 void netif_receive_skb_list_internal(struct list_head *head);
3872 void netif_receive_skb_list(struct list_head *head);
3873 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
3874 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
3875 struct sk_buff *napi_get_frags(struct napi_struct *napi);
3876 void napi_get_frags_check(struct napi_struct *napi);
3877 gro_result_t napi_gro_frags(struct napi_struct *napi);
3878 
napi_free_frags(struct napi_struct * napi)3879 static inline void napi_free_frags(struct napi_struct *napi)
3880 {
3881 	kfree_skb(napi->skb);
3882 	napi->skb = NULL;
3883 }
3884 
3885 bool netdev_is_rx_handler_busy(struct net_device *dev);
3886 int netdev_rx_handler_register(struct net_device *dev,
3887 			       rx_handler_func_t *rx_handler,
3888 			       void *rx_handler_data);
3889 void netdev_rx_handler_unregister(struct net_device *dev);
3890 
3891 bool dev_valid_name(const char *name);
is_socket_ioctl_cmd(unsigned int cmd)3892 static inline bool is_socket_ioctl_cmd(unsigned int cmd)
3893 {
3894 	return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
3895 }
3896 int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg);
3897 int put_user_ifreq(struct ifreq *ifr, void __user *arg);
3898 int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
3899 		void __user *data, bool *need_copyout);
3900 int dev_ifconf(struct net *net, struct ifconf __user *ifc);
3901 int generic_hwtstamp_get_lower(struct net_device *dev,
3902 			       struct kernel_hwtstamp_config *kernel_cfg);
3903 int generic_hwtstamp_set_lower(struct net_device *dev,
3904 			       struct kernel_hwtstamp_config *kernel_cfg,
3905 			       struct netlink_ext_ack *extack);
3906 int dev_set_hwtstamp_phylib(struct net_device *dev,
3907 			    struct kernel_hwtstamp_config *cfg,
3908 			    struct netlink_ext_ack *extack);
3909 int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
3910 unsigned int dev_get_flags(const struct net_device *);
3911 int __dev_change_flags(struct net_device *dev, unsigned int flags,
3912 		       struct netlink_ext_ack *extack);
3913 int dev_change_flags(struct net_device *dev, unsigned int flags,
3914 		     struct netlink_ext_ack *extack);
3915 int dev_set_alias(struct net_device *, const char *, size_t);
3916 int dev_get_alias(const struct net_device *, char *, size_t);
3917 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
3918 			       const char *pat, int new_ifindex);
3919 static inline
dev_change_net_namespace(struct net_device * dev,struct net * net,const char * pat)3920 int dev_change_net_namespace(struct net_device *dev, struct net *net,
3921 			     const char *pat)
3922 {
3923 	return __dev_change_net_namespace(dev, net, pat, 0);
3924 }
3925 int __dev_set_mtu(struct net_device *, int);
3926 int dev_set_mtu(struct net_device *, int);
3927 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
3928 			      struct netlink_ext_ack *extack);
3929 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
3930 			struct netlink_ext_ack *extack);
3931 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
3932 			     struct netlink_ext_ack *extack);
3933 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
3934 int dev_get_port_parent_id(struct net_device *dev,
3935 			   struct netdev_phys_item_id *ppid, bool recurse);
3936 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
3937 
3938 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
3939 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3940 				    struct netdev_queue *txq, int *ret);
3941 
3942 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
3943 u8 dev_xdp_prog_count(struct net_device *dev);
3944 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
3945 
3946 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3947 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3948 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
3949 bool is_skb_forwardable(const struct net_device *dev,
3950 			const struct sk_buff *skb);
3951 
__is_skb_forwardable(const struct net_device * dev,const struct sk_buff * skb,const bool check_mtu)3952 static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
3953 						 const struct sk_buff *skb,
3954 						 const bool check_mtu)
3955 {
3956 	const u32 vlan_hdr_len = 4; /* VLAN_HLEN */
3957 	unsigned int len;
3958 
3959 	if (!(dev->flags & IFF_UP))
3960 		return false;
3961 
3962 	if (!check_mtu)
3963 		return true;
3964 
3965 	len = dev->mtu + dev->hard_header_len + vlan_hdr_len;
3966 	if (skb->len <= len)
3967 		return true;
3968 
3969 	/* if TSO is enabled, we don't care about the length as the packet
3970 	 * could be forwarded without being segmented before
3971 	 */
3972 	if (skb_is_gso(skb))
3973 		return true;
3974 
3975 	return false;
3976 }
3977 
3978 void netdev_core_stats_inc(struct net_device *dev, u32 offset);
3979 
3980 #define DEV_CORE_STATS_INC(FIELD)						\
3981 static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev)		\
3982 {										\
3983 	netdev_core_stats_inc(dev,						\
3984 			offsetof(struct net_device_core_stats, FIELD));		\
3985 }
3986 DEV_CORE_STATS_INC(rx_dropped)
DEV_CORE_STATS_INC(tx_dropped)3987 DEV_CORE_STATS_INC(tx_dropped)
3988 DEV_CORE_STATS_INC(rx_nohandler)
3989 DEV_CORE_STATS_INC(rx_otherhost_dropped)
3990 #undef DEV_CORE_STATS_INC
3991 
3992 static __always_inline int ____dev_forward_skb(struct net_device *dev,
3993 					       struct sk_buff *skb,
3994 					       const bool check_mtu)
3995 {
3996 	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
3997 	    unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
3998 		dev_core_stats_rx_dropped_inc(dev);
3999 		kfree_skb(skb);
4000 		return NET_RX_DROP;
4001 	}
4002 
4003 	skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
4004 	skb->priority = 0;
4005 	return 0;
4006 }
4007 
4008 bool dev_nit_active(struct net_device *dev);
4009 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
4010 
__dev_put(struct net_device * dev)4011 static inline void __dev_put(struct net_device *dev)
4012 {
4013 	if (dev) {
4014 #ifdef CONFIG_PCPU_DEV_REFCNT
4015 		this_cpu_dec(*dev->pcpu_refcnt);
4016 #else
4017 		refcount_dec(&dev->dev_refcnt);
4018 #endif
4019 	}
4020 }
4021 
__dev_hold(struct net_device * dev)4022 static inline void __dev_hold(struct net_device *dev)
4023 {
4024 	if (dev) {
4025 #ifdef CONFIG_PCPU_DEV_REFCNT
4026 		this_cpu_inc(*dev->pcpu_refcnt);
4027 #else
4028 		refcount_inc(&dev->dev_refcnt);
4029 #endif
4030 	}
4031 }
4032 
__netdev_tracker_alloc(struct net_device * dev,netdevice_tracker * tracker,gfp_t gfp)4033 static inline void __netdev_tracker_alloc(struct net_device *dev,
4034 					  netdevice_tracker *tracker,
4035 					  gfp_t gfp)
4036 {
4037 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER
4038 	ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp);
4039 #endif
4040 }
4041 
4042 /* netdev_tracker_alloc() can upgrade a prior untracked reference
4043  * taken by dev_get_by_name()/dev_get_by_index() to a tracked one.
4044  */
netdev_tracker_alloc(struct net_device * dev,netdevice_tracker * tracker,gfp_t gfp)4045 static inline void netdev_tracker_alloc(struct net_device *dev,
4046 					netdevice_tracker *tracker, gfp_t gfp)
4047 {
4048 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER
4049 	refcount_dec(&dev->refcnt_tracker.no_tracker);
4050 	__netdev_tracker_alloc(dev, tracker, gfp);
4051 #endif
4052 }
4053 
netdev_tracker_free(struct net_device * dev,netdevice_tracker * tracker)4054 static inline void netdev_tracker_free(struct net_device *dev,
4055 				       netdevice_tracker *tracker)
4056 {
4057 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER
4058 	ref_tracker_free(&dev->refcnt_tracker, tracker);
4059 #endif
4060 }
4061 
netdev_hold(struct net_device * dev,netdevice_tracker * tracker,gfp_t gfp)4062 static inline void netdev_hold(struct net_device *dev,
4063 			       netdevice_tracker *tracker, gfp_t gfp)
4064 {
4065 	if (dev) {
4066 		__dev_hold(dev);
4067 		__netdev_tracker_alloc(dev, tracker, gfp);
4068 	}
4069 }
4070 
netdev_put(struct net_device * dev,netdevice_tracker * tracker)4071 static inline void netdev_put(struct net_device *dev,
4072 			      netdevice_tracker *tracker)
4073 {
4074 	if (dev) {
4075 		netdev_tracker_free(dev, tracker);
4076 		__dev_put(dev);
4077 	}
4078 }
4079 
4080 /**
4081  *	dev_hold - get reference to device
4082  *	@dev: network device
4083  *
4084  * Hold reference to device to keep it from being freed.
4085  * Try using netdev_hold() instead.
4086  */
dev_hold(struct net_device * dev)4087 static inline void dev_hold(struct net_device *dev)
4088 {
4089 	netdev_hold(dev, NULL, GFP_ATOMIC);
4090 }
4091 
4092 /**
4093  *	dev_put - release reference to device
4094  *	@dev: network device
4095  *
4096  * Release reference to device to allow it to be freed.
4097  * Try using netdev_put() instead.
4098  */
dev_put(struct net_device * dev)4099 static inline void dev_put(struct net_device *dev)
4100 {
4101 	netdev_put(dev, NULL);
4102 }
4103 
DEFINE_FREE(dev_put,struct net_device *,if (_T)dev_put (_T))4104 DEFINE_FREE(dev_put, struct net_device *, if (_T) dev_put(_T))
4105 
4106 static inline void netdev_ref_replace(struct net_device *odev,
4107 				      struct net_device *ndev,
4108 				      netdevice_tracker *tracker,
4109 				      gfp_t gfp)
4110 {
4111 	if (odev)
4112 		netdev_tracker_free(odev, tracker);
4113 
4114 	__dev_hold(ndev);
4115 	__dev_put(odev);
4116 
4117 	if (ndev)
4118 		__netdev_tracker_alloc(ndev, tracker, gfp);
4119 }
4120 
4121 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
4122  * and _off may be called from IRQ context, but it is caller
4123  * who is responsible for serialization of these calls.
4124  *
4125  * The name carrier is inappropriate, these functions should really be
4126  * called netif_lowerlayer_*() because they represent the state of any
4127  * kind of lower layer not just hardware media.
4128  */
4129 void linkwatch_fire_event(struct net_device *dev);
4130 
4131 /**
4132  * linkwatch_sync_dev - sync linkwatch for the given device
4133  * @dev: network device to sync linkwatch for
4134  *
4135  * Sync linkwatch for the given device, removing it from the
4136  * pending work list (if queued).
4137  */
4138 void linkwatch_sync_dev(struct net_device *dev);
4139 
4140 /**
4141  *	netif_carrier_ok - test if carrier present
4142  *	@dev: network device
4143  *
4144  * Check if carrier is present on device
4145  */
netif_carrier_ok(const struct net_device * dev)4146 static inline bool netif_carrier_ok(const struct net_device *dev)
4147 {
4148 	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
4149 }
4150 
4151 unsigned long dev_trans_start(struct net_device *dev);
4152 
4153 void __netdev_watchdog_up(struct net_device *dev);
4154 
4155 void netif_carrier_on(struct net_device *dev);
4156 void netif_carrier_off(struct net_device *dev);
4157 void netif_carrier_event(struct net_device *dev);
4158 
4159 /**
4160  *	netif_dormant_on - mark device as dormant.
4161  *	@dev: network device
4162  *
4163  * Mark device as dormant (as per RFC2863).
4164  *
4165  * The dormant state indicates that the relevant interface is not
4166  * actually in a condition to pass packets (i.e., it is not 'up') but is
4167  * in a "pending" state, waiting for some external event.  For "on-
4168  * demand" interfaces, this new state identifies the situation where the
4169  * interface is waiting for events to place it in the up state.
4170  */
netif_dormant_on(struct net_device * dev)4171 static inline void netif_dormant_on(struct net_device *dev)
4172 {
4173 	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
4174 		linkwatch_fire_event(dev);
4175 }
4176 
4177 /**
4178  *	netif_dormant_off - set device as not dormant.
4179  *	@dev: network device
4180  *
4181  * Device is not in dormant state.
4182  */
netif_dormant_off(struct net_device * dev)4183 static inline void netif_dormant_off(struct net_device *dev)
4184 {
4185 	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
4186 		linkwatch_fire_event(dev);
4187 }
4188 
4189 /**
4190  *	netif_dormant - test if device is dormant
4191  *	@dev: network device
4192  *
4193  * Check if device is dormant.
4194  */
netif_dormant(const struct net_device * dev)4195 static inline bool netif_dormant(const struct net_device *dev)
4196 {
4197 	return test_bit(__LINK_STATE_DORMANT, &dev->state);
4198 }
4199 
4200 
4201 /**
4202  *	netif_testing_on - mark device as under test.
4203  *	@dev: network device
4204  *
4205  * Mark device as under test (as per RFC2863).
4206  *
4207  * The testing state indicates that some test(s) must be performed on
4208  * the interface. After completion, of the test, the interface state
4209  * will change to up, dormant, or down, as appropriate.
4210  */
netif_testing_on(struct net_device * dev)4211 static inline void netif_testing_on(struct net_device *dev)
4212 {
4213 	if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
4214 		linkwatch_fire_event(dev);
4215 }
4216 
4217 /**
4218  *	netif_testing_off - set device as not under test.
4219  *	@dev: network device
4220  *
4221  * Device is not in testing state.
4222  */
netif_testing_off(struct net_device * dev)4223 static inline void netif_testing_off(struct net_device *dev)
4224 {
4225 	if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
4226 		linkwatch_fire_event(dev);
4227 }
4228 
4229 /**
4230  *	netif_testing - test if device is under test
4231  *	@dev: network device
4232  *
4233  * Check if device is under test
4234  */
netif_testing(const struct net_device * dev)4235 static inline bool netif_testing(const struct net_device *dev)
4236 {
4237 	return test_bit(__LINK_STATE_TESTING, &dev->state);
4238 }
4239 
4240 
4241 /**
4242  *	netif_oper_up - test if device is operational
4243  *	@dev: network device
4244  *
4245  * Check if carrier is operational
4246  */
netif_oper_up(const struct net_device * dev)4247 static inline bool netif_oper_up(const struct net_device *dev)
4248 {
4249 	unsigned int operstate = READ_ONCE(dev->operstate);
4250 
4251 	return	operstate == IF_OPER_UP ||
4252 		operstate == IF_OPER_UNKNOWN /* backward compat */;
4253 }
4254 
4255 /**
4256  *	netif_device_present - is device available or removed
4257  *	@dev: network device
4258  *
4259  * Check if device has not been removed from system.
4260  */
netif_device_present(const struct net_device * dev)4261 static inline bool netif_device_present(const struct net_device *dev)
4262 {
4263 	return test_bit(__LINK_STATE_PRESENT, &dev->state);
4264 }
4265 
4266 void netif_device_detach(struct net_device *dev);
4267 
4268 void netif_device_attach(struct net_device *dev);
4269 
4270 /*
4271  * Network interface message level settings
4272  */
4273 
4274 enum {
4275 	NETIF_MSG_DRV_BIT,
4276 	NETIF_MSG_PROBE_BIT,
4277 	NETIF_MSG_LINK_BIT,
4278 	NETIF_MSG_TIMER_BIT,
4279 	NETIF_MSG_IFDOWN_BIT,
4280 	NETIF_MSG_IFUP_BIT,
4281 	NETIF_MSG_RX_ERR_BIT,
4282 	NETIF_MSG_TX_ERR_BIT,
4283 	NETIF_MSG_TX_QUEUED_BIT,
4284 	NETIF_MSG_INTR_BIT,
4285 	NETIF_MSG_TX_DONE_BIT,
4286 	NETIF_MSG_RX_STATUS_BIT,
4287 	NETIF_MSG_PKTDATA_BIT,
4288 	NETIF_MSG_HW_BIT,
4289 	NETIF_MSG_WOL_BIT,
4290 
4291 	/* When you add a new bit above, update netif_msg_class_names array
4292 	 * in net/ethtool/common.c
4293 	 */
4294 	NETIF_MSG_CLASS_COUNT,
4295 };
4296 /* Both ethtool_ops interface and internal driver implementation use u32 */
4297 static_assert(NETIF_MSG_CLASS_COUNT <= 32);
4298 
4299 #define __NETIF_MSG_BIT(bit)	((u32)1 << (bit))
4300 #define __NETIF_MSG(name)	__NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)
4301 
4302 #define NETIF_MSG_DRV		__NETIF_MSG(DRV)
4303 #define NETIF_MSG_PROBE		__NETIF_MSG(PROBE)
4304 #define NETIF_MSG_LINK		__NETIF_MSG(LINK)
4305 #define NETIF_MSG_TIMER		__NETIF_MSG(TIMER)
4306 #define NETIF_MSG_IFDOWN	__NETIF_MSG(IFDOWN)
4307 #define NETIF_MSG_IFUP		__NETIF_MSG(IFUP)
4308 #define NETIF_MSG_RX_ERR	__NETIF_MSG(RX_ERR)
4309 #define NETIF_MSG_TX_ERR	__NETIF_MSG(TX_ERR)
4310 #define NETIF_MSG_TX_QUEUED	__NETIF_MSG(TX_QUEUED)
4311 #define NETIF_MSG_INTR		__NETIF_MSG(INTR)
4312 #define NETIF_MSG_TX_DONE	__NETIF_MSG(TX_DONE)
4313 #define NETIF_MSG_RX_STATUS	__NETIF_MSG(RX_STATUS)
4314 #define NETIF_MSG_PKTDATA	__NETIF_MSG(PKTDATA)
4315 #define NETIF_MSG_HW		__NETIF_MSG(HW)
4316 #define NETIF_MSG_WOL		__NETIF_MSG(WOL)
4317 
4318 #define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
4319 #define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
4320 #define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
4321 #define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
4322 #define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
4323 #define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
4324 #define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
4325 #define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
4326 #define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
4327 #define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
4328 #define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
4329 #define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
4330 #define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
4331 #define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
4332 #define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
4333 
netif_msg_init(int debug_value,int default_msg_enable_bits)4334 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
4335 {
4336 	/* use default */
4337 	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
4338 		return default_msg_enable_bits;
4339 	if (debug_value == 0)	/* no output */
4340 		return 0;
4341 	/* set low N bits */
4342 	return (1U << debug_value) - 1;
4343 }
4344 
__netif_tx_lock(struct netdev_queue * txq,int cpu)4345 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
4346 {
4347 	spin_lock(&txq->_xmit_lock);
4348 	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4349 	WRITE_ONCE(txq->xmit_lock_owner, cpu);
4350 }
4351 
__netif_tx_acquire(struct netdev_queue * txq)4352 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
4353 {
4354 	__acquire(&txq->_xmit_lock);
4355 	return true;
4356 }
4357 
__netif_tx_release(struct netdev_queue * txq)4358 static inline void __netif_tx_release(struct netdev_queue *txq)
4359 {
4360 	__release(&txq->_xmit_lock);
4361 }
4362 
__netif_tx_lock_bh(struct netdev_queue * txq)4363 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
4364 {
4365 	spin_lock_bh(&txq->_xmit_lock);
4366 	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4367 	WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
4368 }
4369 
__netif_tx_trylock(struct netdev_queue * txq)4370 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
4371 {
4372 	bool ok = spin_trylock(&txq->_xmit_lock);
4373 
4374 	if (likely(ok)) {
4375 		/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4376 		WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
4377 	}
4378 	return ok;
4379 }
4380 
__netif_tx_unlock(struct netdev_queue * txq)4381 static inline void __netif_tx_unlock(struct netdev_queue *txq)
4382 {
4383 	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4384 	WRITE_ONCE(txq->xmit_lock_owner, -1);
4385 	spin_unlock(&txq->_xmit_lock);
4386 }
4387 
__netif_tx_unlock_bh(struct netdev_queue * txq)4388 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
4389 {
4390 	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4391 	WRITE_ONCE(txq->xmit_lock_owner, -1);
4392 	spin_unlock_bh(&txq->_xmit_lock);
4393 }
4394 
4395 /*
4396  * txq->trans_start can be read locklessly from dev_watchdog()
4397  */
txq_trans_update(struct netdev_queue * txq)4398 static inline void txq_trans_update(struct netdev_queue *txq)
4399 {
4400 	if (txq->xmit_lock_owner != -1)
4401 		WRITE_ONCE(txq->trans_start, jiffies);
4402 }
4403 
txq_trans_cond_update(struct netdev_queue * txq)4404 static inline void txq_trans_cond_update(struct netdev_queue *txq)
4405 {
4406 	unsigned long now = jiffies;
4407 
4408 	if (READ_ONCE(txq->trans_start) != now)
4409 		WRITE_ONCE(txq->trans_start, now);
4410 }
4411 
4412 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
netif_trans_update(struct net_device * dev)4413 static inline void netif_trans_update(struct net_device *dev)
4414 {
4415 	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
4416 
4417 	txq_trans_cond_update(txq);
4418 }
4419 
4420 /**
4421  *	netif_tx_lock - grab network device transmit lock
4422  *	@dev: network device
4423  *
4424  * Get network device transmit lock
4425  */
4426 void netif_tx_lock(struct net_device *dev);
4427 
netif_tx_lock_bh(struct net_device * dev)4428 static inline void netif_tx_lock_bh(struct net_device *dev)
4429 {
4430 	local_bh_disable();
4431 	netif_tx_lock(dev);
4432 }
4433 
4434 void netif_tx_unlock(struct net_device *dev);
4435 
netif_tx_unlock_bh(struct net_device * dev)4436 static inline void netif_tx_unlock_bh(struct net_device *dev)
4437 {
4438 	netif_tx_unlock(dev);
4439 	local_bh_enable();
4440 }
4441 
4442 #define HARD_TX_LOCK(dev, txq, cpu) {			\
4443 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
4444 		__netif_tx_lock(txq, cpu);		\
4445 	} else {					\
4446 		__netif_tx_acquire(txq);		\
4447 	}						\
4448 }
4449 
4450 #define HARD_TX_TRYLOCK(dev, txq)			\
4451 	(((dev->features & NETIF_F_LLTX) == 0) ?	\
4452 		__netif_tx_trylock(txq) :		\
4453 		__netif_tx_acquire(txq))
4454 
4455 #define HARD_TX_UNLOCK(dev, txq) {			\
4456 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
4457 		__netif_tx_unlock(txq);			\
4458 	} else {					\
4459 		__netif_tx_release(txq);		\
4460 	}						\
4461 }
4462 
netif_tx_disable(struct net_device * dev)4463 static inline void netif_tx_disable(struct net_device *dev)
4464 {
4465 	unsigned int i;
4466 	int cpu;
4467 
4468 	local_bh_disable();
4469 	cpu = smp_processor_id();
4470 	spin_lock(&dev->tx_global_lock);
4471 	for (i = 0; i < dev->num_tx_queues; i++) {
4472 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4473 
4474 		__netif_tx_lock(txq, cpu);
4475 		netif_tx_stop_queue(txq);
4476 		__netif_tx_unlock(txq);
4477 	}
4478 	spin_unlock(&dev->tx_global_lock);
4479 	local_bh_enable();
4480 }
4481 
netif_addr_lock(struct net_device * dev)4482 static inline void netif_addr_lock(struct net_device *dev)
4483 {
4484 	unsigned char nest_level = 0;
4485 
4486 #ifdef CONFIG_LOCKDEP
4487 	nest_level = dev->nested_level;
4488 #endif
4489 	spin_lock_nested(&dev->addr_list_lock, nest_level);
4490 }
4491 
netif_addr_lock_bh(struct net_device * dev)4492 static inline void netif_addr_lock_bh(struct net_device *dev)
4493 {
4494 	unsigned char nest_level = 0;
4495 
4496 #ifdef CONFIG_LOCKDEP
4497 	nest_level = dev->nested_level;
4498 #endif
4499 	local_bh_disable();
4500 	spin_lock_nested(&dev->addr_list_lock, nest_level);
4501 }
4502 
netif_addr_unlock(struct net_device * dev)4503 static inline void netif_addr_unlock(struct net_device *dev)
4504 {
4505 	spin_unlock(&dev->addr_list_lock);
4506 }
4507 
netif_addr_unlock_bh(struct net_device * dev)4508 static inline void netif_addr_unlock_bh(struct net_device *dev)
4509 {
4510 	spin_unlock_bh(&dev->addr_list_lock);
4511 }
4512 
4513 /*
4514  * dev_addrs walker. Should be used only for read access. Call with
4515  * rcu_read_lock held.
4516  */
4517 #define for_each_dev_addr(dev, ha) \
4518 		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
4519 
4520 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
4521 
4522 void ether_setup(struct net_device *dev);
4523 
4524 /* Allocate dummy net_device */
4525 struct net_device *alloc_netdev_dummy(int sizeof_priv);
4526 
4527 /* Support for loadable net-drivers */
4528 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
4529 				    unsigned char name_assign_type,
4530 				    void (*setup)(struct net_device *),
4531 				    unsigned int txqs, unsigned int rxqs);
4532 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
4533 	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
4534 
4535 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
4536 	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
4537 			 count)
4538 
4539 int register_netdev(struct net_device *dev);
4540 void unregister_netdev(struct net_device *dev);
4541 
4542 int devm_register_netdev(struct device *dev, struct net_device *ndev);
4543 
4544 /* General hardware address lists handling functions */
4545 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4546 		   struct netdev_hw_addr_list *from_list, int addr_len);
4547 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
4548 		      struct netdev_hw_addr_list *from_list, int addr_len);
4549 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
4550 		       struct net_device *dev,
4551 		       int (*sync)(struct net_device *, const unsigned char *),
4552 		       int (*unsync)(struct net_device *,
4553 				     const unsigned char *));
4554 int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
4555 			   struct net_device *dev,
4556 			   int (*sync)(struct net_device *,
4557 				       const unsigned char *, int),
4558 			   int (*unsync)(struct net_device *,
4559 					 const unsigned char *, int));
4560 void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
4561 			      struct net_device *dev,
4562 			      int (*unsync)(struct net_device *,
4563 					    const unsigned char *, int));
4564 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
4565 			  struct net_device *dev,
4566 			  int (*unsync)(struct net_device *,
4567 					const unsigned char *));
4568 void __hw_addr_init(struct netdev_hw_addr_list *list);
4569 
4570 /* Functions used for device addresses handling */
4571 void dev_addr_mod(struct net_device *dev, unsigned int offset,
4572 		  const void *addr, size_t len);
4573 
4574 static inline void
__dev_addr_set(struct net_device * dev,const void * addr,size_t len)4575 __dev_addr_set(struct net_device *dev, const void *addr, size_t len)
4576 {
4577 	dev_addr_mod(dev, 0, addr, len);
4578 }
4579 
dev_addr_set(struct net_device * dev,const u8 * addr)4580 static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
4581 {
4582 	__dev_addr_set(dev, addr, dev->addr_len);
4583 }
4584 
4585 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
4586 		 unsigned char addr_type);
4587 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
4588 		 unsigned char addr_type);
4589 
4590 /* Functions used for unicast addresses handling */
4591 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
4592 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
4593 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
4594 int dev_uc_sync(struct net_device *to, struct net_device *from);
4595 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
4596 void dev_uc_unsync(struct net_device *to, struct net_device *from);
4597 void dev_uc_flush(struct net_device *dev);
4598 void dev_uc_init(struct net_device *dev);
4599 
4600 /**
4601  *  __dev_uc_sync - Synchonize device's unicast list
4602  *  @dev:  device to sync
4603  *  @sync: function to call if address should be added
4604  *  @unsync: function to call if address should be removed
4605  *
4606  *  Add newly added addresses to the interface, and release
4607  *  addresses that have been deleted.
4608  */
__dev_uc_sync(struct net_device * dev,int (* sync)(struct net_device *,const unsigned char *),int (* unsync)(struct net_device *,const unsigned char *))4609 static inline int __dev_uc_sync(struct net_device *dev,
4610 				int (*sync)(struct net_device *,
4611 					    const unsigned char *),
4612 				int (*unsync)(struct net_device *,
4613 					      const unsigned char *))
4614 {
4615 	return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
4616 }
4617 
4618 /**
4619  *  __dev_uc_unsync - Remove synchronized addresses from device
4620  *  @dev:  device to sync
4621  *  @unsync: function to call if address should be removed
4622  *
4623  *  Remove all addresses that were added to the device by dev_uc_sync().
4624  */
__dev_uc_unsync(struct net_device * dev,int (* unsync)(struct net_device *,const unsigned char *))4625 static inline void __dev_uc_unsync(struct net_device *dev,
4626 				   int (*unsync)(struct net_device *,
4627 						 const unsigned char *))
4628 {
4629 	__hw_addr_unsync_dev(&dev->uc, dev, unsync);
4630 }
4631 
4632 /* Functions used for multicast addresses handling */
4633 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
4634 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
4635 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
4636 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
4637 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
4638 int dev_mc_sync(struct net_device *to, struct net_device *from);
4639 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
4640 void dev_mc_unsync(struct net_device *to, struct net_device *from);
4641 void dev_mc_flush(struct net_device *dev);
4642 void dev_mc_init(struct net_device *dev);
4643 
4644 /**
4645  *  __dev_mc_sync - Synchonize device's multicast list
4646  *  @dev:  device to sync
4647  *  @sync: function to call if address should be added
4648  *  @unsync: function to call if address should be removed
4649  *
4650  *  Add newly added addresses to the interface, and release
4651  *  addresses that have been deleted.
4652  */
__dev_mc_sync(struct net_device * dev,int (* sync)(struct net_device *,const unsigned char *),int (* unsync)(struct net_device *,const unsigned char *))4653 static inline int __dev_mc_sync(struct net_device *dev,
4654 				int (*sync)(struct net_device *,
4655 					    const unsigned char *),
4656 				int (*unsync)(struct net_device *,
4657 					      const unsigned char *))
4658 {
4659 	return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
4660 }
4661 
4662 /**
4663  *  __dev_mc_unsync - Remove synchronized addresses from device
4664  *  @dev:  device to sync
4665  *  @unsync: function to call if address should be removed
4666  *
4667  *  Remove all addresses that were added to the device by dev_mc_sync().
4668  */
__dev_mc_unsync(struct net_device * dev,int (* unsync)(struct net_device *,const unsigned char *))4669 static inline void __dev_mc_unsync(struct net_device *dev,
4670 				   int (*unsync)(struct net_device *,
4671 						 const unsigned char *))
4672 {
4673 	__hw_addr_unsync_dev(&dev->mc, dev, unsync);
4674 }
4675 
4676 /* Functions used for secondary unicast and multicast support */
4677 void dev_set_rx_mode(struct net_device *dev);
4678 int dev_set_promiscuity(struct net_device *dev, int inc);
4679 int dev_set_allmulti(struct net_device *dev, int inc);
4680 void netdev_state_change(struct net_device *dev);
4681 void __netdev_notify_peers(struct net_device *dev);
4682 void netdev_notify_peers(struct net_device *dev);
4683 void netdev_features_change(struct net_device *dev);
4684 /* Load a device via the kmod */
4685 void dev_load(struct net *net, const char *name);
4686 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4687 					struct rtnl_link_stats64 *storage);
4688 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
4689 			     const struct net_device_stats *netdev_stats);
4690 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
4691 			   const struct pcpu_sw_netstats __percpu *netstats);
4692 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
4693 
4694 enum {
4695 	NESTED_SYNC_IMM_BIT,
4696 	NESTED_SYNC_TODO_BIT,
4697 };
4698 
4699 #define __NESTED_SYNC_BIT(bit)	((u32)1 << (bit))
4700 #define __NESTED_SYNC(name)	__NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)
4701 
4702 #define NESTED_SYNC_IMM		__NESTED_SYNC(IMM)
4703 #define NESTED_SYNC_TODO	__NESTED_SYNC(TODO)
4704 
4705 struct netdev_nested_priv {
4706 	unsigned char flags;
4707 	void *data;
4708 };
4709 
4710 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
4711 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4712 						     struct list_head **iter);
4713 
4714 /* iterate through upper list, must be called under RCU read lock */
4715 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
4716 	for (iter = &(dev)->adj_list.upper, \
4717 	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
4718 	     updev; \
4719 	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
4720 
4721 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
4722 				  int (*fn)(struct net_device *upper_dev,
4723 					    struct netdev_nested_priv *priv),
4724 				  struct netdev_nested_priv *priv);
4725 
4726 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
4727 				  struct net_device *upper_dev);
4728 
4729 bool netdev_has_any_upper_dev(struct net_device *dev);
4730 
4731 void *netdev_lower_get_next_private(struct net_device *dev,
4732 				    struct list_head **iter);
4733 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4734 					struct list_head **iter);
4735 
4736 #define netdev_for_each_lower_private(dev, priv, iter) \
4737 	for (iter = (dev)->adj_list.lower.next, \
4738 	     priv = netdev_lower_get_next_private(dev, &(iter)); \
4739 	     priv; \
4740 	     priv = netdev_lower_get_next_private(dev, &(iter)))
4741 
4742 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
4743 	for (iter = &(dev)->adj_list.lower, \
4744 	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
4745 	     priv; \
4746 	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
4747 
4748 void *netdev_lower_get_next(struct net_device *dev,
4749 				struct list_head **iter);
4750 
4751 #define netdev_for_each_lower_dev(dev, ldev, iter) \
4752 	for (iter = (dev)->adj_list.lower.next, \
4753 	     ldev = netdev_lower_get_next(dev, &(iter)); \
4754 	     ldev; \
4755 	     ldev = netdev_lower_get_next(dev, &(iter)))
4756 
4757 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
4758 					     struct list_head **iter);
4759 int netdev_walk_all_lower_dev(struct net_device *dev,
4760 			      int (*fn)(struct net_device *lower_dev,
4761 					struct netdev_nested_priv *priv),
4762 			      struct netdev_nested_priv *priv);
4763 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
4764 				  int (*fn)(struct net_device *lower_dev,
4765 					    struct netdev_nested_priv *priv),
4766 				  struct netdev_nested_priv *priv);
4767 
4768 void *netdev_adjacent_get_private(struct list_head *adj_list);
4769 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
4770 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
4771 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
4772 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
4773 			  struct netlink_ext_ack *extack);
4774 int netdev_master_upper_dev_link(struct net_device *dev,
4775 				 struct net_device *upper_dev,
4776 				 void *upper_priv, void *upper_info,
4777 				 struct netlink_ext_ack *extack);
4778 void netdev_upper_dev_unlink(struct net_device *dev,
4779 			     struct net_device *upper_dev);
4780 int netdev_adjacent_change_prepare(struct net_device *old_dev,
4781 				   struct net_device *new_dev,
4782 				   struct net_device *dev,
4783 				   struct netlink_ext_ack *extack);
4784 void netdev_adjacent_change_commit(struct net_device *old_dev,
4785 				   struct net_device *new_dev,
4786 				   struct net_device *dev);
4787 void netdev_adjacent_change_abort(struct net_device *old_dev,
4788 				  struct net_device *new_dev,
4789 				  struct net_device *dev);
4790 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
4791 void *netdev_lower_dev_get_private(struct net_device *dev,
4792 				   struct net_device *lower_dev);
4793 void netdev_lower_state_changed(struct net_device *lower_dev,
4794 				void *lower_state_info);
4795 
4796 /* RSS keys are 40 or 52 bytes long */
4797 #define NETDEV_RSS_KEY_LEN 52
4798 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
4799 void netdev_rss_key_fill(void *buffer, size_t len);
4800 
4801 int skb_checksum_help(struct sk_buff *skb);
4802 int skb_crc32c_csum_help(struct sk_buff *skb);
4803 int skb_csum_hwoffload_help(struct sk_buff *skb,
4804 			    const netdev_features_t features);
4805 
4806 struct netdev_bonding_info {
4807 	ifslave	slave;
4808 	ifbond	master;
4809 };
4810 
4811 struct netdev_notifier_bonding_info {
4812 	struct netdev_notifier_info info; /* must be first */
4813 	struct netdev_bonding_info  bonding_info;
4814 };
4815 
4816 void netdev_bonding_info_change(struct net_device *dev,
4817 				struct netdev_bonding_info *bonding_info);
4818 
4819 #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
4820 void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
4821 #else
ethtool_notify(struct net_device * dev,unsigned int cmd,const void * data)4822 static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
4823 				  const void *data)
4824 {
4825 }
4826 #endif
4827 
4828 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
4829 
can_checksum_protocol(netdev_features_t features,__be16 protocol)4830 static inline bool can_checksum_protocol(netdev_features_t features,
4831 					 __be16 protocol)
4832 {
4833 	if (protocol == htons(ETH_P_FCOE))
4834 		return !!(features & NETIF_F_FCOE_CRC);
4835 
4836 	/* Assume this is an IP checksum (not SCTP CRC) */
4837 
4838 	if (features & NETIF_F_HW_CSUM) {
4839 		/* Can checksum everything */
4840 		return true;
4841 	}
4842 
4843 	switch (protocol) {
4844 	case htons(ETH_P_IP):
4845 		return !!(features & NETIF_F_IP_CSUM);
4846 	case htons(ETH_P_IPV6):
4847 		return !!(features & NETIF_F_IPV6_CSUM);
4848 	default:
4849 		return false;
4850 	}
4851 }
4852 
4853 #ifdef CONFIG_BUG
4854 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
4855 #else
netdev_rx_csum_fault(struct net_device * dev,struct sk_buff * skb)4856 static inline void netdev_rx_csum_fault(struct net_device *dev,
4857 					struct sk_buff *skb)
4858 {
4859 }
4860 #endif
4861 /* rx skb timestamps */
4862 void net_enable_timestamp(void);
4863 void net_disable_timestamp(void);
4864 
netdev_get_tstamp(struct net_device * dev,const struct skb_shared_hwtstamps * hwtstamps,bool cycles)4865 static inline ktime_t netdev_get_tstamp(struct net_device *dev,
4866 					const struct skb_shared_hwtstamps *hwtstamps,
4867 					bool cycles)
4868 {
4869 	const struct net_device_ops *ops = dev->netdev_ops;
4870 
4871 	if (ops->ndo_get_tstamp)
4872 		return ops->ndo_get_tstamp(dev, hwtstamps, cycles);
4873 
4874 	return hwtstamps->hwtstamp;
4875 }
4876 
__netdev_start_xmit(const struct net_device_ops * ops,struct sk_buff * skb,struct net_device * dev,bool more)4877 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
4878 					      struct sk_buff *skb, struct net_device *dev,
4879 					      bool more)
4880 {
4881 	__this_cpu_write(softnet_data.xmit.more, more);
4882 	return ops->ndo_start_xmit(skb, dev);
4883 }
4884 
netdev_xmit_more(void)4885 static inline bool netdev_xmit_more(void)
4886 {
4887 	return __this_cpu_read(softnet_data.xmit.more);
4888 }
4889 
netdev_start_xmit(struct sk_buff * skb,struct net_device * dev,struct netdev_queue * txq,bool more)4890 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
4891 					    struct netdev_queue *txq, bool more)
4892 {
4893 	const struct net_device_ops *ops = dev->netdev_ops;
4894 	netdev_tx_t rc;
4895 
4896 	rc = __netdev_start_xmit(ops, skb, dev, more);
4897 	if (rc == NETDEV_TX_OK)
4898 		txq_trans_update(txq);
4899 
4900 	return rc;
4901 }
4902 
4903 int netdev_class_create_file_ns(const struct class_attribute *class_attr,
4904 				const void *ns);
4905 void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
4906 				 const void *ns);
4907 
4908 extern const struct kobj_ns_type_operations net_ns_type_operations;
4909 
4910 const char *netdev_drivername(const struct net_device *dev);
4911 
netdev_intersect_features(netdev_features_t f1,netdev_features_t f2)4912 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
4913 							  netdev_features_t f2)
4914 {
4915 	if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
4916 		if (f1 & NETIF_F_HW_CSUM)
4917 			f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4918 		else
4919 			f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4920 	}
4921 
4922 	return f1 & f2;
4923 }
4924 
netdev_get_wanted_features(struct net_device * dev)4925 static inline netdev_features_t netdev_get_wanted_features(
4926 	struct net_device *dev)
4927 {
4928 	return (dev->features & ~dev->hw_features) | dev->wanted_features;
4929 }
4930 netdev_features_t netdev_increment_features(netdev_features_t all,
4931 	netdev_features_t one, netdev_features_t mask);
4932 
4933 /* Allow TSO being used on stacked device :
4934  * Performing the GSO segmentation before last device
4935  * is a performance improvement.
4936  */
netdev_add_tso_features(netdev_features_t features,netdev_features_t mask)4937 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
4938 							netdev_features_t mask)
4939 {
4940 	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
4941 }
4942 
4943 int __netdev_update_features(struct net_device *dev);
4944 void netdev_update_features(struct net_device *dev);
4945 void netdev_change_features(struct net_device *dev);
4946 
4947 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4948 					struct net_device *dev);
4949 
4950 netdev_features_t passthru_features_check(struct sk_buff *skb,
4951 					  struct net_device *dev,
4952 					  netdev_features_t features);
4953 netdev_features_t netif_skb_features(struct sk_buff *skb);
4954 void skb_warn_bad_offload(const struct sk_buff *skb);
4955 
net_gso_ok(netdev_features_t features,int gso_type)4956 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
4957 {
4958 	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
4959 
4960 	/* check flags correspondence */
4961 	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
4962 	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
4963 	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
4964 	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
4965 	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
4966 	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
4967 	BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
4968 	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
4969 	BUILD_BUG_ON(SKB_GSO_IPXIP4  != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
4970 	BUILD_BUG_ON(SKB_GSO_IPXIP6  != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
4971 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
4972 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
4973 	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
4974 	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
4975 	BUILD_BUG_ON(SKB_GSO_SCTP    != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
4976 	BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
4977 	BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
4978 	BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
4979 	BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
4980 
4981 	return (features & feature) == feature;
4982 }
4983 
skb_gso_ok(struct sk_buff * skb,netdev_features_t features)4984 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
4985 {
4986 	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
4987 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
4988 }
4989 
netif_needs_gso(struct sk_buff * skb,netdev_features_t features)4990 static inline bool netif_needs_gso(struct sk_buff *skb,
4991 				   netdev_features_t features)
4992 {
4993 	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
4994 		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
4995 			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
4996 }
4997 
4998 void netif_set_tso_max_size(struct net_device *dev, unsigned int size);
4999 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs);
5000 void netif_inherit_tso_max(struct net_device *to,
5001 			   const struct net_device *from);
5002 
netif_is_macsec(const struct net_device * dev)5003 static inline bool netif_is_macsec(const struct net_device *dev)
5004 {
5005 	return dev->priv_flags & IFF_MACSEC;
5006 }
5007 
netif_is_macvlan(const struct net_device * dev)5008 static inline bool netif_is_macvlan(const struct net_device *dev)
5009 {
5010 	return dev->priv_flags & IFF_MACVLAN;
5011 }
5012 
netif_is_macvlan_port(const struct net_device * dev)5013 static inline bool netif_is_macvlan_port(const struct net_device *dev)
5014 {
5015 	return dev->priv_flags & IFF_MACVLAN_PORT;
5016 }
5017 
netif_is_bond_master(const struct net_device * dev)5018 static inline bool netif_is_bond_master(const struct net_device *dev)
5019 {
5020 	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
5021 }
5022 
netif_is_bond_slave(const struct net_device * dev)5023 static inline bool netif_is_bond_slave(const struct net_device *dev)
5024 {
5025 	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
5026 }
5027 
netif_supports_nofcs(struct net_device * dev)5028 static inline bool netif_supports_nofcs(struct net_device *dev)
5029 {
5030 	return dev->priv_flags & IFF_SUPP_NOFCS;
5031 }
5032 
netif_has_l3_rx_handler(const struct net_device * dev)5033 static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
5034 {
5035 	return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
5036 }
5037 
netif_is_l3_master(const struct net_device * dev)5038 static inline bool netif_is_l3_master(const struct net_device *dev)
5039 {
5040 	return dev->priv_flags & IFF_L3MDEV_MASTER;
5041 }
5042 
netif_is_l3_slave(const struct net_device * dev)5043 static inline bool netif_is_l3_slave(const struct net_device *dev)
5044 {
5045 	return dev->priv_flags & IFF_L3MDEV_SLAVE;
5046 }
5047 
dev_sdif(const struct net_device * dev)5048 static inline int dev_sdif(const struct net_device *dev)
5049 {
5050 #ifdef CONFIG_NET_L3_MASTER_DEV
5051 	if (netif_is_l3_slave(dev))
5052 		return dev->ifindex;
5053 #endif
5054 	return 0;
5055 }
5056 
netif_is_bridge_master(const struct net_device * dev)5057 static inline bool netif_is_bridge_master(const struct net_device *dev)
5058 {
5059 	return dev->priv_flags & IFF_EBRIDGE;
5060 }
5061 
netif_is_bridge_port(const struct net_device * dev)5062 static inline bool netif_is_bridge_port(const struct net_device *dev)
5063 {
5064 	return dev->priv_flags & IFF_BRIDGE_PORT;
5065 }
5066 
netif_is_ovs_master(const struct net_device * dev)5067 static inline bool netif_is_ovs_master(const struct net_device *dev)
5068 {
5069 	return dev->priv_flags & IFF_OPENVSWITCH;
5070 }
5071 
netif_is_ovs_port(const struct net_device * dev)5072 static inline bool netif_is_ovs_port(const struct net_device *dev)
5073 {
5074 	return dev->priv_flags & IFF_OVS_DATAPATH;
5075 }
5076 
netif_is_any_bridge_master(const struct net_device * dev)5077 static inline bool netif_is_any_bridge_master(const struct net_device *dev)
5078 {
5079 	return netif_is_bridge_master(dev) || netif_is_ovs_master(dev);
5080 }
5081 
netif_is_any_bridge_port(const struct net_device * dev)5082 static inline bool netif_is_any_bridge_port(const struct net_device *dev)
5083 {
5084 	return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
5085 }
5086 
netif_is_team_master(const struct net_device * dev)5087 static inline bool netif_is_team_master(const struct net_device *dev)
5088 {
5089 	return dev->priv_flags & IFF_TEAM;
5090 }
5091 
netif_is_team_port(const struct net_device * dev)5092 static inline bool netif_is_team_port(const struct net_device *dev)
5093 {
5094 	return dev->priv_flags & IFF_TEAM_PORT;
5095 }
5096 
netif_is_lag_master(const struct net_device * dev)5097 static inline bool netif_is_lag_master(const struct net_device *dev)
5098 {
5099 	return netif_is_bond_master(dev) || netif_is_team_master(dev);
5100 }
5101 
netif_is_lag_port(const struct net_device * dev)5102 static inline bool netif_is_lag_port(const struct net_device *dev)
5103 {
5104 	return netif_is_bond_slave(dev) || netif_is_team_port(dev);
5105 }
5106 
netif_is_rxfh_configured(const struct net_device * dev)5107 static inline bool netif_is_rxfh_configured(const struct net_device *dev)
5108 {
5109 	return dev->priv_flags & IFF_RXFH_CONFIGURED;
5110 }
5111 
netif_is_failover(const struct net_device * dev)5112 static inline bool netif_is_failover(const struct net_device *dev)
5113 {
5114 	return dev->priv_flags & IFF_FAILOVER;
5115 }
5116 
netif_is_failover_slave(const struct net_device * dev)5117 static inline bool netif_is_failover_slave(const struct net_device *dev)
5118 {
5119 	return dev->priv_flags & IFF_FAILOVER_SLAVE;
5120 }
5121 
5122 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
netif_keep_dst(struct net_device * dev)5123 static inline void netif_keep_dst(struct net_device *dev)
5124 {
5125 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
5126 }
5127 
5128 /* return true if dev can't cope with mtu frames that need vlan tag insertion */
netif_reduces_vlan_mtu(struct net_device * dev)5129 static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
5130 {
5131 	/* TODO: reserve and use an additional IFF bit, if we get more users */
5132 	return netif_is_macsec(dev);
5133 }
5134 
5135 extern struct pernet_operations __net_initdata loopback_net_ops;
5136 
5137 /* Logging, debugging and troubleshooting/diagnostic helpers. */
5138 
5139 /* netdev_printk helpers, similar to dev_printk */
5140 
netdev_name(const struct net_device * dev)5141 static inline const char *netdev_name(const struct net_device *dev)
5142 {
5143 	if (!dev->name[0] || strchr(dev->name, '%'))
5144 		return "(unnamed net_device)";
5145 	return dev->name;
5146 }
5147 
netdev_reg_state(const struct net_device * dev)5148 static inline const char *netdev_reg_state(const struct net_device *dev)
5149 {
5150 	u8 reg_state = READ_ONCE(dev->reg_state);
5151 
5152 	switch (reg_state) {
5153 	case NETREG_UNINITIALIZED: return " (uninitialized)";
5154 	case NETREG_REGISTERED: return "";
5155 	case NETREG_UNREGISTERING: return " (unregistering)";
5156 	case NETREG_UNREGISTERED: return " (unregistered)";
5157 	case NETREG_RELEASED: return " (released)";
5158 	case NETREG_DUMMY: return " (dummy)";
5159 	}
5160 
5161 	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, reg_state);
5162 	return " (unknown)";
5163 }
5164 
5165 #define MODULE_ALIAS_NETDEV(device) \
5166 	MODULE_ALIAS("netdev-" device)
5167 
5168 /*
5169  * netdev_WARN() acts like dev_printk(), but with the key difference
5170  * of using a WARN/WARN_ON to get the message out, including the
5171  * file/line information and a backtrace.
5172  */
5173 #define netdev_WARN(dev, format, args...)			\
5174 	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
5175 	     netdev_reg_state(dev), ##args)
5176 
5177 #define netdev_WARN_ONCE(dev, format, args...)				\
5178 	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
5179 		  netdev_reg_state(dev), ##args)
5180 
5181 /*
5182  *	The list of packet types we will receive (as opposed to discard)
5183  *	and the routines to invoke.
5184  *
5185  *	Why 16. Because with 16 the only overlap we get on a hash of the
5186  *	low nibble of the protocol value is RARP/SNAP/X.25.
5187  *
5188  *		0800	IP
5189  *		0001	802.3
5190  *		0002	AX.25
5191  *		0004	802.2
5192  *		8035	RARP
5193  *		0005	SNAP
5194  *		0805	X.25
5195  *		0806	ARP
5196  *		8137	IPX
5197  *		0009	Localtalk
5198  *		86DD	IPv6
5199  */
5200 #define PTYPE_HASH_SIZE	(16)
5201 #define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
5202 
5203 extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
5204 
5205 extern struct net_device *blackhole_netdev;
5206 
5207 /* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
5208 #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
5209 #define DEV_STATS_ADD(DEV, FIELD, VAL) 	\
5210 		atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
5211 #define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
5212 
5213 #endif	/* _LINUX_NETDEVICE_H */
5214