xref: /dragonfly/sys/net/if_var.h (revision ed36d35d)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	From: @(#)if.h	8.1 (Berkeley) 6/10/93
30  * $FreeBSD: src/sys/net/if_var.h,v 1.18.2.16 2003/04/15 18:11:19 fjoe Exp $
31  */
32 
33 #ifndef	_NET_IF_VAR_H_
34 #define	_NET_IF_VAR_H_
35 
36 #ifndef _SYS_SERIALIZE_H_
37 #include <sys/serialize.h>
38 #endif
39 #ifndef _NET_IF_H_
40 #include <net/if.h>
41 #endif
42 #ifndef _SYS_MUTEX_H_
43 #include <sys/mutex.h>
44 #endif
45 
46 /*
47  * Structures defining a network interface, providing a packet
48  * transport mechanism (ala level 0 of the PUP protocols).
49  *
50  * Each interface accepts output datagrams of a specified maximum
51  * length, and provides higher level routines with input datagrams
52  * received from its medium.
53  *
54  * Output occurs when the routine if_output is called, with four parameters:
55  *
56  *	ifp->if_output(ifp, m, dst, rt)
57  *
58  * Here m is the mbuf chain to be sent and dst is the destination address.
59  * The output routine encapsulates the supplied datagram if necessary,
60  * and then transmits it on its medium.
61  *
62  * On input, each interface unwraps the data received by it, and either
63  * places it on the input queue of an internetwork datagram routine
64  * and posts the associated software interrupt, or passes the datagram to
65  * the routine if_input. It is called with four parameters:
66  *
67  *	ifp->if_input(ifp, m, pi, cpuid)
68  *
69  * Here m is the mbuf chain to be received. The input routine removes the
70  * protocol dependent header if necessary.  A driver may also make the call
71  * with a custom struct pktinfo reference pi and a cpuid to take advantage of
72  * hardware-supplied information.  Otherwise, the defaults for pi and cpuid
73  * are as follows:
74  *
75  *	ifp->if_input(ifp, m, NULL, -1);
76  *
77  * Routines exist for locating interfaces by their addresses
78  * or for locating an interface on a certain network, as well as more general
79  * routing and gateway routines maintaining information used to locate
80  * interfaces.  These routines live in the files if.c and route.c
81  */
82 
83 /*
84  * Forward structure declarations for function prototypes [sic].
85  */
86 struct	mbuf;
87 struct	proc;
88 struct	rtentry;
89 struct	rt_addrinfo;
90 struct	socket;
91 struct	ether_header;
92 struct	ucred;
93 struct	lwkt_serialize;
94 struct	ifaddr_container;
95 struct	ifaddr;
96 struct	lwkt_port;
97 struct	lwkt_msg;
98 union	netmsg;
99 struct	pktinfo;
100 struct	ifpoll_info;
101 struct	ifdata_pcpu;
102 
103 #include <sys/queue.h>		/* get TAILQ macros */
104 
105 #include <net/altq/if_altq.h>
106 
107 #ifdef _KERNEL
108 #include <sys/eventhandler.h>
109 #include <sys/mbuf.h>
110 #include <sys/systm.h>		/* XXX */
111 #include <sys/thread2.h>
112 #endif /* _KERNEL */
113 
114 #define IF_DUNIT_NONE   -1
115 
116 TAILQ_HEAD(ifnethead, ifnet);	/* we use TAILQs so that the order of */
117 TAILQ_HEAD(ifaddrhead, ifaddr_container); /* instantiation is preserved in the list */
118 TAILQ_HEAD(ifmultihead, ifmultiaddr);
119 
120 /*
121  * Structure defining a mbuf queue.
122  */
123 struct ifqueue {
124 	struct	mbuf *ifq_head;
125 	struct	mbuf *ifq_tail;
126 	int	ifq_len;
127 	int	ifq_maxlen;
128 	int	ifq_drops;
129 };
130 
131 /*
132  * Note on IFPOLL_ENABLE
133  * 1) Any file (*.c) that depends on IFPOLL_ENABLE support in this
134  *    file should include opt_ifpoll.h at its beginning.
135  * 2) When struct changes, which are conditioned by IFPOLL_ENABLE,
136  *    are to be introduced, please keep the struct's size and layout
137  *    the same, no matter whether IFPOLL_ENABLE is defined or not.
138  *    See ifnet.if_npoll and ifnet.if_npoll_unused for an example.
139  */
140 
141 /*
142  * Network serialize/deserialize types
143  */
144 enum ifnet_serialize {
145 	IFNET_SERIALIZE_ALL	/* all serializers */
146 };
147 
148 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
149 
150 /*
151  * Structure defining a network interface.
152  *
153  * (Would like to call this struct ``if'', but C isn't PL/1.)
154  */
155 
156 /*
157  * NB: For DragonFlyBSD, it is assumed that each NIC driver's softc starts
158  * with one of these structures, typically held within an arpcom structure.
159  *
160  *	struct <foo>_softc {
161  *		struct arpcom {
162  *			struct  ifnet ac_if;
163  *			...
164  *		} <arpcom> ;
165  *		...
166  *	};
167  *
168  * The assumption is used in a number of places, including many
169  * files in sys/net, device drivers, and sys/dev/mii.c:miibus_attach().
170  *
171  * Unfortunately, a device's softc is opaque, so we depend on this layout
172  * to locate the struct ifnet from the softc in the generic code.
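 *
 * For example, generic code holding only the opaque softc pointer can,
 * under the layout assumption above, recover the ifnet with a cast
 * (a sketch; dev is assumed to be the device_t of the NIC):
 *
 *	struct ifnet *ifp = &((struct arpcom *)device_get_softc(dev))->ac_if;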
173  *
174  *
175  *
176  * MPSAFE NOTES:
177  *
178  * ifnet is protected by calling if_serialize, if_tryserialize and
179  * if_deserialize serialize functions with the ifnet_serialize parameter.
180  * Callers of if_ioctl, if_watchdog, if_init, if_resolvemulti, and if_npoll
181  * should call the ifnet serialize functions with IFNET_SERIALIZE_ALL.
182  *
183  * if_snd subqueues are protected by their own serializers.  Callers of
184  * if_start should call ifsq_serialize_hw(), ifsq_deserialize_hw() and
185  * ifsq_tryserialize_hw() to properly serialize the hardware for transmission.
186  *
187  * Caller of if_output MUST NOT serialize ifnet or if_snd by calling
188  * the related serialize functions.
189  *
190  * For better transmission performance, the driver should setup the if_snd
191  * subqueue owner cpuid properly using ifsq_set_cpuid() (or ifq_set_cpuid(),
192  * if not multiple transmit queue capable).  Normally, the if_snd subqueue
193  * owner cpu is the one that processes the transmission interrupt.  In the
194  * driver, direct calls of if_start should be avoided; use ifsq_devstart() or
195  * ifsq_devstart_sched() instead (or if_devstart()/if_devstart_sched(), if
196  * not multiple transmit queue capable).
197  *
198  *
199  *
200  * STATISTICS:
201  *
202  * if_data is no longer used to hold per interface statistics, so DO NOT use
203  * the old style ifp->if_ipackets++ to update statistics; instead IFNET_STAT_
204  * macros should be used.
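 *
 * A minimal sketch of a per-packet update in a driver's receive path
 * (ifp and m are the usual driver locals; the counter names come from
 * the IFNET_STAT_INC() definition later in this file):
 *
 *	IFNET_STAT_INC(ifp, ipackets, 1);
 *	IFNET_STAT_INC(ifp, ibytes, m->m_pkthdr.len);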
205  *
206  *
207  *
208  * SINGLE SERIALIZER MODE:
209  *
210  * In this mode, driver MUST NOT setup if_serialize, if_deserialize,
211  * if_tryserialize or if_serialize_assert.  Driver could supply its own
212  * serializer to be used (through the type specific attach function, e.g.
213  * ether_ifattach()) or it could depend on the default serializer.  In this
214  * mode if_serializer will be setup properly.
215  *
216  * If a device driver installs the same serializer for its interrupt
217  * as for ifnet, then the driver only really needs to worry about further
218  * serialization in timeout based entry points and device_method_t entry
219  * points.  All other entry points will already be serialized.
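 *
 * A minimal attach sketch for this mode (foo_softc and its serializer
 * and MAC address fields are hypothetical driver names; ether_ifattach()
 * is declared later in this file):
 *
 *	struct foo_softc *sc = device_get_softc(dev);
 *
 *	lwkt_serialize_init(&sc->foo_serialize);
 *	ether_ifattach(&sc->arpcom.ac_if, sc->foo_enaddr, &sc->foo_serialize);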
220  *
221  *
222  *
223  * MULTI SERIALIZERS MODE:
224  *
225  * In this mode, driver MUST setup if_serialize, if_deserialize,
226  * if_tryserialize and if_serialize_assert.  Driver MUST NOT supply its own
227  * serializer to be used.  In this mode, if_serializer will be left as NULL.
228  * The driver MUST also setup the if_snd subqueues' hardware serializer by
229  * calling ifsq_set_hw_serialize().
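 *
 * A sketch of the required method setup (the foo_* handlers are
 * hypothetical driver functions implementing the ifnet_serialize
 * protocol described above; if_serialize_assert only exists under
 * INVARIANTS):
 *
 *	ifp->if_serialize = foo_serialize;
 *	ifp->if_deserialize = foo_deserialize;
 *	ifp->if_tryserialize = foo_tryserialize;
 *	ifp->if_serialize_assert = foo_serialize_assert;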
230  *
231  *
232  *
233  * MULTIPLE TRANSMIT QUEUES:
234  *
235  * This should be implemented in "MULTI SERIALIZERS MODE".  Legacy if_watchdog
236  * method SHOULD NOT be used.
237  *
238  * 1) Attach
239  *
240  * Before the type specific attach, e.g. ether_ifattach(), driver should
241  * setup the transmit queue count and cpuid to subqueue mapping method
242  * properly (assume QCOUNT is power of 2):
243  *
244  *	ifq_set_subq_cnt(&ifp->if_snd, QCOUNT);
245  *	ifp->if_mapsubq = ifq_mapsubq_modulo;
246  *	ifq_set_subq_divisor(&ifp->if_snd, QCOUNT);
247  *
248  * After the type specific attach, driver should setup the subqueues owner
249  * cpu, serializer and watchdog properly:
250  *
251  *	for (i = 0; i < QCOUNT; ++i) {
252  *		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
253  *
254  *		ifsq_set_cpuid(ifsq, Q_CPUID);
255  *		ifsq_set_hw_serialize(ifsq, Q_SLIZE);
256  *		ifsq_watchdog_init(Q_WDOG, ifsq, Q_WDOG_FUNC);
257  *	}
258  *
259  * Q_CPUID, the cpu which handles the hardware transmit queue interrupt
260  * Q_SLIZE, the serializer protects the hardware transmit queue
261  * Q_WDOG, per hardware transmit queue watchdog handler, struct ifsubq_watchdog
262  * Q_WDOG_FUNC, watchdog function, probably should reset hardware
263  *
264  * 2) Stop
265  *
266  * Make sure per hardware transmit queue watchdog is stopped and oactive is
267  * cleared:
268  *
269  *	for (i = 0; i < QCOUNT; ++i) {
270  *		ifsq_clr_oactive(ifsq);
271  *		ifsq_watchdog_stop(Q_WDOG);
272  *	}
273  *
274  * 3) Initialize
275  *
276  * Make sure per hardware transmit queue watchdog is started and oactive is
277  * cleared:
278  *
279  *	for (i = 0; i < QCOUNT; ++i) {
280  *		ifsq_clr_oactive(ifsq);
281  *		ifsq_watchdog_start(Q_WDOG);
282  *	}
283  *
284  * 4) if_start
285  *
286  * if_start takes the subqueue as a parameter, so the ifsq_ functions should
287  * be used instead of the ifq_ functions.  If the device cannot be programmed
288  * to transmit when the media link is not up, MAKE SURE to purge the subqueue:
289  *
290  *	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
291  *		return;
292  *	if (NO_LINK) {
293  *		ifsq_purge(ifsq);
294  *		return;
295  *	}
296  *	for (;;) {
297  *		if (NO_FREE_DESC) {
298  *			ifsq_set_oactive(ifsq);
299  *			break;
300  *		}
301  *		if ((m = ifsq_dequeue(ifsq)) == NULL)
302  *			break;
303  *		DRIVER_ENCAP(m);
304  *		Q_WDOG.wd_timer = WDOG_TIMEOUT;
305  *	}
306  *
307  * 5) Transmission done, e.g. transmit queue interrupt processing
308  *
309  * Same as if_start, ifsq_ functions should be used:
310  *
311  *	DRIVER_COLLECT_DESC();
312  *	if (HAS_FREE_DESC)
313  *		ifsq_clr_oactive(ifsq);
314  *	if (NO_PENDING_DESC)
315  *		Q_WDOG.wd_timer = 0;
316  *	if (!ifsq_is_empty(ifsq))
317  *		ifsq_devstart(ifsq);
318  */
319 struct ifnet {
320 	void	*if_softc;		/* pointer to driver state */
321 	void	*if_l2com;		/* pointer to protocol bits */
322 	TAILQ_ENTRY(ifnet) if_link;	/* all struct ifnets are chained */
323 	char	if_xname[IFNAMSIZ];	/* external name (name + unit) */
324 	const char *if_dname;		/* driver name */
325 	int	if_dunit;		/* unit or IF_DUNIT_NONE */
326 	void	*if_vlantrunks;		/* vlan trunks */
327 	struct	ifaddrhead *if_addrheads; /* per-cpu per-if addresses */
328 	int	if_pcount;		/* number of promiscuous listeners */
329 	void	*if_carp;		/* carp interfaces */
330 	struct	bpf_if *if_bpf;		/* packet filter structure */
331 	u_short	if_index;		/* numeric abbreviation for this if  */
332 	short	if_timer;		/* time 'til if_watchdog called */
333 	int	if_flags;		/* up/down, broadcast, etc. */
334 	int	if_capabilities;	/* interface capabilities */
335 	int	if_capenable;		/* enabled features */
336 	void	*if_linkmib;		/* link-type-specific MIB data */
337 	size_t	if_linkmiblen;		/* length of above data */
338 	struct	if_data if_data;	/* NOTE: stats are in if_data_pcpu */
339 	struct	ifmultihead if_multiaddrs; /* multicast addresses configured */
340 	int	if_amcount;		/* number of all-multicast requests */
341 /* procedure handles */
342 	int	(*if_output)		/* output routine (enqueue) */
343 		(struct ifnet *, struct mbuf *, struct sockaddr *,
344 		     struct rtentry *);
345 	void	(*if_input)		/* input routine from hardware driver */
346 		(struct ifnet *, struct mbuf *,
347 		     const struct pktinfo *pi, int cpuid);
348 	void	(*if_start)		/* initiate output routine */
349 		(struct ifnet *, struct ifaltq_subque *);
350 	int	(*if_ioctl)		/* ioctl routine */
351 		(struct ifnet *, u_long, caddr_t, struct ucred *);
352 	void	(*if_watchdog)		/* timer routine */
353 		(struct ifnet *);
354 	void	(*if_init)		/* init routine */
355 		(void *);
356 	int	(*if_resolvemulti)	/* validate/resolve multicast */
357 		(struct ifnet *, struct sockaddr **, struct sockaddr *);
358 	void	*if_unused5;
359 	TAILQ_HEAD(, ifg_list) if_groups; /* linked list of groups per if */
360 	int	(*if_mapsubq)		/* cpuid to if_snd subqueue map */
361 		(struct ifaltq *, int);
362 	int	if_unused2;
363 
364 	/*
365 	 * ifnet serialize functions
366 	 */
367 	void	(*if_serialize)
368 		(struct ifnet *, enum ifnet_serialize);
369 	void	(*if_deserialize)
370 		(struct ifnet *, enum ifnet_serialize);
371 	int	(*if_tryserialize)
372 		(struct ifnet *, enum ifnet_serialize);
373 #ifdef INVARIANTS
374 	void	(*if_serialize_assert)
375 		(struct ifnet *, enum ifnet_serialize, boolean_t);
376 #else
377 	/* Place holder */
378 	void	(*if_serialize_unused)(void);
379 #endif
380 
381 #ifdef IFPOLL_ENABLE
382 	void	(*if_npoll)		/* polling config */
383 		(struct ifnet *, struct ifpoll_info *);
384 #else
385 	/* Place holder */
386 	void	(*if_npoll_unused)(void);
387 #endif
388 	int	if_tsolen;		/* max TSO length */
389 	struct	ifaltq if_snd;		/* output subqueues */
390 	const uint8_t	*if_broadcastaddr;
391 	void	*if_bridge;		/* bridge glue */
392 	void	*if_lagg;		/* lagg glue */
393 	void	*if_afdata[AF_MAX];
394 	struct ifaddr	*if_lladdr;
395 
396 	/* serializer, in single serializer mode */
397 	struct lwkt_serialize *if_serializer;
398 	/*
399 	 * default serializer, in single serializer mode,
400 	 * if driver does not supply one
401 	 */
402 	struct lwkt_serialize if_default_serializer;
403 
404 	int	if_unused4;
405 	struct ifdata_pcpu *if_data_pcpu; /* per-cpu stats */
406 	void	*if_pf_kif;		/* pf interface */
407 
408 	/*
409 	 * Mbuf clusters/jclusters limits should be increased
410 	 * by if_nmbclusters/if_nmbjclusters.  Mainly for mbuf
411 	 * clusters/jclusters that could sit on the device
412 	 * queues, e.g. reception queues, for quite some time.
413 	 */
414 	int	if_nmbclusters;
415 	int	if_nmbjclusters;
416 };
417 typedef void if_init_f_t (void *);
418 
419 #define	if_mtu		if_data.ifi_mtu
420 #define	if_type		if_data.ifi_type
421 #define if_physical	if_data.ifi_physical
422 #define	if_addrlen	if_data.ifi_addrlen
423 #define	if_hdrlen	if_data.ifi_hdrlen
424 #define	if_metric	if_data.ifi_metric
425 #define	if_link_state	if_data.ifi_link_state
426 #define	if_baudrate	if_data.ifi_baudrate
427 #define	if_hwassist	if_data.ifi_hwassist
428 #define	if_ipackets	if_data.ifi_ipackets
429 #define	if_ierrors	if_data.ifi_ierrors
430 #define	if_opackets	if_data.ifi_opackets
431 #define	if_oerrors	if_data.ifi_oerrors
432 #define	if_collisions	if_data.ifi_collisions
433 #define	if_ibytes	if_data.ifi_ibytes
434 #define	if_obytes	if_data.ifi_obytes
435 #define	if_imcasts	if_data.ifi_imcasts
436 #define	if_omcasts	if_data.ifi_omcasts
437 #define	if_iqdrops	if_data.ifi_iqdrops
438 #define	if_noproto	if_data.ifi_noproto
439 #define	if_oqdrops	if_data.ifi_oqdrops
440 #define	if_lastchange	if_data.ifi_lastchange
441 #define if_recvquota	if_data.ifi_recvquota
442 #define	if_xmitquota	if_data.ifi_xmitquota
443 #define if_rawoutput(if, m, sa) if_output(if, m, sa, NULL)
444 
445 /* for compatibility with other BSDs */
446 #define	if_list		if_link
447 
448 /*
449  * Per-cpu interface statistics
450  */
451 struct ifdata_pcpu {
452 	u_long	ifd_ipackets;		/* packets received on interface */
453 	u_long	ifd_ierrors;		/* input errors on interface */
454 	u_long	ifd_opackets;		/* packets sent on interface */
455 	u_long	ifd_oerrors;		/* output errors on interface */
456 	u_long	ifd_collisions;		/* collisions on csma interfaces */
457 	u_long	ifd_ibytes;		/* total number of octets received */
458 	u_long	ifd_obytes;		/* total number of octets sent */
459 	u_long	ifd_imcasts;		/* packets received via multicast */
460 	u_long	ifd_omcasts;		/* packets sent via multicast */
461 	u_long	ifd_iqdrops;		/* dropped on input, this interface */
462 	u_long	ifd_noproto;		/* destined for unsupported protocol */
463 	u_long	ifd_oqdrops;		/* dropped on output, this interface */
464 } __cachealign;
465 
466 #endif	/* _KERNEL || _KERNEL_STRUCTURES */
467 
468 /*
469  * ifqueue operation macros
470  */
471 #define	IF_QFULL(ifq)		((ifq)->ifq_len >= (ifq)->ifq_maxlen)
472 #define	IF_DROP(ifq)		((ifq)->ifq_drops++)
473 #define	IF_QLEN(ifq)		((ifq)->ifq_len)
474 #define	IF_QEMPTY(ifq)		(IF_QLEN(ifq) == 0)
475 
476 #define	IF_ENQUEUE(ifq, m) do {						\
477 	(m)->m_nextpkt = NULL;						\
478 	if ((ifq)->ifq_tail == NULL)					\
479 		(ifq)->ifq_head = m;					\
480 	else								\
481 		(ifq)->ifq_tail->m_nextpkt = m;				\
482 	(ifq)->ifq_tail = m;						\
483 	(ifq)->ifq_len++;						\
484 } while (0)
485 
486 #define	IF_PREPEND(ifq, m) do {						\
487 	(m)->m_nextpkt = (ifq)->ifq_head;				\
488 	if ((ifq)->ifq_tail == NULL)					\
489 		(ifq)->ifq_tail = (m);					\
490 	(ifq)->ifq_head = (m);						\
491 	(ifq)->ifq_len++;						\
492 } while (0)
493 
494 #define	IF_DEQUEUE(ifq, m) do {						\
495 	(m) = (ifq)->ifq_head;						\
496 	if (m) {							\
497 		if (((ifq)->ifq_head = (m)->m_nextpkt) == NULL)		\
498 			(ifq)->ifq_tail = NULL;				\
499 		(m)->m_nextpkt = NULL;					\
500 		(ifq)->ifq_len--;					\
501 	}								\
502 } while (0)
503 
504 #define	IF_POLL(ifq, m)		((m) = (ifq)->ifq_head)
505 
506 #define IF_DRAIN(ifq) do {						\
507 	struct mbuf *m;							\
508 	while (1) {							\
509 		IF_DEQUEUE(ifq, m);					\
510 		if (m == NULL)						\
511 			break;						\
512 		m_freem(m);						\
513 	}								\
514 } while (0)
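
/*
 * A sketch of the classic bounded-enqueue pattern built from the macros
 * above (ifq and m are assumed to be a driver's ifqueue and mbuf):
 *
 *	if (IF_QFULL(ifq)) {
 *		IF_DROP(ifq);
 *		m_freem(m);
 *	} else {
 *		IF_ENQUEUE(ifq, m);
 *	}
 */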
515 
516 #ifdef _KERNEL
517 
518 /* interface link layer address change event */
519 typedef void (*iflladdr_event_handler_t)(void *, struct ifnet *);
520 EVENTHANDLER_DECLARE(iflladdr_event, iflladdr_event_handler_t);
521 
522 #ifdef INVARIANTS
523 #define ASSERT_IFNET_SERIALIZED_ALL(ifp) \
524 	(ifp)->if_serialize_assert((ifp), IFNET_SERIALIZE_ALL, TRUE)
525 #define ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp) \
526 	(ifp)->if_serialize_assert((ifp), IFNET_SERIALIZE_ALL, FALSE)
527 #else
528 #define ASSERT_IFNET_SERIALIZED_ALL(ifp)	((void)0)
529 #define ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp)	((void)0)
530 #endif
531 
532 static __inline void
533 ifnet_serialize_all(struct ifnet *_ifp)
534 {
535 	_ifp->if_serialize(_ifp, IFNET_SERIALIZE_ALL);
536 }
537 
538 static __inline void
539 ifnet_deserialize_all(struct ifnet *_ifp)
540 {
541 	_ifp->if_deserialize(_ifp, IFNET_SERIALIZE_ALL);
542 }
543 
544 static __inline int
545 ifnet_tryserialize_all(struct ifnet *_ifp)
546 {
547 	return _ifp->if_tryserialize(_ifp, IFNET_SERIALIZE_ALL);
548 }
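
/*
 * A sketch of how a caller is expected to bracket one of the serialized
 * ifnet methods, per the MPSAFE notes above (error handling omitted;
 * cmd, data and cr are assumed ioctl arguments):
 *
 *	ifnet_serialize_all(ifp);
 *	error = ifp->if_ioctl(ifp, cmd, data, cr);
 *	ifnet_deserialize_all(ifp);
 */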
549 
550 /*
551  * 72 was chosen below because it is the size of a TCP/IP
552  * header (40) + the minimum mss (32).
553  */
554 #define	IF_MINMTU	72
555 #define	IF_MAXMTU	65535
556 
557 #endif /* _KERNEL */
558 
559 struct in_ifaddr;
560 
561 struct in_ifaddr_container {
562 	struct in_ifaddr	*ia;
563 	LIST_ENTRY(in_ifaddr_container) ia_hash;
564 				/* entry in bucket of inet addresses */
565 	TAILQ_ENTRY(in_ifaddr_container) ia_link;
566 				/* list of internet addresses */
567 	struct ifaddr_container	*ia_ifac; /* parent ifaddr_container */
568 };
569 
570 /*
571  * Per-cpu ifaddr container:
572  * - per-cpu ifaddr reference count
573  * - linkage to per-cpu addresses lists
574  * - per-cpu ifaddr statistics
575  */
576 struct ifaddr_container {
577 #define IFA_CONTAINER_MAGIC	0x19810219
578 #define IFA_CONTAINER_DEAD	0xc0dedead
579 	uint32_t		ifa_magic;  /* IFA_CONTAINER_MAGIC */
580 	struct ifaddr		*ifa;
581 	TAILQ_ENTRY(ifaddr_container)	ifa_link;   /* queue macro glue */
582 	u_int			ifa_refcnt; /* references to this structure */
583 	uint16_t		ifa_listmask;	/* IFA_LIST_ */
584 	uint16_t		ifa_prflags;	/* protocol specific flags */
585 
586 	u_long			ifa_ipackets;	/* packets received on addr */
587 	u_long			ifa_ibytes;	/* bytes received on addr */
588 	u_long			ifa_opackets;	/* packets sent on addr */
589 	u_long			ifa_obytes;	/* bytes sent on addr */
590 
591 	/*
592 	 * Protocol specific states
593 	 */
594 	union {
595 		struct in_ifaddr_container u_in_ifac;
596 	} ifa_proto_u;
597 } __cachealign;
598 
599 #define IFA_LIST_IFADDRHEAD	0x01	/* on ifnet.if_addrheads[cpuid] */
600 #define IFA_LIST_IN_IFADDRHEAD	0x02	/* on in_ifaddrheads[cpuid] */
601 #define IFA_LIST_IN_IFADDRHASH	0x04	/* on in_ifaddrhashtbls[cpuid] */
602 
603 #define IFA_PRF_FLAG0		0x01
604 #define IFA_PRF_FLAG1		0x02
605 #define IFA_PRF_FLAG2		0x04
606 #define IFA_PRF_FLAG3		0x08
607 
608 /*
609  * The ifaddr structure contains information about one address
610  * of an interface.  They are maintained by the different address families,
611  * are allocated and attached when an address is set, and are linked
612  * together so all addresses for an interface can be located.
613  *
614  * NOTE:
615  * Statistics are no longer stored in if_data, instead, they are stored
616  * in the per-cpu ifaddr_container.  So don't use the old style
617  * ifa->if_ipackets++ to update statistics, use IFA_STAT_ macros.
618  */
619 struct ifaddr {
620 	struct	sockaddr *ifa_addr;	/* address of interface */
621 	struct	sockaddr *ifa_dstaddr;	/* other end of p-to-p link */
622 #define	ifa_broadaddr	ifa_dstaddr	/* broadcast address interface */
623 	struct	sockaddr *ifa_netmask;	/* used to determine subnet */
624 	struct	if_data if_data;	/* not all members are meaningful */
625 	struct	ifnet *ifa_ifp;		/* back-pointer to interface */
626 	void	*ifa_link_pad;
627 	struct ifaddr_container *ifa_containers; /* per-cpu data */
628 	void	(*ifa_rtrequest)	/* check or clean routes (+ or -)'d */
629 		(int, struct rtentry *);
630 	u_short	ifa_flags;		/* mostly rt_flags for cloning */
631 	int	ifa_ncnt;		/* # of valid ifaddr_container */
632 	int	ifa_metric;		/* cost of going out this interface */
633 #ifdef notdef
634 	struct	rtentry *ifa_rt;	/* XXXX for ROUTETOIF ????? */
635 #endif
636 	int (*ifa_claim_addr)		/* check if an addr goes to this if */
637 		(struct ifaddr *, struct sockaddr *);
638 
639 };
640 #define	IFA_ROUTE	RTF_UP		/* route installed */
641 
642 /* for compatibility with other BSDs */
643 #define	ifa_list	ifa_link
644 
645 /*
646  * Multicast address structure.  This is analogous to the ifaddr
647  * structure except that it keeps track of multicast addresses.
648  * Also, the reference count here is a count of requests for this
649  * address, not a count of pointers to this structure.
650  */
651 struct ifmultiaddr {
652 	TAILQ_ENTRY(ifmultiaddr) ifma_link; /* queue macro glue */
653 	struct	sockaddr *ifma_addr;	/* address this membership is for */
654 	struct	sockaddr *ifma_lladdr;	/* link-layer translation, if any */
655 	struct	ifnet *ifma_ifp;	/* back-pointer to interface */
656 	u_int	ifma_refcount;		/* reference count */
657 	void	*ifma_protospec;	/* protocol-specific state, if any */
658 };
659 
660 #ifdef _KERNEL
661 
662 struct ifaddr_marker {
663 	struct ifaddr		ifa;
664 	struct ifaddr_container	ifac;
665 	struct sockaddr		addr;
666 	struct sockaddr		netmask;
667 	struct sockaddr		dstaddr;
668 };
669 
670 /*
671  * ifaddr statistics update macro
672  */
673 #define IFA_STAT_INC(ifa, name, v) \
674 do { \
675 	(ifa)->ifa_containers[mycpuid].ifa_##name += (v); \
676 } while (0)
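
/*
 * Example sketch: charge one received packet to an address on the
 * current cpu (ifa and m are assumed protocol-code locals):
 *
 *	IFA_STAT_INC(ifa, ipackets, 1);
 *	IFA_STAT_INC(ifa, ibytes, m->m_pkthdr.len);
 */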
677 
678 /*
679  * Interface (ifnet) statistics update macros
680  */
681 #define IFNET_STAT_INC(ifp, name, v) \
682 do { \
683 	(ifp)->if_data_pcpu[mycpuid].ifd_##name += (v); \
684 } while (0)
685 
686 #define IFNET_STAT_SET(ifp, name, v) \
687 do { \
688 	int _cpu; \
689 	(ifp)->if_data_pcpu[0].ifd_##name = (v); \
690 	for (_cpu = 1; _cpu < ncpus; ++_cpu) \
691 		(ifp)->if_data_pcpu[_cpu].ifd_##name = 0; \
692 } while (0)
693 
694 #define IFNET_STAT_GET(ifp, name, v) \
695 do { \
696 	int _cpu; \
697 	(v) = (ifp)->if_data_pcpu[0].ifd_##name; \
698 	for (_cpu = 1; _cpu < ncpus; ++_cpu) \
699 		(v) += (ifp)->if_data_pcpu[_cpu].ifd_##name; \
700 } while (0)
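
/*
 * IFNET_STAT_GET() sums the per-cpu counters into a single value, so an
 * aggregate read looks like this sketch (total is a local u_long):
 *
 *	u_long total;
 *
 *	IFNET_STAT_GET(ifp, ipackets, total);
 */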
701 
702 #ifndef _SYS_SERIALIZE2_H_
703 #include <sys/serialize2.h>
704 #endif
705 
706 enum ifaddr_event {
707 	IFADDR_EVENT_ADD,
708 	IFADDR_EVENT_DELETE,
709 	IFADDR_EVENT_CHANGE
710 };
711 
712 /* interface address change event */
713 typedef void (*ifaddr_event_handler_t)(void *, struct ifnet *,
714 	enum ifaddr_event, struct ifaddr *);
715 EVENTHANDLER_DECLARE(ifaddr_event, ifaddr_event_handler_t);
716 /* new interface attach event */
717 typedef void (*ifnet_attach_event_handler_t)(void *, struct ifnet *);
718 EVENTHANDLER_DECLARE(ifnet_attach_event, ifnet_attach_event_handler_t);
719 /* interface detach event */
720 typedef void (*ifnet_detach_event_handler_t)(void *, struct ifnet *);
721 EVENTHANDLER_DECLARE(ifnet_detach_event, ifnet_detach_event_handler_t);
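
/*
 * These events are consumed through the usual EVENTHANDLER_REGISTER()
 * interface from <sys/eventhandler.h>; a sketch (foo_ifnet_attach and
 * its NULL argument are hypothetical):
 *
 *	EVENTHANDLER_REGISTER(ifnet_attach_event, foo_ifnet_attach,
 *	    NULL, EVENTHANDLER_PRI_ANY);
 */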
722 
723 /* Array of all ifnets in the system */
724 struct ifnet_array {
725 	int		ifnet_count;	/* # of elem. in ifnet_arr */
726 	int		ifnet_pad;	/* explicit */
727 	struct ifnet	*ifnet_arr[];
728 };
729 
730 /*
731  * interface groups
732  */
733 struct ifg_group {
734 	char				 ifg_group[IFNAMSIZ];
735 	u_int				 ifg_refcnt;
736 	void				*ifg_pf_kif;
737 	int				 ifg_carp_demoted;
738 	TAILQ_HEAD(, ifg_member)	 ifg_members;
739 	TAILQ_ENTRY(ifg_group)		 ifg_next;
740 };
741 
742 struct ifg_member {
743 	TAILQ_ENTRY(ifg_member)	 ifgm_next;
744 	struct ifnet		*ifgm_ifp;
745 };
746 
747 struct ifg_list {
748 	struct ifg_group	*ifgl_group;
749 	TAILQ_ENTRY(ifg_list)	 ifgl_next;
750 };
751 
752 /* group attach event */
753 typedef void (*group_attach_event_handler_t)(void *, struct ifg_group *);
754 EVENTHANDLER_DECLARE(group_attach_event, group_attach_event_handler_t);
755 /* group detach event */
756 typedef void (*group_detach_event_handler_t)(void *, struct ifg_group *);
757 EVENTHANDLER_DECLARE(group_detach_event, group_detach_event_handler_t);
758 /* group change event */
759 typedef void (*group_change_event_handler_t)(void *, const char *);
760 EVENTHANDLER_DECLARE(group_change_event, group_change_event_handler_t);
761 
762 
763 #ifdef INVARIANTS
764 #define ASSERT_IFAC_VALID(ifac)	do { \
765 	KKASSERT((ifac)->ifa_magic == IFA_CONTAINER_MAGIC); \
766 	KKASSERT((ifac)->ifa_refcnt > 0); \
767 } while (0)
768 #else
769 #define ASSERT_IFAC_VALID(ifac)	((void)0)
770 #endif
771 
772 static __inline void
773 _IFAREF(struct ifaddr *_ifa, int _cpu_id)
774 {
775 	struct ifaddr_container *_ifac = &_ifa->ifa_containers[_cpu_id];
776 
777 	crit_enter();
778 	ASSERT_IFAC_VALID(_ifac);
779 	++_ifac->ifa_refcnt;
780 	crit_exit();
781 }
782 
783 static __inline void
784 IFAREF(struct ifaddr *_ifa)
785 {
786 	_IFAREF(_ifa, mycpuid);
787 }
788 
789 #include <sys/malloc.h>
790 
791 MALLOC_DECLARE(M_IFADDR);
792 MALLOC_DECLARE(M_IFMADDR);
793 MALLOC_DECLARE(M_IFNET);
794 
795 void	ifac_free(struct ifaddr_container *, int);
796 
797 static __inline void
798 _IFAFREE(struct ifaddr *_ifa, int _cpu_id)
799 {
800 	struct ifaddr_container *_ifac = &_ifa->ifa_containers[_cpu_id];
801 
802 	crit_enter();
803 	ASSERT_IFAC_VALID(_ifac);
804 	if (--_ifac->ifa_refcnt == 0)
805 		ifac_free(_ifac, _cpu_id);
806 	crit_exit();
807 }
808 
809 static __inline void
810 IFAFREE(struct ifaddr *_ifa)
811 {
812 	_IFAFREE(_ifa, mycpuid);
813 }
814 
815 static __inline void
816 ifnet_serialize_array_enter(lwkt_serialize_t *_arr, int _arrcnt,
817     enum ifnet_serialize _slz)
818 {
819 	KKASSERT(_slz == IFNET_SERIALIZE_ALL);
820 	lwkt_serialize_array_enter(_arr, _arrcnt, 0);
821 }
822 
823 static __inline void
824 ifnet_serialize_array_exit(lwkt_serialize_t *_arr, int _arrcnt,
825     enum ifnet_serialize _slz)
826 {
827 	KKASSERT(_slz == IFNET_SERIALIZE_ALL);
828 	lwkt_serialize_array_exit(_arr, _arrcnt, 0);
829 }
830 
831 static __inline int
832 ifnet_serialize_array_try(lwkt_serialize_t *_arr, int _arrcnt,
833     enum ifnet_serialize _slz)
834 {
835 	KKASSERT(_slz == IFNET_SERIALIZE_ALL);
836 	return lwkt_serialize_array_try(_arr, _arrcnt, 0);
837 }
838 
839 #ifdef INVARIANTS
840 
841 static __inline void
842 ifnet_serialize_array_assert(lwkt_serialize_t *_arr, int _arrcnt,
843     enum ifnet_serialize _slz, boolean_t _serialized)
844 {
845 	int _i;
846 
847 	KKASSERT(_slz == IFNET_SERIALIZE_ALL);
848 	if (_serialized) {
849 		for (_i = 0; _i < _arrcnt; ++_i)
850 			ASSERT_SERIALIZED(_arr[_i]);
851 	} else {
852 		for (_i = 0; _i < _arrcnt; ++_i)
853 			ASSERT_NOT_SERIALIZED(_arr[_i]);
854 	}
855 }
856 
857 #endif	/* INVARIANTS */
858 
859 #define REINPUT_KEEPRCVIF	0x0001	/* ether_reinput_oncpu() */
860 #define REINPUT_RUNBPF 		0x0002	/* ether_reinput_oncpu() */
861 
862 /*
863  * MPSAFE NOTE for ifnet queue (ifnet), ifnet array, ifunit() and
864  * ifindex2ifnet.
865  *
866  * - ifnet queue must only be accessed by non-netisr threads and
867  *   ifnet lock must be held (by ifnet_lock()).
868  * - If accessing ifnet queue is needed in netisrs, ifnet array
869  *   (obtained through ifnet_array_get()) must be used instead.
870  *   There is no need to (must not, actually) hold ifnet lock for
871  *   ifnet array accessing.
872  * - ifindex2ifnet could be accessed by both non-netisr threads and
873  *   netisrs.  Accessing ifindex2ifnet in non-netisr threads must be
874  *   protected by ifnet lock (by ifnet_lock()).  Accessing
875  *   ifindex2ifnet in netisrs is lockless MPSAFE and ifnet lock must
876  *   not be held.  However, ifindex2ifnet should be saved in a stack
877  *   variable to get a consistent view of ifindex2ifnet, if
878  *   ifindex2ifnet is accessed multiple times from a function in
879  *   netisrs.
880  * - ifunit() must only be called in non-netisr threads and ifnet
881  *   lock must be held before calling this function and for the
882  *   accessing of the ifp returned by this function.
883  * - If ifunit() is needed in netisr, ifunit_netisr() must be used
884  *   instead.  There is no need to (must not, actually) hold ifnet
885  *   lock for ifunit_netisr() and the returned ifp.
886  */
887 extern struct ifnethead	ifnet;
888 #define ifnetlist	ifnet	/* easily distinguished ifnet alias */
889 
890 extern struct ifnet	**ifindex2ifnet;
891 extern int		if_index;
892 
893 struct ifnet		*ifunit(const char *);
894 struct ifnet		*ifunit_netisr(const char *);
895 const struct ifnet_array *ifnet_array_get(void);
896 int			ifnet_array_isempty(void);
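
/*
 * Per the MPSAFE notes above, netisr code iterates interfaces through the
 * ifnet array instead of the locked ifnet list; a sketch:
 *
 *	const struct ifnet_array *arr = ifnet_array_get();
 *	int i;
 *
 *	for (i = 0; i < arr->ifnet_count; ++i) {
 *		struct ifnet *ifp = arr->ifnet_arr[i];
 *		...
 *	}
 */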
897 
898 extern	int ifqmaxlen;
899 extern	struct ifnet *loif;
900 
901 struct ip;
902 struct tcphdr;
903 
904 void	ether_ifattach(struct ifnet *, const uint8_t *,
905 	    struct lwkt_serialize *);
906 void	ether_ifattach_bpf(struct ifnet *, const uint8_t *, u_int, u_int,
907 	    struct lwkt_serialize *);
908 void	ether_ifdetach(struct ifnet *);
909 void	ether_demux(struct mbuf *);
910 void	ether_demux_oncpu(struct ifnet *, struct mbuf *);
911 void	ether_reinput_oncpu(struct ifnet *, struct mbuf *, int);
912 void	ether_input(struct ifnet *, struct mbuf *,
913 	    const struct pktinfo *, int);
914 int	ether_output_frame(struct ifnet *, struct mbuf *);
915 int	ether_ioctl(struct ifnet *, u_long, caddr_t);
916 u_char	*kether_aton(const char *, u_char *);
917 char	*kether_ntoa(const u_char *, char *);
918 struct ifnet *ether_bridge_interface(struct ifnet *ifp);
919 uint32_t	ether_crc32_le(const uint8_t *, size_t);
920 uint32_t	ether_crc32_be(const uint8_t *, size_t);
921 
922 int	if_addmulti(struct ifnet *, struct sockaddr *, struct ifmultiaddr **);
923 int	if_addmulti_serialized(struct ifnet *, struct sockaddr *,
924 	    struct ifmultiaddr **);
925 int	if_allmulti(struct ifnet *, int);
926 void	if_attach(struct ifnet *, struct lwkt_serialize *);
927 int	if_delmulti(struct ifnet *, struct sockaddr *);
928 void	if_delallmulti_serialized(struct ifnet *ifp);
929 void	if_purgeaddrs_nolink(struct ifnet *);
930 void	if_detach(struct ifnet *);
931 void	if_down(struct ifnet *);
932 void	if_link_state_change(struct ifnet *);
933 void	if_initname(struct ifnet *, const char *, int);
934 int	if_getanyethermac(uint16_t *, int);
935 int	if_printf(struct ifnet *, const char *, ...) __printflike(2, 3);
936 struct ifnet *if_alloc(uint8_t);
937 void	if_free(struct ifnet *);
938 void	if_route(struct ifnet *, int flag, int fam);
939 int	if_setlladdr(struct ifnet *, const u_char *, int);
940 void	if_unroute(struct ifnet *, int flag, int fam);
941 void	if_up(struct ifnet *);
942 /*void	ifinit(void);*/ /* declared in systm.h for main() */
943 int	ifioctl(struct socket *, u_long, caddr_t, struct ucred *);
944 int	ifpromisc(struct ifnet *, int);
945 
946 struct	ifg_group *if_creategroup(const char *);
947 int     if_addgroup(struct ifnet *, const char *);
948 int     if_delgroup(struct ifnet *, const char *);
949 int     if_getgroup(caddr_t, struct ifnet *);
950 int     if_getgroupmembers(caddr_t);
951 
952 struct	ifaddr *ifa_ifwithaddr(struct sockaddr *);
953 struct	ifaddr *ifa_ifwithdstaddr(struct sockaddr *);
954 struct	ifaddr *ifa_ifwithnet(struct sockaddr *);
955 struct	ifaddr *ifa_ifwithroute(int, struct sockaddr *, struct sockaddr *);
956 struct	ifaddr *ifaof_ifpforaddr(struct sockaddr *, struct ifnet *);
957 
958 typedef void *if_com_alloc_t(u_char type, struct ifnet *ifp);
959 typedef void if_com_free_t(void *com, u_char type);
960 void    if_register_com_alloc(u_char, if_com_alloc_t *a, if_com_free_t *);
961 void    if_deregister_com_alloc(u_char);
962 
963 void	*ifa_create(int);
964 void	ifa_destroy(struct ifaddr *);
965 void	ifa_iflink(struct ifaddr *, struct ifnet *, int);
966 void	ifa_ifunlink(struct ifaddr *, struct ifnet *);
967 void	ifa_marker_init(struct ifaddr_marker *, struct ifnet *);
968 
969 struct	ifmultiaddr *ifmaof_ifpforaddr(struct sockaddr *, struct ifnet *);
970 int	if_simloop(struct ifnet *ifp, struct mbuf *m, int af, int hlen);
971 void	if_devstart(struct ifnet *ifp); /* COMPAT */
972 void	if_devstart_sched(struct ifnet *ifp); /* COMPAT */
973 
974 void	ifnet_lock(void);
975 void	ifnet_unlock(void);
976 
977 #define IF_LLSOCKADDR(ifp)						\
978     ((struct sockaddr_dl *)(ifp)->if_lladdr->ifa_addr)
979 #define IF_LLADDR(ifp)	LLADDR(IF_LLSOCKADDR(ifp))
980 
981 #ifdef IFPOLL_ENABLE
982 int	ifpoll_register(struct ifnet *);
983 int	ifpoll_deregister(struct ifnet *);
984 #endif	/* IFPOLL_ENABLE */
985 
986 #endif /* _KERNEL */
987 
988 #endif /* !_NET_IF_VAR_H_ */
989