/*	$OpenBSD: ifq.h,v 1.27 2020/01/25 06:31:32 dlg Exp $ */

/*
 * Copyright (c) 2015 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _NET_IFQ_H_
#define _NET_IFQ_H_

struct ifnet;

struct ifq_ops;

struct ifqueue {
	struct ifnet		*ifq_if;
	struct taskq		*ifq_softnet;
	union {
		void			*_ifq_softc;
		/*
		 * a ring's sndq is found by looking up an array of pointers.
		 * by default we only have one sndq and the default drivers
		 * don't use ifq_softc, so we can borrow it for the map until
		 * we need to allocate a proper map.
		 */
		struct ifqueue		*_ifq_ifqs[1];
	} _ifq_ptr;
#define ifq_softc		 _ifq_ptr._ifq_softc
#define ifq_ifqs		 _ifq_ptr._ifq_ifqs

	/* mbuf handling */
	struct mutex		 ifq_mtx;
	const struct ifq_ops	*ifq_ops;
	void			*ifq_q;
	struct mbuf_list	 ifq_free;
	unsigned int		 ifq_len;
	unsigned int		 ifq_oactive;

	/* statistics */
	uint64_t		 ifq_packets;
	uint64_t		 ifq_bytes;
	uint64_t		 ifq_qdrops;
	uint64_t		 ifq_errors;
	uint64_t		 ifq_mcasts;

	/* work serialisation */
	struct mutex		 ifq_task_mtx;
	struct task_list	 ifq_task_list;
	void			*ifq_serializer;
	struct task		 ifq_bundle;

	/* work to be serialised */
	struct task		 ifq_start;
	struct task		 ifq_restart;

	/* properties */
	unsigned int		 ifq_maxlen;
	unsigned int		 ifq_idx;
};

struct ifiqueue {
	struct ifnet		*ifiq_if;
	struct taskq		*ifiq_softnet;
	union {
		void			*_ifiq_softc;
		struct ifiqueue		*_ifiq_ifiqs[1];
	} _ifiq_ptr;
#define ifiq_softc		 _ifiq_ptr._ifiq_softc
#define ifiq_ifiqs		 _ifiq_ptr._ifiq_ifiqs

	struct mutex		 ifiq_mtx;
	struct mbuf_list	 ifiq_ml;
	struct task		 ifiq_task;
	unsigned int		 ifiq_pressure;

	/* counters */
	uint64_t		 ifiq_packets;
	uint64_t		 ifiq_bytes;
	uint64_t		 ifiq_qdrops;
	uint64_t		 ifiq_errors;
	uint64_t		 ifiq_mcasts;
	uint64_t		 ifiq_noproto;

	/* properties */
	unsigned int		 ifiq_idx;
};

#ifdef _KERNEL

#define IFQ_MAXLEN		256

/*
 *
 * Interface Send Queues
 *
 * struct ifqueue sits between the network stack and a driver's
 * transmission of packets. The high level view is that when the stack
 * has finished generating a packet it hands it to a driver for
 * transmission. It does this by queueing the packet on an ifqueue and
 * notifying the driver to start transmission of the queued packets.
 *
 * A network device may have multiple contexts for the transmission
 * of packets, ie, independent transmit rings. A network device
 * represented by a struct ifnet may have multiple ifqueue structures,
 * each of which represents an independent context.
 *
 * struct ifqueue also provides the point where conditioning of
 * traffic (ie, priq and hfsc) is implemented, and provides some
 * infrastructure to assist in the implementation of network drivers.
 *
 * = ifq API
 *
 * The ifq API provides functions for three distinct consumers:
 *
 * 1. The network stack
 * 2. Traffic QoS/conditioning implementations
 * 3. Network drivers
 *
 * == Network Stack API
 *
 * The network stack is responsible for initialising and destroying
 * the ifqueue structures, changing the traffic conditioner on an
 * interface, enqueuing packets for transmission, and notifying
 * the driver to start transmission of a particular ifqueue.
 *
 * === ifq_init()
 *
 * During if_attach(), the network stack calls ifq_init() to initialise
 * the ifqueue structure. By default it configures the priq traffic
 * conditioner.
 *
 * === ifq_destroy()
 *
 * The network stack calls ifq_destroy() during if_detach() to tear down
 * the ifqueue structure. It frees the traffic conditioner state, and
 * frees any mbufs that were left queued.
 *
 * === ifq_attach()
 *
 * ifq_attach() is used to replace the current traffic conditioner on
 * the ifqueue. All the pending mbufs are removed from the previous
 * conditioner and requeued on the new one.
 *
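 * For example, a hypothetical sketch of putting the default priq
 * conditioner back onto a queue (ifq_priq_ops is declared later in
 * this header; ifq_attach() handles its own locking):
 *
 *	ifq_attach(ifq, ifq_priq_ops, NULL);
 *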
 * === ifq_idx()
 *
 * ifq_idx() selects a specific ifqueue from the current ifnet
 * structure for use in the transmission of the mbuf.
 *
 * === ifq_enqueue()
 *
 * ifq_enqueue() attempts to fit an mbuf onto the ifqueue. The
 * current traffic conditioner may drop a packet to make space on the
 * queue.
 *
 * === ifq_start()
 *
 * Once a packet has been successfully queued with ifq_enqueue(),
 * the network card is notified with a call to ifq_start().
 * Calls to ifq_start() run in the ifqueue serialisation context,
 * guaranteeing that only one instance of ifp->if_qstart() will be
 * running on behalf of a specific ifqueue in the system at any point
 * in time.
 *
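 * Taken together, a minimal sketch of the stack side of the transmit
 * path might look like this (assuming the ifnet's if_ifqs array and
 * if_nifqs count):
 *
 *	struct ifqueue *ifq;
 *	int error;
 *
 *	ifq = ifp->if_ifqs[ifq_idx(&ifp->if_snd, ifp->if_nifqs, m)];
 *
 *	error = ifq_enqueue(ifq, m);
 *	if (error)
 *		return (error);
 *
 *	ifq_start(ifq);
 *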
 * == Traffic conditioners API
 *
 * The majority of interaction between struct ifqueue and a traffic
 * conditioner occurs via the callbacks a traffic conditioner provides
 * in an instance of struct ifq_ops.
 *
 * XXX document ifqop_*
 *
 * The ifqueue API implements the locking on behalf of the conditioning
 * implementations so conditioners only have to reject or keep mbufs.
 * If something needs to inspect a conditioner's internals, the queue lock
 * needs to be taken to allow for a consistent or safe view. The queue
 * lock may be taken and released with ifq_q_enter() and ifq_q_leave().
 *
 * === ifq_q_enter()
 *
 * Code wishing to access a conditioner's internals may take the queue
 * lock with ifq_q_enter(). The caller must pass a reference to the
 * conditioner's ifq_ops structure so the infrastructure can ensure the
 * caller is able to understand the internals. ifq_q_enter() returns
 * a pointer to the conditioner's internal structures, or NULL if the
 * ifq_ops did not match the current conditioner.
 *
 * === ifq_q_leave()
 *
 * The queue lock acquired with ifq_q_enter() is released with
 * ifq_q_leave().
 *
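 * For example, a hedged sketch of inspecting priq state (struct priq
 * is private to the priq implementation and is only named here for
 * illustration):
 *
 *	struct priq *pq;
 *
 *	pq = ifq_q_enter(ifq, ifq_priq_ops);
 *	if (pq != NULL) {
 *		// look at pq while holding the queue lock
 *		ifq_q_leave(ifq, pq);
 *	}
 *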
 * === ifq_mfreem() and ifq_mfreeml()
 *
 * A goal of the API is to avoid freeing an mbuf while mutexes are
 * held. Because the ifq API manages the lock on behalf of the backend
 * ifq_ops, the backend should not directly free mbufs. If a conditioner
 * backend needs to drop a packet during the handling of ifqop_deq_begin,
 * it may free it by calling ifq_mfreem(). This accounts for the drop,
 * and schedules the free of the mbuf outside the hold of ifq_mtx.
 * ifq_mfreeml() takes an mbuf list as an argument instead.
 *
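 * A hedged sketch of a conditioner dropping stale packets during
 * ifqop_deq_begin (the cond_* names are placeholders, not part of
 * the API):
 *
 *	struct mbuf *
 *	cond_deq_begin(struct ifqueue *ifq, void **cookiep)
 *	{
 *		struct cond *c = ifq->ifq_q;
 *		struct mbuf *m;
 *
 *		while ((m = cond_first(c)) != NULL && cond_stale(c, m)) {
 *			cond_remove(c, m);
 *			ifq_mfreem(ifq, m); // drop accounted, freed later
 *		}
 *
 *		return (m);
 *	}
 *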
 *
 * == Network Driver API
 *
 * The API used by network drivers is mostly documented in the
 * ifq_dequeue(9) manpage except for ifq_serialize().
 *
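 * For reference, a sketch of the peek-then-commit dequeue pattern
 * using ifq_deq_begin(), ifq_deq_commit(), and ifq_deq_rollback()
 * (drv_fits() is a placeholder for a ring-space check):
 *
 *	m = ifq_deq_begin(ifq);
 *	if (m == NULL)
 *		return;
 *
 *	if (!drv_fits(sc, m)) {
 *		ifq_deq_rollback(ifq, m);
 *		return;
 *	}
 *
 *	ifq_deq_commit(ifq, m);
 *	// transmit m
 *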
 * === ifq_serialize()
 *
 * A driver may run arbitrary work in the ifqueue serialiser context
 * via ifq_serialize(). The work to be done is represented by a task
 * that has been prepared with task_set().
 *
 * The work will be run in series with any other work dispatched by
 * ifq_start(), ifq_restart(), or other ifq_serialize() calls.
 *
 * Because the work may be run on another CPU, the lifetime of the
 * task and the work it represents can extend beyond the end of the
 * call to ifq_serialize() that dispatched it.
 *
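 * A minimal sketch; the task lives in the softc precisely because of
 * the lifetime rule above (sc_deferred and drv_deferred are
 * hypothetical names):
 *
 *	task_set(&sc->sc_deferred, drv_deferred, sc);
 *	ifq_serialize(ifq, &sc->sc_deferred);
 *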
 *
 * = ifqueue work serialisation
 *
 * ifqueues provide a mechanism to dispatch work to be run in a single
 * context. Work in this mechanism is represented by task structures.
 *
 * The tasks are run in a context similar to a taskq serviced by a
 * single kernel thread, except the work is run immediately by the
 * first CPU that dispatches work. If a second CPU attempts to dispatch
 * additional tasks while the first is still running, they will be
 * queued to be run by the first CPU. The second CPU will return
 * immediately.
 *
 *
 * = MP Safe Network Drivers
 *
 * An MP safe network driver is one in which its start routine can be
 * called by the network stack without holding the big kernel lock.
 *
 * == Attach
 *
 * A driver advertises its ability to run its start routine without
 * the kernel lock by setting the IFXF_MPSAFE flag in ifp->if_xflags
 * before calling if_attach(). Advertising an MPSAFE start routine
 * also implies that the driver understands that a network card can
 * have multiple rings or transmit queues, and therefore provides an
 * if_qstart function (which takes an ifqueue pointer) instead of an
 * if_start function (which takes an ifnet pointer).
 *
 *	void	drv_start(struct ifqueue *);
 *
 *	void
 *	drv_attach()
 *	{
 *	...
 *		ifp->if_xflags = IFXF_MPSAFE;
 *		ifp->if_qstart = drv_start;
 *		if_attach(ifp);
 *	}
 *
 * The network stack will then call ifp->if_qstart via ifq_start()
 * to guarantee there is only one instance of that function running
 * in the system and to serialise it with other work the driver may
 * provide.
 *
 * == Initialise
 *
 * When the stack requests an interface be brought up (ie, drv_ioctl()
 * is called to handle SIOCSIFFLAGS with IFF_UP set in ifp->if_flags)
 * drivers should set IFF_RUNNING in ifp->if_flags and call
 * ifq_clr_oactive().
 *
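 * A sketch of that pattern, assuming a single transmit queue:
 *
 *	void
 *	drv_up(struct drv_softc *sc)
 *	{
 *		struct ifnet *ifp = &sc->sc_if;
 *
 *		// program the hardware, fill rings, etc.
 *
 *		SET(ifp->if_flags, IFF_RUNNING);
 *		ifq_clr_oactive(&ifp->if_snd);
 *	}
 *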
 * == if_start
 *
 * ifq_start() checks that IFF_RUNNING is set in ifp->if_flags, that
 * ifq_is_oactive() does not return true, and that there are pending
 * packets to transmit via a call to ifq_len(). Therefore, drivers are
 * no longer responsible for doing this themselves.
 *
 * If a driver should not transmit packets while its link is down, use
 * ifq_purge() to flush pending packets from the transmit queue.
 *
 * Drivers for hardware should use the following pattern to transmit
 * packets:
 *
 *	void
 *	drv_start(struct ifqueue *ifq)
 *	{
 *		struct ifnet *ifp = ifq->ifq_if;
 *		struct drv_softc *sc = ifp->if_softc;
 *		struct mbuf *m;
 *		int kick = 0;
 *
 *		if (NO_LINK) {
 *			ifq_purge(ifq);
 *			return;
 *		}
 *
 *		for (;;) {
 *			if (NO_SPACE) {
 *				ifq_set_oactive(ifq);
 *				break;
 *			}
 *
 *			m = ifq_dequeue(ifq);
 *			if (m == NULL)
 *				break;
 *
 *			if (drv_encap(sc, m) != 0) { // map and fill ring
 *				m_freem(m);
 *				continue;
 *			}
 *
 *			bpf_mtap();
 *		}
 *
 *		drv_kick(sc); // notify hw of new descriptors on the ring
 *	}
 *
 * == Transmission completion
 *
 * The following pattern should be used for transmit queue interrupt
 * processing:
 *
 *	void
 *	drv_txeof(struct ifqueue *ifq)
 *	{
 *		while (COMPLETED_PKTS) {
 *			// unmap packets, m_freem() the mbufs.
 *		}
 *
 *		if (ifq_is_oactive(ifq))
 *			ifq_restart(ifq);
 *	}
 *
 * == Stop
 *
 * Bringing an interface down (ie, IFF_UP was cleared in ifp->if_flags)
 * should clear IFF_RUNNING in ifp->if_flags, and guarantee the start
 * routine is not running before freeing any resources it uses:
 *
 *	void
 *	drv_down(struct drv_softc *sc)
 *	{
 *		struct ifnet *ifp = &sc->sc_if;
 *		struct ifqueue *ifq;
 *		int i;
 *
 *		CLR(ifp->if_flags, IFF_RUNNING);
 *		DISABLE_INTERRUPTS();
 *
 *		for (i = 0; i < sc->sc_num_queues; i++) {
 *			ifq = ifp->if_ifqs[i];
 *			ifq_barrier(ifq);
 *		}
 *
 *		intr_barrier(sc->sc_ih);
 *
 *		FREE_RESOURCES();
 *
 *		for (i = 0; i < sc->sc_num_queues; i++) {
 *			ifq = ifp->if_ifqs[i];
 *			ifq_clr_oactive(ifq);
 *		}
 *	}
 *
 */

struct ifq_ops {
	unsigned int		 (*ifqop_idx)(unsigned int,
				    const struct mbuf *);
	struct mbuf		*(*ifqop_enq)(struct ifqueue *, struct mbuf *);
	struct mbuf		*(*ifqop_deq_begin)(struct ifqueue *, void **);
	void			 (*ifqop_deq_commit)(struct ifqueue *,
				    struct mbuf *, void *);
	void			 (*ifqop_purge)(struct ifqueue *,
				    struct mbuf_list *);
	void			*(*ifqop_alloc)(unsigned int, void *);
	void			 (*ifqop_free)(unsigned int, void *);
};

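/*
 * A conditioner supplies its callbacks in an instance of the above
 * structure, e.g. (a hypothetical sketch; the cond_* functions are
 * placeholders):
 *
 *	static const struct ifq_ops cond_ops = {
 *		.ifqop_idx	  = cond_idx,
 *		.ifqop_enq	  = cond_enq,
 *		.ifqop_deq_begin  = cond_deq_begin,
 *		.ifqop_deq_commit = cond_deq_commit,
 *		.ifqop_purge	  = cond_purge,
 *		.ifqop_alloc	  = cond_alloc,
 *		.ifqop_free	  = cond_free,
 *	};
 */
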
extern const struct ifq_ops * const ifq_priq_ops;

/*
 * Interface send queues.
 */

void		 ifq_init(struct ifqueue *, struct ifnet *, unsigned int);
void		 ifq_attach(struct ifqueue *, const struct ifq_ops *, void *);
void		 ifq_destroy(struct ifqueue *);
void		 ifq_add_data(struct ifqueue *, struct if_data *);
int		 ifq_enqueue(struct ifqueue *, struct mbuf *);
void		 ifq_start(struct ifqueue *);
struct mbuf	*ifq_deq_begin(struct ifqueue *);
void		 ifq_deq_commit(struct ifqueue *, struct mbuf *);
void		 ifq_deq_rollback(struct ifqueue *, struct mbuf *);
struct mbuf	*ifq_dequeue(struct ifqueue *);
int		 ifq_hdatalen(struct ifqueue *);
void		 ifq_mfreem(struct ifqueue *, struct mbuf *);
void		 ifq_mfreeml(struct ifqueue *, struct mbuf_list *);
unsigned int	 ifq_purge(struct ifqueue *);
void		*ifq_q_enter(struct ifqueue *, const struct ifq_ops *);
void		 ifq_q_leave(struct ifqueue *, void *);
void		 ifq_serialize(struct ifqueue *, struct task *);
void		 ifq_barrier(struct ifqueue *);

int		 ifq_deq_sleep(struct ifqueue *, struct mbuf **, int, int,
		     const char *, volatile unsigned int *,
		     volatile unsigned int *);

#define	ifq_len(_ifq)			((_ifq)->ifq_len)
#define	ifq_empty(_ifq)			(ifq_len(_ifq) == 0)
#define	ifq_set_maxlen(_ifq, _l)	((_ifq)->ifq_maxlen = (_l))
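
/*
 * Drivers commonly size the send queue to match their transmit ring
 * during attach, e.g. (sc_tx_ring_count is a hypothetical softc
 * member):
 *
 *	ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count - 1);
 */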

static inline int
ifq_is_priq(struct ifqueue *ifq)
{
	return (ifq->ifq_ops == ifq_priq_ops);
}

static inline void
ifq_set_oactive(struct ifqueue *ifq)
{
	ifq->ifq_oactive = 1;
}

static inline void
ifq_clr_oactive(struct ifqueue *ifq)
{
	ifq->ifq_oactive = 0;
}

static inline unsigned int
ifq_is_oactive(struct ifqueue *ifq)
{
	return (ifq->ifq_oactive);
}

static inline void
ifq_restart(struct ifqueue *ifq)
{
	ifq_serialize(ifq, &ifq->ifq_restart);
}

static inline unsigned int
ifq_idx(struct ifqueue *ifq, unsigned int nifqs, const struct mbuf *m)
{
	return ((*ifq->ifq_ops->ifqop_idx)(nifqs, m));
}

/* ifiq */

void		 ifiq_init(struct ifiqueue *, struct ifnet *, unsigned int);
void		 ifiq_destroy(struct ifiqueue *);
int		 ifiq_input(struct ifiqueue *, struct mbuf_list *);
int		 ifiq_enqueue(struct ifiqueue *, struct mbuf *);
void		 ifiq_add_data(struct ifiqueue *, struct if_data *);

#define	ifiq_len(_ifiq)			ml_len(&(_ifiq)->ifiq_ml)
#define	ifiq_empty(_ifiq)		ml_empty(&(_ifiq)->ifiq_ml)
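
/*
 * A hedged sketch of a driver receive path handing packets to the
 * stack with ifiq_input(); the ring-handling details are placeholders,
 * and a nonzero return asks the driver to back off (e.g. via
 * if_rxr_livelocked()):
 *
 *	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
 *	struct mbuf *m;
 *
 *	while ((m = RX_RING_NEXT(sc)) != NULL)
 *		ml_enqueue(&ml, m);
 *
 *	if (ifiq_input(ifp->if_iqs[0], &ml))
 *		if_rxr_livelocked(&sc->sc_rx_ring);
 */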

#endif /* _KERNEL */

#endif /* _NET_IFQ_H_ */
47444430e21Sdlg #endif /* _NET_IFQ_H_ */
475