/*	$OpenBSD: ifq.h,v 1.42 2024/11/20 02:18:45 dlg Exp $ */

/*
 * Copyright (c) 2015 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _NET_IFQ_H_
#define _NET_IFQ_H_

struct ifnet;
struct kstat;

struct ifq_ops;

struct ifqueue {
	struct ifnet		*ifq_if;
	struct taskq		*ifq_softnet;
	union {
		void			*_ifq_softc;
		/*
		 * a ring's sndq is found by looking up an array of pointers.
		 * by default we only have one sndq and the default drivers
		 * don't use ifq_softc, so we can borrow it for the map until
		 * we need to allocate a proper map.
		 */
		struct ifqueue		*_ifq_ifqs[1];
	} _ifq_ptr;
#define ifq_softc		 _ifq_ptr._ifq_softc
#define ifq_ifqs		 _ifq_ptr._ifq_ifqs

	/* mbuf handling */
	struct mutex		 ifq_mtx;
	const struct ifq_ops	*ifq_ops;
	void			*ifq_q;
	struct mbuf_list	 ifq_free;
	unsigned int		 ifq_len;
	unsigned int		 ifq_oactive;

	/* statistics */
	uint64_t		 ifq_packets;
	uint64_t		 ifq_bytes;
	uint64_t		 ifq_qdrops;
	uint64_t		 ifq_errors;
	uint64_t		 ifq_mcasts;
	uint32_t		 ifq_oactives;

	struct kstat		*ifq_kstat;

	/* work serialisation */
	struct mutex		 ifq_task_mtx;
	struct task_list	 ifq_task_list;
	void			*ifq_serializer;
	struct task		 ifq_bundle;

	/* work to be serialised */
	struct task		 ifq_start;
	struct task		 ifq_restart;

	/* properties */
	unsigned int		 ifq_maxlen;
	unsigned int		 ifq_idx;
};

struct ifiqueue {
	struct ifnet		*ifiq_if;
	struct taskq		*ifiq_softnet;
	union {
		void			*_ifiq_softc;
		struct ifiqueue		*_ifiq_ifiqs[1];
	} _ifiq_ptr;
#define ifiq_softc		 _ifiq_ptr._ifiq_softc
#define ifiq_ifiqs		 _ifiq_ptr._ifiq_ifiqs

	struct mutex		 ifiq_mtx;
	struct mbuf_list	 ifiq_ml;
	struct task		 ifiq_task;
	unsigned int		 ifiq_pressure;

	/* counters */
	uint64_t		 ifiq_packets;
	uint64_t		 ifiq_bytes;
	uint64_t		 ifiq_fdrops;
	uint64_t		 ifiq_qdrops;
	uint64_t		 ifiq_errors;
	uint64_t		 ifiq_mcasts;
	uint64_t		 ifiq_noproto;

	/* number of times a list of packets was put on ifiq_ml */
	uint64_t		 ifiq_enqueues;
	/* number of times a list of packets was pulled off ifiq_ml */
	uint64_t		 ifiq_dequeues;

	struct kstat		*ifiq_kstat;

	/* properties */
	unsigned int		 ifiq_idx;
};

#ifdef _KERNEL

#define IFQ_MAXLEN		256

/*
 * Interface Send Queues
 *
 * struct ifqueue sits between the network stack and a driver's
 * transmission of packets. The high level view is that when the stack
 * has finished generating a packet it hands it to a driver for
 * transmission. It does this by queueing the packet on an ifqueue and
 * notifying the driver to start transmission of the queued packets.
 *
 * A network device may have multiple contexts for the transmission
 * of packets, ie, independent transmit rings. Such a network device,
 * represented by a struct ifnet, would then have multiple ifqueue
 * structures, each of which maps to an independent transmit ring.
 *
 * struct ifqueue also provides the point where conditioning of
 * traffic (ie, priq and hfsc) is implemented, and provides some
 * infrastructure to assist in the implementation of network drivers.
 *
 * = ifq API
 *
 * The ifq API provides functions for three distinct consumers:
 *
 * 1. The network stack
 * 2. Traffic QoS/conditioning implementations
 * 3. Network drivers
 *
 * == Network Stack API
 *
 * The network stack is responsible for initialising and destroying
 * the ifqueue structures, changing the traffic conditioner on an
 * interface, enqueuing packets for transmission, and notifying
 * the driver to start transmission of a particular ifqueue.
 *
 * === ifq_init()
 *
 * During if_attach(), the network stack calls ifq_init() to initialise
 * the ifqueue structure. By default it configures the priq traffic
 * conditioner.
 *
 * === ifq_destroy()
 *
 * The network stack calls ifq_destroy() during if_detach() to tear down
 * the ifqueue structure. It frees the traffic conditioner state, and
 * frees any mbufs that were left queued.
 *
 * === ifq_attach()
 *
 * ifq_attach() is used to replace the current traffic conditioner on
 * the ifqueue. All the pending mbufs are removed from the previous
 * conditioner and requeued on the new one.
 *
 * === ifq_idx()
 *
 * ifq_idx() selects a specific ifqueue from the current ifnet
 * structure for use in the transmission of the mbuf.
 *
 * === ifq_enqueue()
 *
 * ifq_enqueue() attempts to fit an mbuf onto the ifqueue. The
 * current traffic conditioner may drop a packet to make space on the
 * queue.
 *
 * === ifq_start()
 *
 * Once a packet has been successfully queued with ifq_enqueue(),
 * the network card is notified with a call to ifq_start().
 * Calls to ifq_start() run in the ifqueue serialisation context,
 * guaranteeing that only one instance of ifp->if_qstart() will be
 * running on behalf of a specific ifqueue in the system at any point
 * in time.
 *
 * == Traffic conditioners API
 *
 * The majority of interaction between struct ifqueue and a traffic
 * conditioner occurs via the callbacks a traffic conditioner provides
 * in an instance of struct ifq_ops.
 *
 * XXX document ifqop_*
 *
 * The ifqueue API implements the locking on behalf of the conditioning
 * implementations so conditioners only have to reject or keep mbufs.
 * If something needs to inspect a conditioner's internals, the queue lock
 * needs to be taken to allow for a consistent or safe view. The queue
 * lock may be taken and released with ifq_q_enter() and ifq_q_leave().
 *
 * === ifq_q_enter()
 *
 * Code wishing to access a conditioner's internals may take the queue
 * lock with ifq_q_enter(). The caller must pass a reference to the
 * conditioner's ifq_ops structure so the infrastructure can ensure the
 * caller is able to understand the internals. ifq_q_enter() returns
 * a pointer to the conditioner's internal structures, or NULL if the
 * ifq_ops did not match the current conditioner.
 *
 * === ifq_q_leave()
 *
 * The queue lock acquired with ifq_q_enter() is released with
 * ifq_q_leave().
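 *
 * For example, a minimal sketch, assuming a conditioner with private
 * state struct cond_state exported through a hypothetical cond_ops:
 *
 *	struct cond_state *st;
 *
 *	st = ifq_q_enter(ifq, &cond_ops);
 *	if (st == NULL)
 *		return; // a different conditioner is attached
 *
 *	// st may be inspected safely while the queue lock is held
 *	ifq_q_leave(ifq, st);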
 *
 * === ifq_mfreem() and ifq_mfreeml()
 *
 * A goal of the API is to avoid freeing an mbuf while mutexes are
 * held. Because the ifq API manages the lock on behalf of the backend
 * ifqops, the backend should not directly free mbufs. If a conditioner
 * backend needs to drop a packet during the handling of ifqop_deq_begin,
 * it may free it by calling ifq_mfreem(). This accounts for the drop,
 * and schedules the free of the mbuf outside the hold of ifq_mtx.
 * ifq_mfreeml() takes an mbuf list as an argument instead.
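 *
 * A hedged sketch of such a drop, using hypothetical cond_* helpers
 * for a conditioner that ages stale packets out of its queue:
 *
 *	struct mbuf *
 *	cond_deq_begin(struct ifqueue *ifq, void **cookiep)
 *	{
 *		struct cond_state *st = ifq->ifq_q;
 *		struct mbuf *m;
 *
 *		while ((m = cond_head(st)) != NULL && cond_stale(st, m)) {
 *			cond_remove(st, m);
 *			// the drop is accounted for now; the free happens
 *			// after ifq_mtx is released
 *			ifq_mfreem(ifq, m);
 *		}
 *
 *		*cookiep = m;
 *		return (m);
 *	}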
 *
 *
 * == Network Driver API
 *
 * The API used by network drivers is mostly documented in the
 * ifq_dequeue(9) manpage except for ifq_serialize().
 *
 * === ifq_serialize()
 *
 * A driver may run arbitrary work in the ifqueue serialiser context
 * via ifq_serialize(). The work to be done is represented by a task
 * that has been prepared with task_set().
 *
 * The work will be run in series with any other work dispatched by
 * ifq_start(), ifq_restart(), or other ifq_serialize() calls.
 *
 * Because the work may be run on another CPU, the lifetime of the
 * task and the work it represents can extend beyond the end of the
 * call to ifq_serialize() that dispatched it.
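 *
 * A minimal sketch (drv_tick() and sc_tick are hypothetical names;
 * the task structure must remain allocated until the work has run):
 *
 *	task_set(&sc->sc_tick, drv_tick, sc);
 *	ifq_serialize(&ifp->if_snd, &sc->sc_tick);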
 *
 *
 * = ifqueue work serialisation
 *
 * ifqueues provide a mechanism to dispatch work to be run in a single
 * context. Work in this mechanism is represented by task structures.
 *
 * The tasks are run in a context similar to a taskq serviced by a
 * single kernel thread, except the work is run immediately by the
 * first CPU that dispatches work. If a second CPU attempts to dispatch
 * additional tasks while the first is still running, it will be queued
 * to be run by the first CPU. The second CPU will return immediately.
 *
 * = MP Safe Network Drivers
 *
 * An MP safe network driver is one in which its start routine can be
 * called by the network stack without holding the big kernel lock.
 *
 * == Attach
 *
 * A driver advertises its ability to run its start routine without
 * the kernel lock by setting the IFXF_MPSAFE flag in ifp->if_xflags
 * before calling if_attach(). Advertising an MPSAFE start routine
 * also implies that the driver understands that a network card can
 * have multiple rings or transmit queues, and therefore provides an
 * if_qstart function (which takes an ifqueue pointer) instead of an
 * if_start function (which takes an ifnet pointer).
 *
 * If the hardware supports multiple transmit rings, it advertises
 * support for multiple rings to the network stack with if_attach_queues()
 * after the call to if_attach(). if_attach_queues() allocates a struct
 * ifqueue for each hardware ring, which can then be initialised by
 * the driver with data for each ring.
 *
 *	void	drv_start(struct ifqueue *);
 *
 *	void
 *	drv_attach()
 *	{
 *	...
 *		ifp->if_xflags = IFXF_MPSAFE;
 *		ifp->if_qstart = drv_start;
 *		if_attach(ifp);
 *
 *		if_attach_queues(ifp, DRV_NUM_TX_RINGS);
 *		for (i = 0; i < DRV_NUM_TX_RINGS; i++) {
 *			struct ifqueue *ifq = ifp->if_ifqs[i];
 *			struct drv_tx_ring *ring = &sc->sc_tx_rings[i];
 *
 *			ifq->ifq_softc = ring;
 *			ring->ifq = ifq;
 *		}
 *	}
 *
 * The network stack will then call ifp->if_qstart via ifq_start()
 * to guarantee there is only one instance of that function running
 * for each ifq in the system, and to serialise it with other work
 * the driver may provide.
 *
 * == Initialise
 *
 * When the stack requests an interface be brought up (ie, drv_ioctl()
 * is called to handle SIOCSIFFLAGS with IFF_UP set in ifp->if_flags),
 * drivers should set IFF_RUNNING in ifp->if_flags, and then call
 * ifq_clr_oactive() against each ifq.
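 *
 * A minimal sketch of this, reusing the hypothetical driver above:
 *
 *	void
 *	drv_up(struct drv_softc *sc)
 *	{
 *		struct ifnet *ifp = &sc->sc_if;
 *		int i;
 *
 *		// program the hardware and fill the rx rings here
 *
 *		SET(ifp->if_flags, IFF_RUNNING);
 *		for (i = 0; i < sc->sc_num_queues; i++)
 *			ifq_clr_oactive(ifp->if_ifqs[i]);
 *	}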
 *
 * == if_start
 *
 * ifq_start() checks that IFF_RUNNING is set in ifp->if_flags, that
 * ifq_is_oactive() does not return true, and that there are pending
 * packets to transmit via a call to ifq_len(). Therefore, drivers are
 * no longer responsible for doing this themselves.
 *
 * If a driver should not transmit packets while its link is down, use
 * ifq_purge() to flush pending packets from the transmit queue.
 *
 * Drivers for hardware should use the following pattern to transmit
 * packets:
 *
 *	void
 *	drv_start(struct ifqueue *ifq)
 *	{
 *		struct drv_tx_ring *ring = ifq->ifq_softc;
 *		struct ifnet *ifp = ifq->ifq_if;
 *		struct drv_softc *sc = ifp->if_softc;
 *		struct mbuf *m;
 *
 *		if (NO_LINK) {
 *			ifq_purge(ifq);
 *			return;
 *		}
 *
 *		for (;;) {
 *			if (NO_SPACE(ring)) {
 *				ifq_set_oactive(ifq);
 *				break;
 *			}
 *
 *			m = ifq_dequeue(ifq);
 *			if (m == NULL)
 *				break;
 *
 *			if (drv_encap(sc, ring, m) != 0) { // map and fill ring
 *				m_freem(m);
 *				continue;
 *			}
 *
 *			bpf_mtap();
 *		}
 *
 *		drv_kick(ring); // notify hw of new descriptors on the ring
 *	}
 *
 * == Transmission completion
 *
 * The following pattern should be used for transmit queue interrupt
 * processing:
 *
 *	void
 *	drv_txeof(struct drv_tx_ring *ring)
 *	{
 *		struct ifqueue *ifq = ring->ifq;
 *
 *		while (COMPLETED_PKTS(ring)) {
 *			// unmap packets, m_freem() the mbufs.
 *		}
 *
 *		if (ifq_is_oactive(ifq))
 *			ifq_restart(ifq);
 *	}
 *
 * == Stop
 *
 * When an interface is brought down (ie, IFF_UP was cleared in
 * ifp->if_flags), the driver should clear IFF_RUNNING in ifp->if_flags
 * and guarantee the start routine is not running before freeing any
 * resources it uses:
 *
 *	void
 *	drv_down(struct drv_softc *sc)
 *	{
 *		struct ifnet *ifp = &sc->sc_if;
 *		struct ifqueue *ifq;
 *		int i;
 *
 *		CLR(ifp->if_flags, IFF_RUNNING);
 *		DISABLE_INTERRUPTS();
 *
 *		for (i = 0; i < sc->sc_num_queues; i++) {
 *			ifq = ifp->if_ifqs[i];
 *			ifq_barrier(ifq);
 *		}
 *
 *		intr_barrier(sc->sc_ih);
 *
 *		FREE_RESOURCES();
 *
 *		for (i = 0; i < sc->sc_num_queues; i++) {
 *			ifq = ifp->if_ifqs[i];
 *			ifq_clr_oactive(ifq);
 *		}
 *	}
 *
 */
407edc1be42Sdlg 
40844430e21Sdlg struct ifq_ops {
409722eb25eSdlg 	unsigned int		 (*ifqop_idx)(unsigned int,
410722eb25eSdlg 				    const struct mbuf *);
41132290fe6Sdlg 	struct mbuf		*(*ifqop_enq)(struct ifqueue *, struct mbuf *);
41244430e21Sdlg 	struct mbuf		*(*ifqop_deq_begin)(struct ifqueue *, void **);
41344430e21Sdlg 	void			 (*ifqop_deq_commit)(struct ifqueue *,
41444430e21Sdlg 				    struct mbuf *, void *);
41544430e21Sdlg 	void			 (*ifqop_purge)(struct ifqueue *,
41644430e21Sdlg 				    struct mbuf_list *);
417722eb25eSdlg 	void			*(*ifqop_alloc)(unsigned int, void *);
418722eb25eSdlg 	void			 (*ifqop_free)(unsigned int, void *);
41944430e21Sdlg };
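
/*
 * A hedged sketch of how a conditioner could export its callbacks
 * (the cond_* names are hypothetical; priq in ifq.c is a real
 * example):
 *
 *	static const struct ifq_ops cond_ops = {
 *		.ifqop_idx = cond_idx,
 *		.ifqop_enq = cond_enq,
 *		.ifqop_deq_begin = cond_deq_begin,
 *		.ifqop_deq_commit = cond_deq_commit,
 *		.ifqop_purge = cond_purge,
 *		.ifqop_alloc = cond_alloc,
 *		.ifqop_free = cond_free,
 *	};
 */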

extern const struct ifq_ops * const ifq_priq_ops;

/*
 * Interface send queues.
 */

void		 ifq_init(struct ifqueue *, struct ifnet *, unsigned int);
void		 ifq_attach(struct ifqueue *, const struct ifq_ops *, void *);
void		 ifq_destroy(struct ifqueue *);
void		 ifq_add_data(struct ifqueue *, struct if_data *);
int		 ifq_enqueue(struct ifqueue *, struct mbuf *);
void		 ifq_start(struct ifqueue *);
struct mbuf	*ifq_deq_begin(struct ifqueue *);
void		 ifq_deq_commit(struct ifqueue *, struct mbuf *);
void		 ifq_deq_rollback(struct ifqueue *, struct mbuf *);
struct mbuf	*ifq_dequeue(struct ifqueue *);
int		 ifq_hdatalen(struct ifqueue *);
void		 ifq_init_maxlen(struct ifqueue *, unsigned int);
void		 ifq_mfreem(struct ifqueue *, struct mbuf *);
void		 ifq_mfreeml(struct ifqueue *, struct mbuf_list *);
unsigned int	 ifq_purge(struct ifqueue *);
void		*ifq_q_enter(struct ifqueue *, const struct ifq_ops *);
void		 ifq_q_leave(struct ifqueue *, void *);
void		 ifq_serialize(struct ifqueue *, struct task *);
void		 ifq_barrier(struct ifqueue *);
void		 ifq_set_oactive(struct ifqueue *);
void		 ifq_deq_set_oactive(struct ifqueue *);

int		 ifq_deq_sleep(struct ifqueue *, struct mbuf **, int, int,
		     const char *, volatile unsigned int *,
		     volatile unsigned int *);

#define ifq_len(_ifq)		READ_ONCE((_ifq)->ifq_len)
#define ifq_empty(_ifq)		(ifq_len(_ifq) == 0)

static inline int
ifq_is_priq(struct ifqueue *ifq)
{
	return (ifq->ifq_ops == ifq_priq_ops);
}

static inline void
ifq_clr_oactive(struct ifqueue *ifq)
{
	ifq->ifq_oactive = 0;
}

static inline unsigned int
ifq_is_oactive(struct ifqueue *ifq)
{
	return (ifq->ifq_oactive);
}

static inline void
ifq_restart(struct ifqueue *ifq)
{
	ifq_serialize(ifq, &ifq->ifq_restart);
}

static inline unsigned int
ifq_idx(struct ifqueue *ifq, unsigned int nifqs, const struct mbuf *m)
{
	return ((*ifq->ifq_ops->ifqop_idx)(nifqs, m));
}

/* ifiq */

void		 ifiq_init(struct ifiqueue *, struct ifnet *, unsigned int);
void		 ifiq_destroy(struct ifiqueue *);
int		 ifiq_input(struct ifiqueue *, struct mbuf_list *);
int		 ifiq_enqueue(struct ifiqueue *, struct mbuf *);
void		 ifiq_add_data(struct ifiqueue *, struct if_data *);

#define ifiq_len(_ifiq)		READ_ONCE(ml_len(&(_ifiq)->ifiq_ml))
#define ifiq_empty(_ifiq)	(ifiq_len(_ifiq) == 0)
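
/*
 * A minimal rx sketch (the drv_* names are hypothetical): a driver
 * gathers completed packets on an mbuf_list and hands them to the
 * stack in a single ifiq_input() call. A nonzero return asks the
 * driver to apply backpressure, eg, by shrinking its rx ring fill
 * level.
 *
 *	void
 *	drv_rxeof(struct drv_rx_ring *ring)
 *	{
 *		struct mbuf_list ml = MBUF_LIST_INITIALIZER();
 *		struct mbuf *m;
 *
 *		while ((m = DRV_NEXT_RX_PKT(ring)) != NULL)
 *			ml_enqueue(&ml, m);
 *
 *		if (ifiq_input(ring->ifiq, &ml))
 *			drv_rx_slow(ring); // hypothetical backpressure hook
 *	}
 */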

#endif /* _KERNEL */

#endif /* _NET_IFQ_H_ */