/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)if.h	8.1 (Berkeley) 6/10/93
 * $FreeBSD$
 */

#ifndef	_NET_IFQ_H_
#define	_NET_IFQ_H_

#ifdef _KERNEL
#include <sys/mbuf.h>		/* ifqueue only? */
#include <sys/buf_ring.h>
#include <net/vnet.h>
#endif /* _KERNEL */
#include <sys/lock.h>		/* XXX */
#include <sys/mutex.h>		/* struct ifqueue */

/*
 * A couple of ugly extra definitions that are required since ifq.h
 * is split from if_var.h.
 */
#define	IF_DUNIT_NONE	-1
void if_inc_counter(struct ifnet *, ift_counter, int64_t inc);

#include <altq/if_altq.h>

/*
 * Structure defining a queue for a network interface.
 */
struct	ifqueue {
	struct	mbuf *ifq_head;
	struct	mbuf *ifq_tail;
	int	ifq_len;
	int	ifq_maxlen;
	struct	mtx ifq_mtx;
};
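
/*
 * Illustrative sketch (not part of the original header): a driver keeping a
 * private ifqueue usually sets it up roughly as follows; the softc and
 * queue field names here are hypothetical.
 *
 *	mtx_init(&sc->fo_queue.ifq_mtx, "foo_queue", NULL, MTX_DEF);
 *	sc->fo_queue.ifq_head = sc->fo_queue.ifq_tail = NULL;
 *	sc->fo_queue.ifq_len = 0;
 *	sc->fo_queue.ifq_maxlen = ifqmaxlen;
 */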

#ifdef _KERNEL
/*
 * Output queues (ifp->if_snd) and slow device input queues (*ifp->if_slowq)
 * are queues of messages stored on ifqueue structures
 * (defined above).  Entries are added to and deleted from these structures
 * by these macros.
 */
#define IF_LOCK(ifq)		mtx_lock(&(ifq)->ifq_mtx)
#define IF_UNLOCK(ifq)		mtx_unlock(&(ifq)->ifq_mtx)
#define	IF_LOCK_ASSERT(ifq)	mtx_assert(&(ifq)->ifq_mtx, MA_OWNED)
#define	_IF_QFULL(ifq)		((ifq)->ifq_len >= (ifq)->ifq_maxlen)
#define	_IF_QLEN(ifq)		((ifq)->ifq_len)

#define	_IF_ENQUEUE(ifq, m) do {				\
	(m)->m_nextpkt = NULL;					\
	if ((ifq)->ifq_tail == NULL)				\
		(ifq)->ifq_head = m;				\
	else							\
		(ifq)->ifq_tail->m_nextpkt = m;			\
	(ifq)->ifq_tail = m;					\
	(ifq)->ifq_len++;					\
} while (0)

#define IF_ENQUEUE(ifq, m) do {					\
	IF_LOCK(ifq);						\
	_IF_ENQUEUE(ifq, m);					\
	IF_UNLOCK(ifq);						\
} while (0)

#define	_IF_PREPEND(ifq, m) do {				\
	(m)->m_nextpkt = (ifq)->ifq_head;			\
	if ((ifq)->ifq_tail == NULL)				\
		(ifq)->ifq_tail = (m);				\
	(ifq)->ifq_head = (m);					\
	(ifq)->ifq_len++;					\
} while (0)

#define IF_PREPEND(ifq, m) do {					\
	IF_LOCK(ifq);						\
	_IF_PREPEND(ifq, m);					\
	IF_UNLOCK(ifq);						\
} while (0)

#define	_IF_DEQUEUE(ifq, m) do {				\
	(m) = (ifq)->ifq_head;					\
	if (m) {						\
		if (((ifq)->ifq_head = (m)->m_nextpkt) == NULL)	\
			(ifq)->ifq_tail = NULL;			\
		(m)->m_nextpkt = NULL;				\
		(ifq)->ifq_len--;				\
	}							\
} while (0)

#define IF_DEQUEUE(ifq, m) do {					\
	IF_LOCK(ifq);						\
	_IF_DEQUEUE(ifq, m);					\
	IF_UNLOCK(ifq);						\
} while (0)

#define	_IF_DEQUEUE_ALL(ifq, m) do {				\
	(m) = (ifq)->ifq_head;					\
	(ifq)->ifq_head = (ifq)->ifq_tail = NULL;		\
	(ifq)->ifq_len = 0;					\
} while (0)

#define	IF_DEQUEUE_ALL(ifq, m) do {				\
	IF_LOCK(ifq);						\
	_IF_DEQUEUE_ALL(ifq, m);				\
	IF_UNLOCK(ifq);						\
} while (0)

#define	_IF_POLL(ifq, m)	((m) = (ifq)->ifq_head)
#define	IF_POLL(ifq, m)		_IF_POLL(ifq, m)

#define _IF_DRAIN(ifq) do {					\
	struct mbuf *m;						\
	for (;;) {						\
		_IF_DEQUEUE(ifq, m);				\
		if (m == NULL)					\
			break;					\
		m_freem(m);					\
	}							\
} while (0)

#define IF_DRAIN(ifq) do {					\
	IF_LOCK(ifq);						\
	_IF_DRAIN(ifq);						\
	IF_UNLOCK(ifq);						\
} while (0)
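
/*
 * Illustrative sketch (not part of the original header): the locked macros
 * above are intended for a simple producer/consumer pattern on a private
 * queue.  "sc->fo_queue" and "foo_process()" are hypothetical names.
 *
 *	Producer:
 *		IF_LOCK(&sc->fo_queue);
 *		if (_IF_QFULL(&sc->fo_queue)) {
 *			IF_UNLOCK(&sc->fo_queue);
 *			m_freem(m);
 *		} else {
 *			_IF_ENQUEUE(&sc->fo_queue, m);
 *			IF_UNLOCK(&sc->fo_queue);
 *		}
 *
 *	Consumer:
 *		for (;;) {
 *			IF_DEQUEUE(&sc->fo_queue, m);
 *			if (m == NULL)
 *				break;
 *			foo_process(sc, m);
 *		}
 */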

int	if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp,
	    int adjust);
#define	IF_HANDOFF(ifq, m, ifp)			\
	if_handoff((struct ifqueue *)ifq, m, ifp, 0)
#define	IF_HANDOFF_ADJ(ifq, m, ifp, adj)	\
	if_handoff((struct ifqueue *)ifq, m, ifp, adj)

void	if_start(struct ifnet *);

#define	IFQ_ENQUEUE(ifq, m, err)					\
do {									\
	IF_LOCK(ifq);							\
	if (ALTQ_IS_ENABLED(ifq))					\
		ALTQ_ENQUEUE(ifq, m, NULL, err);			\
	else {								\
		if (_IF_QFULL(ifq)) {					\
			m_freem(m);					\
			(err) = ENOBUFS;				\
		} else {						\
			_IF_ENQUEUE(ifq, m);				\
			(err) = 0;					\
		}							\
	}								\
	IF_UNLOCK(ifq);							\
} while (0)

#define	IFQ_DEQUEUE_NOLOCK(ifq, m)					\
do {									\
	if (TBR_IS_ENABLED(ifq))					\
		(m) = tbr_dequeue_ptr(ifq, ALTDQ_REMOVE);		\
	else if (ALTQ_IS_ENABLED(ifq))					\
		ALTQ_DEQUEUE(ifq, m);					\
	else								\
		_IF_DEQUEUE(ifq, m);					\
} while (0)

#define	IFQ_DEQUEUE(ifq, m)						\
do {									\
	IF_LOCK(ifq);							\
	IFQ_DEQUEUE_NOLOCK(ifq, m);					\
	IF_UNLOCK(ifq);							\
} while (0)

#define	IFQ_POLL_NOLOCK(ifq, m)						\
do {									\
	if (TBR_IS_ENABLED(ifq))					\
		(m) = tbr_dequeue_ptr(ifq, ALTDQ_POLL);			\
	else if (ALTQ_IS_ENABLED(ifq))					\
		ALTQ_POLL(ifq, m);					\
	else								\
		_IF_POLL(ifq, m);					\
} while (0)

#define	IFQ_POLL(ifq, m)						\
do {									\
	IF_LOCK(ifq);							\
	IFQ_POLL_NOLOCK(ifq, m);					\
	IF_UNLOCK(ifq);							\
} while (0)

#define	IFQ_PURGE_NOLOCK(ifq)						\
do {									\
	if (ALTQ_IS_ENABLED(ifq)) {					\
		ALTQ_PURGE(ifq);					\
	} else								\
		_IF_DRAIN(ifq);						\
} while (0)

#define	IFQ_PURGE(ifq)							\
do {									\
	IF_LOCK(ifq);							\
	IFQ_PURGE_NOLOCK(ifq);						\
	IF_UNLOCK(ifq);							\
} while (0)

#define	IFQ_SET_READY(ifq)						\
	do { ((ifq)->altq_flags |= ALTQF_READY); } while (0)

#define	IFQ_LOCK(ifq)			IF_LOCK(ifq)
#define	IFQ_UNLOCK(ifq)			IF_UNLOCK(ifq)
#define	IFQ_LOCK_ASSERT(ifq)		IF_LOCK_ASSERT(ifq)
#define	IFQ_IS_EMPTY(ifq)		((ifq)->ifq_len == 0)
#define	IFQ_INC_LEN(ifq)		((ifq)->ifq_len++)
#define	IFQ_DEC_LEN(ifq)		(--(ifq)->ifq_len)
#define	IFQ_SET_MAXLEN(ifq, len)	((ifq)->ifq_maxlen = (len))

/*
 * The IFF_DRV_OACTIVE test should really occur in the device driver, not in
 * the handoff logic, as that flag is locked by the device driver.
 */
#define	IFQ_HANDOFF_ADJ(ifp, m, adj, err)				\
do {									\
	int len;							\
	short mflags;							\
									\
	len = (m)->m_pkthdr.len;					\
	mflags = (m)->m_flags;						\
	IFQ_ENQUEUE(&(ifp)->if_snd, m, err);				\
	if ((err) == 0) {						\
		if_inc_counter((ifp), IFCOUNTER_OBYTES, len + (adj));	\
		if (mflags & M_MCAST)					\
			if_inc_counter((ifp), IFCOUNTER_OMCASTS, 1);	\
		if (((ifp)->if_drv_flags & IFF_DRV_OACTIVE) == 0)	\
			if_start(ifp);					\
	} else								\
		if_inc_counter((ifp), IFCOUNTER_OQDROPS, 1);		\
} while (0)

#define	IFQ_HANDOFF(ifp, m, err)					\
	IFQ_HANDOFF_ADJ(ifp, m, 0, err)
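
/*
 * Illustrative sketch (not part of the original header): an output path can
 * hand a packet to the interface send queue with IFQ_HANDOFF(), which
 * enqueues, updates the output counters and kicks if_start() when the
 * driver is not already active.  On failure the mbuf has been consumed by
 * the enqueue macro.
 *
 *	int error;
 *
 *	IFQ_HANDOFF(ifp, m, error);
 *	if (error != 0)
 *		return (error);
 */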

#define	IFQ_DRV_DEQUEUE(ifq, m)						\
do {									\
	(m) = (ifq)->ifq_drv_head;					\
	if (m) {							\
		if (((ifq)->ifq_drv_head = (m)->m_nextpkt) == NULL)	\
			(ifq)->ifq_drv_tail = NULL;			\
		(m)->m_nextpkt = NULL;					\
		(ifq)->ifq_drv_len--;					\
	} else {							\
		IFQ_LOCK(ifq);						\
		IFQ_DEQUEUE_NOLOCK(ifq, m);				\
		while ((ifq)->ifq_drv_len < (ifq)->ifq_drv_maxlen) {	\
			struct mbuf *m0;				\
			IFQ_DEQUEUE_NOLOCK(ifq, m0);			\
			if (m0 == NULL)					\
				break;					\
			m0->m_nextpkt = NULL;				\
			if ((ifq)->ifq_drv_tail == NULL)		\
				(ifq)->ifq_drv_head = m0;		\
			else						\
				(ifq)->ifq_drv_tail->m_nextpkt = m0;	\
			(ifq)->ifq_drv_tail = m0;			\
			(ifq)->ifq_drv_len++;				\
		}							\
		IFQ_UNLOCK(ifq);					\
	}								\
} while (0)

#define	IFQ_DRV_PREPEND(ifq, m)						\
do {									\
	(m)->m_nextpkt = (ifq)->ifq_drv_head;				\
	if ((ifq)->ifq_drv_tail == NULL)				\
		(ifq)->ifq_drv_tail = (m);				\
	(ifq)->ifq_drv_head = (m);					\
	(ifq)->ifq_drv_len++;						\
} while (0)

#define	IFQ_DRV_IS_EMPTY(ifq)						\
	(((ifq)->ifq_drv_len == 0) && ((ifq)->ifq_len == 0))

#define	IFQ_DRV_PURGE(ifq)						\
do {									\
	struct mbuf *m, *n = (ifq)->ifq_drv_head;			\
	while ((m = n) != NULL) {					\
		n = m->m_nextpkt;					\
		m_freem(m);						\
	}								\
	(ifq)->ifq_drv_head = (ifq)->ifq_drv_tail = NULL;		\
	(ifq)->ifq_drv_len = 0;						\
	IFQ_PURGE(ifq);							\
} while (0)
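
/*
 * Illustrative sketch (not part of the original header): the ifq_drv_*
 * fields let a driver's start routine pull a batch of packets off the
 * locked queue and push back the one it cannot fit.  The driver names
 * below are hypothetical, and foo_encap() is assumed to leave the mbuf
 * untouched on failure.
 *
 *	static void
 *	foo_start_locked(struct foo_softc *sc)
 *	{
 *		struct ifnet *ifp = sc->fo_ifp;
 *		struct mbuf *m;
 *
 *		while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 *			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
 *			if (m == NULL)
 *				break;
 *			if (foo_encap(sc, m) != 0) {
 *				IFQ_DRV_PREPEND(&ifp->if_snd, m);
 *				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 *				break;
 *			}
 *		}
 *	}
 */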

static __inline int
drbr_enqueue(struct ifnet *ifp, struct buf_ring *br, struct mbuf *m)
{
	int error = 0;

#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		IFQ_ENQUEUE(&ifp->if_snd, m, error);
		if (error)
			if_inc_counter((ifp), IFCOUNTER_OQDROPS, 1);
		return (error);
	}
#endif
	error = buf_ring_enqueue(br, m);
	if (error)
		m_freem(m);

	return (error);
}

static __inline void
drbr_putback(struct ifnet *ifp, struct buf_ring *br, struct mbuf *new)
{
	/*
	 * The mbuf at the head of the queue needs to be replaced
	 * by this one.
	 */
#ifdef ALTQ
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		/*
		 * In the ALTQ case the peek actually dequeued the mbuf,
		 * so put it back at the head of the queue.
		 */
		IFQ_DRV_PREPEND(&ifp->if_snd, new);
		return;
	}
#endif
	buf_ring_putback_sc(br, new);
}

static __inline struct mbuf *
drbr_peek(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	struct mbuf *m;
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		/*
		 * Pull the mbuf off as a full dequeue, since
		 * drbr_advance() is a no-op in the ALTQ case and
		 * drbr_putback() will prepend it again if needed.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		return (m);
	}
#endif
	return (buf_ring_peek(br));
}

static __inline void
drbr_flush(struct ifnet *ifp, struct buf_ring *br)
{
	struct mbuf *m;

#ifdef ALTQ
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd))
		IFQ_PURGE(&ifp->if_snd);
#endif
	while ((m = buf_ring_dequeue_sc(br)) != NULL)
		m_freem(m);
}

static __inline void
drbr_free(struct buf_ring *br, struct malloc_type *type)
{

	drbr_flush(NULL, br);
	buf_ring_free(br, type);
}

static __inline struct mbuf *
drbr_dequeue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	struct mbuf *m;

	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		return (m);
	}
#endif
	return (buf_ring_dequeue_sc(br));
}

static __inline void
drbr_advance(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	/* Nothing to do here since peek dequeues in altq case */
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd))
		return;
#endif
	return (buf_ring_advance_sc(br));
}

static __inline struct mbuf *
drbr_dequeue_cond(struct ifnet *ifp, struct buf_ring *br,
    int (*func) (struct mbuf *, void *), void *arg)
{
	struct mbuf *m;
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		IFQ_LOCK(&ifp->if_snd);
		IFQ_POLL_NOLOCK(&ifp->if_snd, m);
		if (m != NULL && func(m, arg) == 0) {
			IFQ_UNLOCK(&ifp->if_snd);
			return (NULL);
		}
		IFQ_DEQUEUE_NOLOCK(&ifp->if_snd, m);
		IFQ_UNLOCK(&ifp->if_snd);
		return (m);
	}
#endif
	m = buf_ring_peek(br);
	if (m == NULL || func(m, arg) == 0)
		return (NULL);

	return (buf_ring_dequeue_sc(br));
}
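
/*
 * Illustrative sketch (not part of the original header): drbr_dequeue_cond()
 * only hands out an mbuf when the supplied predicate returns non-zero,
 * which lets a driver skip packets that will not fit into the remaining
 * descriptor space.  The predicate and transmit-queue structure below are
 * hypothetical.
 *
 *	static int
 *	foo_tx_fits(struct mbuf *m, void *arg)
 *	{
 *		struct foo_txq *txq = arg;
 *
 *		return (m->m_pkthdr.len <= txq->avail_bytes);
 *	}
 *
 *	m = drbr_dequeue_cond(ifp, txq->br, foo_tx_fits, txq);
 */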

static __inline int
drbr_empty(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (IFQ_IS_EMPTY(&ifp->if_snd));
#endif
	return (buf_ring_empty(br));
}

static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (1);
#endif
	return (!buf_ring_empty(br));
}

static __inline int
drbr_inuse(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (ifp->if_snd.ifq_len);
#endif
	return (buf_ring_count(br));
}
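
/*
 * Illustrative sketch (not part of the original header): a multiqueue
 * if_transmit path typically drains a buf_ring with the peek/advance/putback
 * helpers above.  The transmit-queue structure and foo_encap() are
 * hypothetical, and foo_encap() is assumed to leave the mbuf untouched on
 * failure.
 *
 *	static void
 *	foo_txq_drain(struct ifnet *ifp, struct foo_txq *txq)
 *	{
 *		struct mbuf *m;
 *
 *		while ((m = drbr_peek(ifp, txq->br)) != NULL) {
 *			if (foo_encap(txq, m) != 0) {
 *				drbr_putback(ifp, txq->br, m);
 *				break;
 *			}
 *			drbr_advance(ifp, txq->br);
 *		}
 *	}
 */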

extern	int ifqmaxlen;

void	if_qflush(struct ifnet *);
void	ifq_init(struct ifaltq *, struct ifnet *ifp);
void	ifq_delete(struct ifaltq *);

#ifdef DEVICE_POLLING
enum poll_cmd {	POLL_ONLY, POLL_AND_CHECK_STATUS };

typedef	int poll_handler_t(struct ifnet *ifp, enum poll_cmd cmd, int count);
int    ether_poll_register(poll_handler_t *h, struct ifnet *ifp);
int    ether_poll_deregister(struct ifnet *ifp);
/* The following should be temporary, till all drivers use the driver API */
typedef	int poll_handler_drv_t(if_t ifh, enum poll_cmd cmd, int count);
int	ether_poll_register_drv(poll_handler_drv_t *h, if_t ifh);
int	ether_poll_deregister_drv(if_t ifh);
#endif /* DEVICE_POLLING */
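
/*
 * Illustrative sketch (not part of the original header): with DEVICE_POLLING
 * a driver registers a handler that the kernel calls with a packet budget.
 * foo_poll(), foo_rxeof() and foo_link_intr() are hypothetical.
 *
 *	static int
 *	foo_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *		int rx_done;
 *
 *		rx_done = foo_rxeof(sc, count);
 *		if (cmd == POLL_AND_CHECK_STATUS)
 *			foo_link_intr(sc);
 *		return (rx_done);
 *	}
 *
 *	error = ether_poll_register(foo_poll, ifp);
 */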

#endif /* _KERNEL */
#endif /* !_NET_IFQ_H_ */