/*-
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _NET_IFQ_VAR_H_
#define _NET_IFQ_VAR_H_

#ifndef _KERNEL
#error "This file should not be included by userland programs."
#endif

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_SERIALIZE_H_
#include <sys/serialize.h>
#endif
#ifndef _SYS_MBUF_H_
#include <sys/mbuf.h>
#endif
#ifndef _NET_IF_VAR_H_
#include <net/if_var.h>
#endif
#ifndef _NET_ALTQ_IF_ALTQ_H_
#include <net/altq/if_altq.h>
#endif

#define ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq) \
	KASSERT(ifsq_get_ifp((ifsq)) == (ifp) && \
	    ifsq_get_index((ifsq)) == ALTQ_SUBQ_INDEX_DEFAULT, \
	    ("not ifp's default subqueue"))

struct ifaltq;
struct ifaltq_subque;

/*
 * Subqueue watchdog
 */
typedef void	(*ifsq_watchdog_t)(struct ifaltq_subque *);

struct ifsubq_watchdog {
	struct callout	wd_callout;
	int		wd_timer;
	struct ifaltq_subque *wd_subq;
	ifsq_watchdog_t	wd_watchdog;
};
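
/*
 * Usage sketch (hypothetical driver "foo"; the foo_* names and softc
 * layout are illustrative assumptions, not part of this API).  A driver
 * binds the watchdog to its default subqueue at attach time, arms it
 * with ifsq_watchdog_start() when the interface comes up, and sets
 * wd_timer whenever it hands packets to the hardware; if the timer
 * expires before the transmit completes, the wd_watchdog callback runs:
 *
 *	static void
 *	foo_watchdog(struct ifaltq_subque *ifsq)
 *	{
 *		struct ifnet *ifp = ifsq_get_ifp(ifsq);
 *		struct foo_softc *sc = ifp->if_softc;
 *
 *		if_printf(ifp, "transmit timeout\n");
 *		foo_init(sc);
 *	}
 *
 *	ifsq_watchdog_init(&sc->foo_wd,
 *	    ifq_get_subq_default(&ifp->if_snd), foo_watchdog);
 */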

/*
 * Support for "classic" ALTQ interfaces.
 */
int		ifsq_classic_enqueue(struct ifaltq_subque *, struct mbuf *,
		    struct altq_pktattr *);
struct mbuf	*ifsq_classic_dequeue(struct ifaltq_subque *, int);
int		ifsq_classic_request(struct ifaltq_subque *, int, void *);
void		ifq_set_classic(struct ifaltq *);

void		ifq_set_maxlen(struct ifaltq *, int);
void		ifq_set_methods(struct ifaltq *, altq_mapsubq_t,
		    ifsq_enqueue_t, ifsq_dequeue_t, ifsq_request_t);
int		ifq_mapsubq_default(struct ifaltq *, int);
int		ifq_mapsubq_modulo(struct ifaltq *, int);
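
/*
 * Typical attach-time queue setup (a sketch; FOO_TXDESC_MAX is a
 * hypothetical descriptor count).  A driver sizes its send queue and
 * marks it ready before registering the interface; multi-ring drivers
 * additionally install a subqueue mapping function such as
 * ifq_mapsubq_modulo():
 *
 *	ifq_set_maxlen(&ifp->if_snd, FOO_TXDESC_MAX - 1);
 *	ifq_set_ready(&ifp->if_snd);
 */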

void		ifsq_devstart(struct ifaltq_subque *ifsq);
void		ifsq_devstart_sched(struct ifaltq_subque *ifsq);

void		ifsq_watchdog_init(struct ifsubq_watchdog *,
		    struct ifaltq_subque *, ifsq_watchdog_t);
void		ifsq_watchdog_start(struct ifsubq_watchdog *);
void		ifsq_watchdog_stop(struct ifsubq_watchdog *);

/*
 * Dispatch a packet to an interface.
 */
int		ifq_dispatch(struct ifnet *, struct mbuf *,
		    struct altq_pktattr *);
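
/*
 * Sketch of the common transmit path (illustrative, not a complete
 * output routine).  Code that does not hold the subqueue's hardware
 * serializer classifies the packet and hands it to ifq_dispatch(),
 * which enqueues it and arranges for the interface start routine to
 * run:
 *
 *	struct altq_pktattr pktattr;
 *
 *	ifq_classify(&ifp->if_snd, m, AF_INET, &pktattr);
 *	error = ifq_dispatch(ifp, m, &pktattr);
 */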

#ifdef ALTQ

static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(_ifq->altq_flags & ALTQF_ENABLED);
}

static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(_ifq->altq_disc != NULL);
}

#else	/* !ALTQ */

static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(0);
}

static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(0);
}

#endif	/* ALTQ */

static __inline int
ifq_is_ready(struct ifaltq *_ifq)
{
	return(_ifq->altq_flags & ALTQF_READY);
}

static __inline void
ifq_set_ready(struct ifaltq *_ifq)
{
	_ifq->altq_flags |= ALTQF_READY;
}

/*
 * Subqueue lock must be held
 */
static __inline int
ifsq_enqueue_locked(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
#ifdef ALTQ
	if (!ifq_is_enabled(_ifsq->ifsq_altq))
		return ifsq_classic_enqueue(_ifsq, _m, _pa);
	else
#endif
	return _ifsq->ifsq_enqueue(_ifsq, _m, _pa);
}

static __inline int
ifsq_enqueue(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
	int _error;

	ALTQ_SQ_LOCK(_ifsq);
	_error = ifsq_enqueue_locked(_ifsq, _m, _pa);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _error;
}

static __inline struct mbuf *
ifsq_dequeue(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	if (_ifsq->ifsq_prepended != NULL) {
		_m = _ifsq->ifsq_prepended;
		_ifsq->ifsq_prepended = NULL;
		ALTQ_SQ_CNTR_DEC(_ifsq, _m->m_pkthdr.len);
		ALTQ_SQ_UNLOCK(_ifsq);
		return _m;
	}

#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		_m = tbr_dequeue(_ifsq, ALTDQ_REMOVE);
	else if (!ifq_is_enabled(_ifsq->ifsq_altq))
		_m = ifsq_classic_dequeue(_ifsq, ALTDQ_REMOVE);
	else
#endif
	_m = _ifsq->ifsq_dequeue(_ifsq, ALTDQ_REMOVE);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}

/*
 * Subqueue lock must be held
 */
static __inline struct mbuf *
ifsq_poll_locked(struct ifaltq_subque *_ifsq)
{
	if (_ifsq->ifsq_prepended != NULL)
		return _ifsq->ifsq_prepended;

#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		return tbr_dequeue(_ifsq, ALTDQ_POLL);
	else if (!ifq_is_enabled(_ifsq->ifsq_altq))
		return ifsq_classic_dequeue(_ifsq, ALTDQ_POLL);
	else
#endif
	return _ifsq->ifsq_dequeue(_ifsq, ALTDQ_POLL);
}

static __inline struct mbuf *
ifsq_poll(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	_m = ifsq_poll_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}

static __inline int
ifsq_poll_pktlen(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;
	int _len = 0;

	ALTQ_SQ_LOCK(_ifsq);

	_m = ifsq_poll_locked(_ifsq);
	if (_m != NULL) {
		M_ASSERTPKTHDR(_m);
		_len = _m->m_pkthdr.len;
	}

	ALTQ_SQ_UNLOCK(_ifsq);

	return _len;
}

/*
 * Subqueue lock must be held
 */
static __inline void
ifsq_purge_locked(struct ifaltq_subque *_ifsq)
{
	if (_ifsq->ifsq_prepended != NULL) {
		ALTQ_SQ_CNTR_DEC(_ifsq, _ifsq->ifsq_prepended->m_pkthdr.len);
		m_freem(_ifsq->ifsq_prepended);
		_ifsq->ifsq_prepended = NULL;
	}

#ifdef ALTQ
	if (!ifq_is_enabled(_ifsq->ifsq_altq))
		ifsq_classic_request(_ifsq, ALTRQ_PURGE, NULL);
	else
#endif
	_ifsq->ifsq_request(_ifsq, ALTRQ_PURGE, NULL);
}

static __inline void
ifsq_purge(struct ifaltq_subque *_ifsq)
{
	ALTQ_SQ_LOCK(_ifsq);
	ifsq_purge_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
}

static __inline void
ifq_lock_all(struct ifaltq *_ifq)
{
	int _q;

	for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
		ALTQ_SQ_LOCK(&_ifq->altq_subq[_q]);
}

static __inline void
ifq_unlock_all(struct ifaltq *_ifq)
{
	int _q;

	for (_q = _ifq->altq_subq_cnt - 1; _q >= 0; --_q)
		ALTQ_SQ_UNLOCK(&_ifq->altq_subq[_q]);
}
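
/*
 * Note that ifq_lock_all() takes the subqueue locks in ascending index
 * order and ifq_unlock_all() drops them in the reverse order.  Keeping
 * a single global acquisition order prevents concurrent lock-all
 * callers from deadlocking against one another.
 */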

/*
 * All of the subqueue locks must be held
 */
static __inline void
ifq_purge_all_locked(struct ifaltq *_ifq)
{
	int _q;

	for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
		ifsq_purge_locked(&_ifq->altq_subq[_q]);
}

static __inline void
ifq_purge_all(struct ifaltq *_ifq)
{
	ifq_lock_all(_ifq);
	ifq_purge_all_locked(_ifq);
	ifq_unlock_all(_ifq);
}

static __inline void
ifq_classify(struct ifaltq *_ifq, struct mbuf *_m, uint8_t _af,
    struct altq_pktattr *_pa)
{
#ifdef ALTQ
	if (ifq_is_enabled(_ifq)) {
		_pa->pattr_af = _af;
		_pa->pattr_hdr = mtod(_m, caddr_t);
		if (ifq_is_enabled(_ifq) &&
		    (_ifq->altq_flags & ALTQF_CLASSIFY)) {
			/* XXX default subqueue */
			struct ifaltq_subque *_ifsq =
			    &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];

			ALTQ_SQ_LOCK(_ifsq);
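			/*
			 * Recheck with the subqueue lock held; ALTQ could
			 * have been disabled between the lockless test
			 * above and acquiring the lock.
			 */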
			if (ifq_is_enabled(_ifq) &&
			    (_ifq->altq_flags & ALTQF_CLASSIFY))
				_ifq->altq_classify(_ifq, _m, _pa);
			ALTQ_SQ_UNLOCK(_ifsq);
		}
	}
#endif
}

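/*
 * Stash a single mbuf at the head of the subqueue.  The ifsq_prepended
 * slot holds at most one packet (enforced by the KASSERT below); it
 * lets a driver put back an mbuf it dequeued but could not transmit,
 * so the next poll or dequeue returns that packet first.
 */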
static __inline void
ifsq_prepend(struct ifaltq_subque *_ifsq, struct mbuf *_m)
{
	ALTQ_SQ_LOCK(_ifsq);
	KASSERT(_ifsq->ifsq_prepended == NULL, ("pending prepended mbuf"));
	_ifsq->ifsq_prepended = _m;
	ALTQ_SQ_CNTR_INC(_ifsq, _m->m_pkthdr.len);
	ALTQ_SQ_UNLOCK(_ifsq);
}

/*
 * Subqueue hardware serializer must be held
 */
static __inline void
ifsq_set_oactive(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_hw_oactive = 1;
}

/*
 * Subqueue hardware serializer must be held
 */
static __inline void
ifsq_clr_oactive(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_hw_oactive = 0;
}

/*
 * Subqueue hardware serializer must be held
 */
static __inline int
ifsq_is_oactive(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_hw_oactive;
}
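
/*
 * Sketch of a driver if_start method built on the helpers above (the
 * foo_* names and the descriptor check are hypothetical).  It runs with
 * the subqueue hardware serializer held, drains packets until the
 * hardware is full, and raises the OACTIVE flag so the stack stops
 * scheduling it until the transmit-done interrupt reclaims descriptors
 * and calls ifsq_clr_oactive():
 *
 *	static void
 *	foo_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *		struct mbuf *m;
 *
 *		ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq);
 *		while (!ifsq_is_empty(ifsq)) {
 *			if (sc->foo_ntxdesc_avail == 0) {
 *				ifsq_set_oactive(ifsq);
 *				break;
 *			}
 *			m = ifsq_dequeue(ifsq);
 *			if (m == NULL)
 *				break;
 *			foo_encap(sc, m);
 *		}
 *	}
 */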

/*
 * Hand a packet to the interface's default subqueue.
 *
 * The default subqueue hardware serializer must be held.  If the
 * subqueue hardware serializer is not held yet, ifq_dispatch()
 * should be used to get better performance.
 */
static __inline int
ifq_handoff(struct ifnet *_ifp, struct mbuf *_m, struct altq_pktattr *_pa)
{
	struct ifaltq_subque *_ifsq;
	int _error;
	int _qid = ALTQ_SUBQ_INDEX_DEFAULT; /* XXX default subqueue */

	_ifsq = &_ifp->if_snd.altq_subq[_qid];

	ASSERT_ALTQ_SQ_SERIALIZED_HW(_ifsq);
	_error = ifsq_enqueue(_ifsq, _m, _pa);
	if (_error == 0) {
		IFNET_STAT_INC(_ifp, obytes, _m->m_pkthdr.len);
		if (_m->m_flags & M_MCAST)
			IFNET_STAT_INC(_ifp, omcasts, 1);
		if (!ifsq_is_oactive(_ifsq))
			(*_ifp->if_start)(_ifp, _ifsq);
	} else {
		IFNET_STAT_INC(_ifp, oqdrops, 1);
	}
	return(_error);
}
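
/*
 * Illustrative only: a single-queue driver whose interface serializer
 * doubles as the default subqueue's hardware serializer can hand off a
 * packet directly from an already-serialized context (that locking
 * arrangement is an assumption about the driver, not a requirement of
 * this API):
 *
 *	error = ifq_handoff(ifp, m, NULL);
 */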

static __inline int
ifsq_is_empty(const struct ifaltq_subque *_ifsq)
{
	return(_ifsq->ifsq_len == 0);
}

/*
 * Subqueue lock must be held
 */
static __inline int
ifsq_data_ready(struct ifaltq_subque *_ifsq)
{
#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		return (ifsq_poll_locked(_ifsq) != NULL);
	else
#endif
	return !ifsq_is_empty(_ifsq);
}

/*
 * Subqueue lock must be held
 */
static __inline int
ifsq_is_started(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_started;
}

/*
 * Subqueue lock must be held
 */
static __inline void
ifsq_set_started(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_started = 1;
}

/*
 * Subqueue lock must be held
 */
static __inline void
ifsq_clr_started(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_started = 0;
}

static __inline struct ifsubq_stage *
ifsq_get_stage(struct ifaltq_subque *_ifsq, int _cpuid)
{
	return &_ifsq->ifsq_stage[_cpuid];
}

static __inline int
ifsq_get_cpuid(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_cpuid;
}

static __inline void
ifsq_set_cpuid(struct ifaltq_subque *_ifsq, int _cpuid)
{
	KASSERT(_cpuid >= 0 && _cpuid < ncpus,
	    ("invalid ifsq_cpuid %d", _cpuid));
	_ifsq->ifsq_cpuid = _cpuid;
}

static __inline struct lwkt_msg *
ifsq_get_ifstart_lmsg(struct ifaltq_subque *_ifsq, int _cpuid)
{
	return &_ifsq->ifsq_ifstart_nmsg[_cpuid].lmsg;
}

static __inline int
ifsq_get_index(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_index;
}

static __inline void
ifsq_set_priv(struct ifaltq_subque *_ifsq, void *_priv)
{
	_ifsq->ifsq_hw_priv = _priv;
}

static __inline void *
ifsq_get_priv(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_hw_priv;
}

static __inline struct ifnet *
ifsq_get_ifp(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_ifp;
}

static __inline void
ifsq_set_hw_serialize(struct ifaltq_subque *_ifsq,
    struct lwkt_serialize *_hwslz)
{
	KASSERT(_hwslz != NULL, ("NULL hw serialize"));
	KASSERT(_ifsq->ifsq_hw_serialize == NULL,
	    ("hw serialize has been setup"));
	_ifsq->ifsq_hw_serialize = _hwslz;
}

static __inline void
ifsq_serialize_hw(struct ifaltq_subque *_ifsq)
{
	lwkt_serialize_enter(_ifsq->ifsq_hw_serialize);
}

static __inline void
ifsq_deserialize_hw(struct ifaltq_subque *_ifsq)
{
	lwkt_serialize_exit(_ifsq->ifsq_hw_serialize);
}

static __inline int
ifsq_tryserialize_hw(struct ifaltq_subque *_ifsq)
{
	return lwkt_serialize_try(_ifsq->ifsq_hw_serialize);
}

static __inline struct ifaltq_subque *
ifq_get_subq_default(const struct ifaltq *_ifq)
{
	return &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
}

static __inline struct ifaltq_subque *
ifq_get_subq(const struct ifaltq *_ifq, int _idx)
{
	KASSERT(_idx >= 0 && _idx < _ifq->altq_subq_cnt,
	    ("invalid qid %d", _idx));
	return &_ifq->altq_subq[_idx];
}

static __inline struct ifaltq_subque *
ifq_map_subq(struct ifaltq *_ifq, int _cpuid)
{
	int _idx = _ifq->altq_mapsubq(_ifq, _cpuid);
	return ifq_get_subq(_ifq, _idx);
}

static __inline void
ifq_set_subq_cnt(struct ifaltq *_ifq, int _cnt)
{
	_ifq->altq_subq_cnt = _cnt;
}

static __inline void
ifq_set_subq_divisor(struct ifaltq *_ifq, uint32_t _divisor)
{

	KASSERT(_divisor > 0, ("invalid divisor %u", _divisor));
	KASSERT(_divisor <= _ifq->altq_subq_cnt,
	    ("invalid divisor %u, max %d", _divisor, _ifq->altq_subq_cnt));
	_ifq->altq_subq_mappriv = _divisor;
}
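
/*
 * The divisor lands in altq_subq_mappriv, which ifq_mapsubq_modulo()
 * uses as its modulus when mapping a CPU id to a subqueue index, so a
 * driver with fewer transmit rings than CPUs would install
 * ifq_mapsubq_modulo() and set the divisor to its ring count.
 */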

/* COMPAT */
static __inline int
ifq_is_oactive(const struct ifaltq *_ifq)
{
	return ifsq_is_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_set_oactive(struct ifaltq *_ifq)
{
	ifsq_set_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_clr_oactive(struct ifaltq *_ifq)
{
	ifsq_clr_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline int
ifq_is_empty(struct ifaltq *_ifq)
{
	return ifsq_is_empty(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_purge(struct ifaltq *_ifq)
{
	ifsq_purge(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline struct mbuf *
ifq_dequeue(struct ifaltq *_ifq)
{
	return ifsq_dequeue(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_prepend(struct ifaltq *_ifq, struct mbuf *_m)
{
	ifsq_prepend(ifq_get_subq_default(_ifq), _m);
}

/* COMPAT */
static __inline void
ifq_set_cpuid(struct ifaltq *_ifq, int _cpuid)
{
	KASSERT(_ifq->altq_subq_cnt == 1,
	    ("invalid subqueue count %d", _ifq->altq_subq_cnt));
	ifsq_set_cpuid(ifq_get_subq_default(_ifq), _cpuid);
}

/* COMPAT */
static __inline void
ifq_set_hw_serialize(struct ifaltq *_ifq, struct lwkt_serialize *_hwslz)
{
	KASSERT(_ifq->altq_subq_cnt == 1,
	    ("invalid subqueue count %d", _ifq->altq_subq_cnt));
	ifsq_set_hw_serialize(ifq_get_subq_default(_ifq), _hwslz);
}

#endif	/* _NET_IFQ_VAR_H_ */