/*-
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _NET_IFQ_VAR_H_
#define _NET_IFQ_VAR_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_SERIALIZE_H_
#include <sys/serialize.h>
#endif
#ifndef _SYS_MBUF_H_
#include <sys/mbuf.h>
#endif
#ifndef _NET_IF_VAR_H_
#include <net/if_var.h>
#endif
#ifndef _NET_ALTQ_IF_ALTQ_H_
#include <net/altq/if_altq.h>
#endif

#define ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq) \
	KASSERT(ifsq_get_ifp((ifsq)) == (ifp) && \
	    ifsq_get_index((ifsq)) == ALTQ_SUBQ_INDEX_DEFAULT, \
	    ("not ifp's default subqueue"))

struct ifaltq;
struct ifaltq_subque;

typedef void	(*ifsq_watchdog_t)(struct ifaltq_subque *);

struct ifsubq_watchdog {
	struct callout	wd_callout;
	int		wd_timer;
	struct ifaltq_subque *wd_subq;
	ifsq_watchdog_t	wd_watchdog;
};

/*
 * Support for "classic" ALTQ interfaces.
 */
int		ifsq_classic_enqueue(struct ifaltq_subque *, struct mbuf *,
		    struct altq_pktattr *);
struct mbuf	*ifsq_classic_dequeue(struct ifaltq_subque *, struct mbuf *,
		    int);
int		ifsq_classic_request(struct ifaltq_subque *, int, void *);
void		ifq_set_classic(struct ifaltq *);

void		ifq_set_maxlen(struct ifaltq *, int);
void		ifq_set_methods(struct ifaltq *, altq_mapsubq_t,
		    ifsq_enqueue_t, ifsq_dequeue_t, ifsq_request_t);
int		ifq_mapsubq_default(struct ifaltq *, int);
int		ifq_mapsubq_mask(struct ifaltq *, int);

void		ifsq_devstart(struct ifaltq_subque *ifsq);
void		ifsq_devstart_sched(struct ifaltq_subque *ifsq);

void		ifsq_watchdog_init(struct ifsubq_watchdog *,
		    struct ifaltq_subque *, ifsq_watchdog_t);
void		ifsq_watchdog_start(struct ifsubq_watchdog *);
void		ifsq_watchdog_stop(struct ifsubq_watchdog *);
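
/*
 * Typical ifsubq_watchdog usage (an illustrative sketch, not taken
 * from any specific driver): call ifsq_watchdog_init() at attach
 * time with the default subqueue and a reset callback, then
 * ifsq_watchdog_start() when the interface is brought up and
 * ifsq_watchdog_stop() when it is brought down.  The transmit path
 * arms wd_timer; if it counts down to zero before the hardware
 * reports TX completion, the callback runs to recover the device.
 */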

/*
 * Dispatch a packet to an interface.
 */
int		ifq_dispatch(struct ifnet *, struct mbuf *,
		    struct altq_pktattr *);
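
/*
 * Illustrative example (not part of this header's API surface): a
 * protocol output path typically classifies the packet first and
 * then dispatches it without holding the interface TX serializer:
 *
 *	struct altq_pktattr pktattr;
 *
 *	ifq_classify(&ifp->if_snd, m, AF_INET, &pktattr);
 *	error = ifq_dispatch(ifp, m, &pktattr);
 */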

#ifdef ALTQ

static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(_ifq->altq_flags & ALTQF_ENABLED);
}

static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(_ifq->altq_disc != NULL);
}

#else	/* !ALTQ */

static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(0);
}

static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(0);
}

#endif	/* ALTQ */

static __inline int
ifq_is_ready(struct ifaltq *_ifq)
{
	return(_ifq->altq_flags & ALTQF_READY);
}

static __inline void
ifq_set_ready(struct ifaltq *_ifq)
{
	_ifq->altq_flags |= ALTQF_READY;
}

/*
 * The subqueue lock (ALTQ_SQ_LOCK) must be held
 */
static __inline int
ifsq_enqueue_locked(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
#ifdef ALTQ
	if (!ifq_is_enabled(_ifsq->ifsq_altq))
		return ifsq_classic_enqueue(_ifsq, _m, _pa);
	else
#endif
	return _ifsq->ifsq_enqueue(_ifsq, _m, _pa);
}

static __inline int
ifsq_enqueue(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
	int _error;

	ALTQ_SQ_LOCK(_ifsq);
	_error = ifsq_enqueue_locked(_ifsq, _m, _pa);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _error;
}

static __inline struct mbuf *
ifsq_dequeue(struct ifaltq_subque *_ifsq, struct mbuf *_mpolled)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	if (_ifsq->ifsq_prepended != NULL) {
		_m = _ifsq->ifsq_prepended;
		_ifsq->ifsq_prepended = NULL;
		KKASSERT(_ifsq->ifq_len > 0);
		_ifsq->ifq_len--;
		ALTQ_SQ_UNLOCK(_ifsq);
		return _m;
	}

#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		_m = tbr_dequeue(_ifsq, _mpolled, ALTDQ_REMOVE);
	else if (!ifq_is_enabled(_ifsq->ifsq_altq))
		_m = ifsq_classic_dequeue(_ifsq, _mpolled, ALTDQ_REMOVE);
	else
#endif
	_m = _ifsq->ifsq_dequeue(_ifsq, _mpolled, ALTDQ_REMOVE);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}

/*
 * The subqueue lock (ALTQ_SQ_LOCK) must be held
 */
static __inline struct mbuf *
ifsq_poll_locked(struct ifaltq_subque *_ifsq)
{
	if (_ifsq->ifsq_prepended != NULL)
		return _ifsq->ifsq_prepended;

#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		return tbr_dequeue(_ifsq, NULL, ALTDQ_POLL);
	else if (!ifq_is_enabled(_ifsq->ifsq_altq))
		return ifsq_classic_dequeue(_ifsq, NULL, ALTDQ_POLL);
	else
#endif
	return _ifsq->ifsq_dequeue(_ifsq, NULL, ALTDQ_POLL);
}

static __inline struct mbuf *
ifsq_poll(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	_m = ifsq_poll_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}
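
/*
 * A driver start routine might drain its subqueue along these lines
 * (an illustrative sketch; "sc", "xx_tx_ring_full" and "xx_encap"
 * are hypothetical driver-local names):
 *
 *	while (!ifsq_is_empty(ifsq)) {
 *		struct mbuf *m;
 *
 *		if (xx_tx_ring_full(sc)) {
 *			ifsq_set_oactive(ifsq);
 *			break;
 *		}
 *		m = ifsq_dequeue(ifsq, NULL);
 *		if (m == NULL)
 *			break;
 *		xx_encap(sc, m);
 *	}
 *
 * Drivers that must reserve hardware resources before committing to
 * a packet can ifsq_poll() the head first and only ifsq_dequeue()
 * it once transmission is assured.
 */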

/*
 * The subqueue lock (ALTQ_SQ_LOCK) must be held
 */
static __inline void
ifsq_purge_locked(struct ifaltq_subque *_ifsq)
{
	if (_ifsq->ifsq_prepended != NULL) {
		m_freem(_ifsq->ifsq_prepended);
		_ifsq->ifsq_prepended = NULL;
		KKASSERT(_ifsq->ifq_len > 0);
		_ifsq->ifq_len--;
	}

#ifdef ALTQ
	if (!ifq_is_enabled(_ifsq->ifsq_altq))
		ifsq_classic_request(_ifsq, ALTRQ_PURGE, NULL);
	else
#endif
	_ifsq->ifsq_request(_ifsq, ALTRQ_PURGE, NULL);
}

static __inline void
ifsq_purge(struct ifaltq_subque *_ifsq)
{
	ALTQ_SQ_LOCK(_ifsq);
	ifsq_purge_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
}

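/*
 * All subqueue locks are taken in ascending index order, so
 * concurrent callers of ifq_lock_all() cannot deadlock against
 * each other; the locks are released in the reverse order.
 */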
static __inline void
ifq_lock_all(struct ifaltq *_ifq)
{
	int _q;

	for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
		ALTQ_SQ_LOCK(&_ifq->altq_subq[_q]);
}

static __inline void
ifq_unlock_all(struct ifaltq *_ifq)
{
	int _q;

	for (_q = _ifq->altq_subq_cnt - 1; _q >= 0; --_q)
		ALTQ_SQ_UNLOCK(&_ifq->altq_subq[_q]);
}

/*
 * All subqueue locks must be held (see ifq_lock_all())
 */
static __inline void
ifq_purge_all_locked(struct ifaltq *_ifq)
{
	int _q;

	for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
		ifsq_purge_locked(&_ifq->altq_subq[_q]);
}

static __inline void
ifq_purge_all(struct ifaltq *_ifq)
{
	ifq_lock_all(_ifq);
	ifq_purge_all_locked(_ifq);
	ifq_unlock_all(_ifq);
}

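/*
 * Note that ifq_classify() rechecks ifq_is_enabled() and
 * ALTQF_CLASSIFY after taking the default subqueue lock: ALTQ may
 * be disabled between the cheap unlocked test and the call into
 * altq_classify(), so the test must be repeated under the lock.
 */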
static __inline void
ifq_classify(struct ifaltq *_ifq, struct mbuf *_m, uint8_t _af,
    struct altq_pktattr *_pa)
{
#ifdef ALTQ
	if (ifq_is_enabled(_ifq)) {
		_pa->pattr_af = _af;
		_pa->pattr_hdr = mtod(_m, caddr_t);
		if (ifq_is_enabled(_ifq) &&
		    (_ifq->altq_flags & ALTQF_CLASSIFY)) {
			/* XXX default subqueue */
			struct ifaltq_subque *_ifsq =
			    &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];

			ALTQ_SQ_LOCK(_ifsq);
			if (ifq_is_enabled(_ifq) &&
			    (_ifq->altq_flags & ALTQF_CLASSIFY))
				_ifq->altq_classify(_ifq, _m, _pa);
			ALTQ_SQ_UNLOCK(_ifsq);
		}
	}
#endif
}

static __inline void
ifsq_prepend(struct ifaltq_subque *_ifsq, struct mbuf *_m)
{
	ALTQ_SQ_LOCK(_ifsq);
	KASSERT(_ifsq->ifsq_prepended == NULL, ("pending prepended mbuf"));
	_ifsq->ifsq_prepended = _m;
	_ifsq->ifq_len++;
	ALTQ_SQ_UNLOCK(_ifsq);
}

/*
 * Interface TX serializer must be held
 */
static __inline void
ifsq_set_oactive(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_hw_oactive = 1;
}

/*
 * Interface TX serializer must be held
 */
static __inline void
ifsq_clr_oactive(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_hw_oactive = 0;
}

/*
 * Interface TX serializer must be held
 */
static __inline int
ifsq_is_oactive(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_hw_oactive;
}

/*
 * Hand a packet to an interface.
 *
 * Interface TX serializer must be held.  If the interface TX
 * serializer is not held yet, ifq_dispatch() should be used
 * to get better performance.
 */
static __inline int
ifq_handoff(struct ifnet *_ifp, struct mbuf *_m, struct altq_pktattr *_pa)
{
	struct ifaltq_subque *_ifsq;
	int _error;
	int _qid = ALTQ_SUBQ_INDEX_DEFAULT; /* XXX default subqueue */

	_ifsq = &_ifp->if_snd.altq_subq[_qid];

	ASSERT_ALTQ_SQ_SERIALIZED_HW(_ifsq);
	_error = ifsq_enqueue(_ifsq, _m, _pa);
	if (_error == 0) {
		IFNET_STAT_INC(_ifp, obytes, _m->m_pkthdr.len);
		if (_m->m_flags & M_MCAST)
			IFNET_STAT_INC(_ifp, omcasts, 1);
		if (!ifsq_is_oactive(_ifsq))
			(*_ifp->if_start)(_ifp, _ifsq);
	}
	return(_error);
}
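
/*
 * Illustrative example: a caller that already holds the interface
 * TX serializer, e.g. code running from within the driver, may hand
 * a packet off directly (error handling omitted for brevity):
 *
 *	ifsq_serialize_hw(ifsq);
 *	error = ifq_handoff(ifp, m, NULL);
 *	ifsq_deserialize_hw(ifsq);
 */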

static __inline int
ifsq_is_empty(const struct ifaltq_subque *_ifsq)
{
	return(_ifsq->ifq_len == 0);
}

/*
 * The subqueue lock (ALTQ_SQ_LOCK) must be held
 */
static __inline int
ifsq_data_ready(struct ifaltq_subque *_ifsq)
{
#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		return (ifsq_poll_locked(_ifsq) != NULL);
	else
#endif
	return !ifsq_is_empty(_ifsq);
}

/*
 * The subqueue lock (ALTQ_SQ_LOCK) must be held
 */
static __inline int
ifsq_is_started(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_started;
}

/*
 * The subqueue lock (ALTQ_SQ_LOCK) must be held
 */
static __inline void
ifsq_set_started(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_started = 1;
}

/*
 * The subqueue lock (ALTQ_SQ_LOCK) must be held
 */
static __inline void
ifsq_clr_started(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_started = 0;
}

static __inline struct ifsubq_stage *
ifsq_get_stage(struct ifaltq_subque *_ifsq, int _cpuid)
{
	return &_ifsq->ifsq_stage[_cpuid];
}

static __inline int
ifsq_get_cpuid(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_cpuid;
}

static __inline void
ifsq_set_cpuid(struct ifaltq_subque *_ifsq, int _cpuid)
{
	KASSERT(_cpuid >= 0 && _cpuid < ncpus,
	    ("invalid ifsq_cpuid %d", _cpuid));
	_ifsq->ifsq_cpuid = _cpuid;
}

static __inline struct lwkt_msg *
ifsq_get_ifstart_lmsg(struct ifaltq_subque *_ifsq, int _cpuid)
{
	return &_ifsq->ifsq_ifstart_nmsg[_cpuid].lmsg;
}

static __inline int
ifsq_get_index(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_index;
}

static __inline void
ifsq_set_priv(struct ifaltq_subque *_ifsq, void *_priv)
{
	_ifsq->ifsq_hw_priv = _priv;
}

static __inline void *
ifsq_get_priv(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_hw_priv;
}

static __inline struct ifnet *
ifsq_get_ifp(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_ifp;
}

static __inline void
ifsq_set_hw_serialize(struct ifaltq_subque *_ifsq,
    struct lwkt_serialize *_hwslz)
{
	KASSERT(_hwslz != NULL, ("NULL hw serialize"));
	KASSERT(_ifsq->ifsq_hw_serialize == NULL,
	    ("hw serialize has been setup"));
	_ifsq->ifsq_hw_serialize = _hwslz;
}

static __inline void
ifsq_serialize_hw(struct ifaltq_subque *_ifsq)
{
	lwkt_serialize_enter(_ifsq->ifsq_hw_serialize);
}

static __inline void
ifsq_deserialize_hw(struct ifaltq_subque *_ifsq)
{
	lwkt_serialize_exit(_ifsq->ifsq_hw_serialize);
}

static __inline int
ifsq_tryserialize_hw(struct ifaltq_subque *_ifsq)
{
	return lwkt_serialize_try(_ifsq->ifsq_hw_serialize);
}
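
/*
 * The hardware serializer installed by ifsq_set_hw_serialize()
 * protects the driver's TX path; ifq_handoff() and the
 * ifsq_*_oactive() helpers above assume it is held.
 * ifsq_tryserialize_hw() permits opportunistic, non-blocking entry;
 * callers must be prepared to fall back, e.g. to
 * ifsq_devstart_sched(), when it fails.
 */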

static __inline struct ifaltq_subque *
ifq_get_subq_default(const struct ifaltq *_ifq)
{
	return &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
}

static __inline struct ifaltq_subque *
ifq_get_subq(const struct ifaltq *_ifq, int _idx)
{
	KASSERT(_idx >= 0 && _idx < _ifq->altq_subq_cnt,
	    ("invalid qid %d", _idx));
	return &_ifq->altq_subq[_idx];
}

static __inline struct ifaltq_subque *
ifq_map_subq(struct ifaltq *_ifq, int _cpuid)
{
	int _idx = _ifq->altq_mapsubq(_ifq, _cpuid);
	return ifq_get_subq(_ifq, _idx);
}

static __inline void
ifq_set_subq_cnt(struct ifaltq *_ifq, int _cnt)
{
	_ifq->altq_subq_cnt = _cnt;
}

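/*
 * The subqueue mask must be one less than a power of two (the
 * KASSERT below verifies that _mask + 1 shares no bits with _mask),
 * which lets ifq_mapsubq_mask() pick a subqueue with a cheap
 * bitwise AND; e.g. a mask of 0x3 spreads packets over subqueues
 * 0 through 3.
 */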
static __inline void
ifq_set_subq_mask(struct ifaltq *_ifq, uint32_t _mask)
{
	KASSERT(((_mask + 1) & _mask) == 0, ("invalid mask %08x", _mask));
	_ifq->altq_subq_mask = _mask;
}

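/*
 * The COMPAT wrappers below operate on the interface's default
 * subqueue; they preserve the traditional single-queue ifq_* API
 * for drivers that are not subqueue-aware.
 */
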
/* COMPAT */
static __inline int
ifq_is_oactive(const struct ifaltq *_ifq)
{
	return ifsq_is_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_set_oactive(struct ifaltq *_ifq)
{
	ifsq_set_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_clr_oactive(struct ifaltq *_ifq)
{
	ifsq_clr_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline int
ifq_is_empty(struct ifaltq *_ifq)
{
	return ifsq_is_empty(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_purge(struct ifaltq *_ifq)
{
	ifsq_purge(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline struct mbuf *
ifq_dequeue(struct ifaltq *_ifq, struct mbuf *_mpolled)
{
	return ifsq_dequeue(ifq_get_subq_default(_ifq), _mpolled);
}

/* COMPAT */
static __inline void
ifq_prepend(struct ifaltq *_ifq, struct mbuf *_m)
{
	ifsq_prepend(ifq_get_subq_default(_ifq), _m);
}

/* COMPAT */
static __inline void
ifq_set_cpuid(struct ifaltq *_ifq, int _cpuid)
{
	KASSERT(_ifq->altq_subq_cnt == 1,
	    ("invalid subqueue count %d", _ifq->altq_subq_cnt));
	ifsq_set_cpuid(ifq_get_subq_default(_ifq), _cpuid);
}

#endif	/* _KERNEL */
#endif	/* _NET_IFQ_VAR_H_ */