xref: /netbsd/sys/altq/altq_blue.c (revision bf9ec67e)
1 /*	$NetBSD: altq_blue.c,v 1.5 2002/03/05 04:12:57 itojun Exp $	*/
2 /*	$KAME: altq_blue.c,v 1.8 2002/01/07 11:25:40 kjc Exp $	*/
3 
4 /*
5  * Copyright (C) 1997-2000
6  *	Sony Computer Science Laboratories Inc.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  */
30 /*
31  * Copyright (c) 1990-1994 Regents of the University of California.
32  * All rights reserved.
33  *
34  * Redistribution and use in source and binary forms, with or without
35  * modification, are permitted provided that the following conditions
36  * are met:
37  * 1. Redistributions of source code must retain the above copyright
38  *    notice, this list of conditions and the following disclaimer.
39  * 2. Redistributions in binary form must reproduce the above copyright
40  *    notice, this list of conditions and the following disclaimer in the
41  *    documentation and/or other materials provided with the distribution.
42  * 3. All advertising materials mentioning features or use of this software
43  *    must display the following acknowledgement:
44  *	This product includes software developed by the Computer Systems
45  *	Engineering Group at Lawrence Berkeley Laboratory.
46  * 4. Neither the name of the University nor of the Laboratory may be used
47  *    to endorse or promote products derived from this software without
48  *    specific prior written permission.
49  *
50  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
51  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
54  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
56  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
57  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
59  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60  * SUCH DAMAGE.
61  */
62 
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: altq_blue.c,v 1.5 2002/03/05 04:12:57 itojun Exp $");
65 
66 #if defined(__FreeBSD__) || defined(__NetBSD__)
67 #include "opt_altq.h"
68 #if (__FreeBSD__ != 2)
69 #include "opt_inet.h"
70 #ifdef __FreeBSD__
71 #include "opt_inet6.h"
72 #endif
73 #endif
74 #endif /* __FreeBSD__ || __NetBSD__ */
75 #ifdef ALTQ_BLUE	/* blue is enabled by ALTQ_BLUE option in opt_altq.h */
76 
77 #include <sys/param.h>
78 #include <sys/malloc.h>
79 #include <sys/mbuf.h>
80 #include <sys/socket.h>
81 #include <sys/sockio.h>
82 #include <sys/systm.h>
83 #include <sys/proc.h>
84 #include <sys/errno.h>
85 #include <sys/kernel.h>
86 
87 #include <net/if.h>
88 #include <net/if_types.h>
89 #include <netinet/in.h>
90 #include <netinet/in_systm.h>
91 #include <netinet/ip.h>
92 #ifdef INET6
93 #include <netinet/ip6.h>
94 #endif
95 
96 #include <altq/altq.h>
97 #include <altq/altq_conf.h>
98 #include <altq/altq_blue.h>
99 
100 /*
101  * Blue was proposed and implemented by Wu-chang Feng <wuchang@eecs.umich.edu>.
102  * More information on Blue is available from
103  * http://www.eecs.umich.edu/~wuchang/blue/
104  */
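
/*
 * Blue keeps a single marking probability, blue_pmark, scaled so that
 * blue_max_pmark corresponds to probability 1.  The probability is
 * increased when the queue overflows and decreased when a packet arrives
 * at a queue that has been idle, with blue_hold_time limiting how often
 * it may change; see blue_addq() and blue_getq() below.
 */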
105 
106 /* fixed-point arithmetic uses 12 bits for the fractional part */
107 #define	FP_SHIFT	12	/* fixed-point shift */
108 
109 #define	BLUE_LIMIT	200	/* default max queue length */
110 #define	BLUE_STATS		/* collect statistics */
111 
112 /* blue_list keeps all blue_queue_t's allocated. */
113 static blue_queue_t *blue_list = NULL;
114 
115 /* internal function prototypes */
116 static int blue_enqueue __P((struct ifaltq *, struct mbuf *,
117 			     struct altq_pktattr *));
118 static struct mbuf *blue_dequeue __P((struct ifaltq *, int));
119 static int drop_early __P((blue_t *));
120 static int mark_ecn __P((struct mbuf *, struct altq_pktattr *, int));
121 static int blue_detach __P((blue_queue_t *));
122 static int blue_request __P((struct ifaltq *, int, void *));
123 
124 /*
125  * blue device interface
126  */
127 altqdev_decl(blue);
128 
129 int
130 blueopen(dev, flag, fmt, p)
131 	dev_t dev;
132 	int flag, fmt;
133 	struct proc *p;
134 {
135 	/* everything will be done when the queueing scheme is attached. */
136 	return 0;
137 }
138 
139 int
140 blueclose(dev, flag, fmt, p)
141 	dev_t dev;
142 	int flag, fmt;
143 	struct proc *p;
144 {
145 	blue_queue_t *rqp;
146 	int err, error = 0;
147 
148 	while ((rqp = blue_list) != NULL) {
149 		/* destroy all */
150 		err = blue_detach(rqp);
151 		if (err != 0 && error == 0)
152 			error = err;
153 	}
154 
155 	return error;
156 }
157 
158 int
159 blueioctl(dev, cmd, addr, flag, p)
160 	dev_t dev;
161 	ioctlcmd_t cmd;
162 	caddr_t addr;
163 	int flag;
164 	struct proc *p;
165 {
166 	blue_queue_t *rqp;
167 	struct blue_interface *ifacep;
168 	struct ifnet *ifp;
169 	int	error = 0;
170 
171 	/* check super-user privilege */
172 	switch (cmd) {
173 	case BLUE_GETSTATS:
174 		break;
175 	default:
176 #if (__FreeBSD_version > 400000)
177 		if ((error = suser(p)) != 0)
178 			return (error);
179 #else
180 		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
181 			return (error);
182 #endif
183 		break;
184 	}
185 
186 	switch (cmd) {
187 
188 	case BLUE_ENABLE:
189 		ifacep = (struct blue_interface *)addr;
190 		if ((rqp = altq_lookup(ifacep->blue_ifname, ALTQT_BLUE)) == NULL) {
191 			error = EBADF;
192 			break;
193 		}
194 		error = altq_enable(rqp->rq_ifq);
195 		break;
196 
197 	case BLUE_DISABLE:
198 		ifacep = (struct blue_interface *)addr;
199 		if ((rqp = altq_lookup(ifacep->blue_ifname, ALTQT_BLUE)) == NULL) {
200 			error = EBADF;
201 			break;
202 		}
203 		error = altq_disable(rqp->rq_ifq);
204 		break;
205 
206 	case BLUE_IF_ATTACH:
207 		ifp = ifunit(((struct blue_interface *)addr)->blue_ifname);
208 		if (ifp == NULL) {
209 			error = ENXIO;
210 			break;
211 		}
212 
213 		/* allocate and initialize blue_queue_t */
214 		MALLOC(rqp, blue_queue_t *, sizeof(blue_queue_t), M_DEVBUF, M_WAITOK);
215 		bzero(rqp, sizeof(blue_queue_t));
216 
217 		MALLOC(rqp->rq_q, class_queue_t *, sizeof(class_queue_t),
218 		       M_DEVBUF, M_WAITOK);
219 		bzero(rqp->rq_q, sizeof(class_queue_t));
220 
221 		MALLOC(rqp->rq_blue, blue_t *, sizeof(blue_t), M_DEVBUF, M_WAITOK);
222 		bzero(rqp->rq_blue, sizeof(blue_t));
223 
224 		rqp->rq_ifq = &ifp->if_snd;
225 		qtail(rqp->rq_q) = NULL;
226 		qlen(rqp->rq_q) = 0;
227 		qlimit(rqp->rq_q) = BLUE_LIMIT;
228 
229 		/* default packet time: 1000 bytes * 8 bits / 10Mbps = 800 usec */
230 		blue_init(rqp->rq_blue, 0, 800, 1000, 50000);
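		/*
		 * Defaults: no flags, an 800 usec packet time (see above),
		 * a probability scale (blue_max_pmark) of 1000, and a
		 * 50000 usec hold time between adjustments of blue_pmark.
		 */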
231 
232 		/*
233 		 * set BLUE to this ifnet structure.
234 		 */
235 		error = altq_attach(rqp->rq_ifq, ALTQT_BLUE, rqp,
236 				    blue_enqueue, blue_dequeue, blue_request,
237 				    NULL, NULL);
238 		if (error) {
239 			FREE(rqp->rq_blue, M_DEVBUF);
240 			FREE(rqp->rq_q, M_DEVBUF);
241 			FREE(rqp, M_DEVBUF);
242 			break;
243 		}
244 
245 		/* add this state to the blue list */
246 		rqp->rq_next = blue_list;
247 		blue_list = rqp;
248 		break;
249 
250 	case BLUE_IF_DETACH:
251 		ifacep = (struct blue_interface *)addr;
252 		if ((rqp = altq_lookup(ifacep->blue_ifname, ALTQT_BLUE)) == NULL) {
253 			error = EBADF;
254 			break;
255 		}
256 		error = blue_detach(rqp);
257 		break;
258 
259 	case BLUE_GETSTATS:
260 		do {
261 			struct blue_stats *q_stats;
262 			blue_t *rp;
263 
264 			q_stats = (struct blue_stats *)addr;
265 			if ((rqp = altq_lookup(q_stats->iface.blue_ifname,
266 					     ALTQT_BLUE)) == NULL) {
267 				error = EBADF;
268 				break;
269 			}
270 
271 			q_stats->q_len 	   = qlen(rqp->rq_q);
272 			q_stats->q_limit   = qlimit(rqp->rq_q);
273 
274 			rp = rqp->rq_blue;
275 			q_stats->q_pmark = rp->blue_pmark;
276 			q_stats->xmit_packets  = rp->blue_stats.xmit_packets;
277 			q_stats->xmit_bytes    = rp->blue_stats.xmit_bytes;
278 			q_stats->drop_packets  = rp->blue_stats.drop_packets;
279 			q_stats->drop_bytes    = rp->blue_stats.drop_bytes;
280 			q_stats->drop_forced   = rp->blue_stats.drop_forced;
281 			q_stats->drop_unforced = rp->blue_stats.drop_unforced;
282 			q_stats->marked_packets = rp->blue_stats.marked_packets;
283 
284 		} while (0);
285 		break;
286 
287 	case BLUE_CONFIG:
288 		do {
289 			struct blue_conf *fc;
290 			int limit;
291 
292 			fc = (struct blue_conf *)addr;
293 			if ((rqp = altq_lookup(fc->iface.blue_ifname,
294 					       ALTQT_BLUE)) == NULL) {
295 				error = EBADF;
296 				break;
297 			}
298 			limit = fc->blue_limit;
299 			qlimit(rqp->rq_q) = limit;
300 			fc->blue_limit = limit;	/* write back the new value */
301 			if (fc->blue_pkttime > 0)
302 				rqp->rq_blue->blue_pkttime = fc->blue_pkttime;
303 			if (fc->blue_max_pmark > 0)
304 				rqp->rq_blue->blue_max_pmark = fc->blue_max_pmark;
305 			if (fc->blue_hold_time > 0)
306 				rqp->rq_blue->blue_hold_time = fc->blue_hold_time;
307 			rqp->rq_blue->blue_flags = fc->blue_flags;
308 
309 			blue_init(rqp->rq_blue, rqp->rq_blue->blue_flags,
310 				  rqp->rq_blue->blue_pkttime,
311 				  rqp->rq_blue->blue_max_pmark,
312 				  rqp->rq_blue->blue_hold_time);
313 		} while (0);
314 		break;
315 
316 	default:
317 		error = EINVAL;
318 		break;
319 	}
320 	return error;
321 }
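
/*
 * The ioctls above are issued through the blue device node (typically
 * /dev/altq/blue) by a privileged user program such as altqd:
 * BLUE_IF_ATTACH binds blue to an interface's send queue, BLUE_CONFIG
 * sets the queue limit, packet time, probability scale and hold time,
 * and BLUE_ENABLE/BLUE_DISABLE turn the discipline on and off.
 */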
322 
323 static int blue_detach(rqp)
324 	blue_queue_t *rqp;
325 {
326 	blue_queue_t *tmp;
327 	int error = 0;
328 
329 	if (ALTQ_IS_ENABLED(rqp->rq_ifq))
330 		altq_disable(rqp->rq_ifq);
331 
332 	if ((error = altq_detach(rqp->rq_ifq)))
333 		return (error);
334 
335 	if (blue_list == rqp)
336 		blue_list = rqp->rq_next;
337 	else {
338 		for (tmp = blue_list; tmp != NULL; tmp = tmp->rq_next)
339 			if (tmp->rq_next == rqp) {
340 				tmp->rq_next = rqp->rq_next;
341 				break;
342 			}
343 		if (tmp == NULL)
344 			printf("blue_detach: no state found in blue_list!\n");
345 	}
346 
347 	FREE(rqp->rq_q, M_DEVBUF);
348 	FREE(rqp->rq_blue, M_DEVBUF);
349 	FREE(rqp, M_DEVBUF);
350 	return (error);
351 }
352 
353 /*
354  * blue support routines
355  */
356 
357 int
358 blue_init(rp, flags, pkttime, blue_max_pmark, blue_hold_time)
359 	blue_t 	*rp;
360 	int	flags;
361 	int	pkttime;
362 	int	blue_max_pmark;
363 	int	blue_hold_time;
364 {
365 	int npkts_per_sec;
366 
367 	rp->blue_idle = 1;
368 	rp->blue_flags = flags;
369 	rp->blue_pkttime = pkttime;
370 	rp->blue_max_pmark = blue_max_pmark;
371 	rp->blue_hold_time = blue_hold_time;
372 	if (pkttime == 0)
373 		rp->blue_pkttime = 1;
374 
375 	/* when the link is very slow, adjust blue parameters
	 * (the branches below are placeholders; no adjustment is made yet) */
376 	npkts_per_sec = 1000000 / rp->blue_pkttime;
377 	if (npkts_per_sec < 50) {
378 	}
379 	else if (npkts_per_sec < 300) {
380 	}
381 
382 	microtime(&rp->blue_last);
383 	return (0);
384 }
385 
386 /*
387  * enqueue routine:
388  *
389  *	returns: 0 when successfully queued.
390  *		 ENOBUFS when drop occurs.
391  */
392 static int
393 blue_enqueue(ifq, m, pktattr)
394 	struct ifaltq *ifq;
395 	struct mbuf *m;
396 	struct altq_pktattr *pktattr;
397 {
398 	blue_queue_t *rqp = (blue_queue_t *)ifq->altq_disc;
399 	int error = 0;
400 
401 	if (blue_addq(rqp->rq_blue, rqp->rq_q, m, pktattr) == 0)
402 		ifq->ifq_len++;
403 	else
404 		error = ENOBUFS;
405 	return error;
406 }
407 
408 #define	DTYPE_NODROP	0	/* no drop */
409 #define	DTYPE_FORCED	1	/* a "forced" drop */
410 #define	DTYPE_EARLY	2	/* an "unforced" (early) drop */
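
/*
 * A "forced" drop happens when the queue has reached its hard limit and
 * causes blue_pmark to be increased; an "unforced" (early) drop or ECN
 * mark is triggered probabilistically by blue_pmark before the queue
 * fills up.
 */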
411 
412 int
413 blue_addq(rp, q, m, pktattr)
414 	blue_t *rp;
415 	class_queue_t *q;
416 	struct mbuf *m;
417 	struct altq_pktattr *pktattr;
418 {
419 	int droptype;
420 
421 	/*
422 	 * if we were idle, this is an enqueue onto an empty queue
423 	 * and we should decrement marking probability
424 	 *
425 	 */
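	/*
	 * If the queue has been idle for more than a second the
	 * probability is reset to 1 (effectively zero); otherwise it is
	 * decremented by one at most once per blue_hold_time usec.
	 */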
426 	if (rp->blue_idle) {
427 		struct timeval now;
428 		int t;
429 		rp->blue_idle = 0;
430 		microtime(&now);
431 		t = (now.tv_sec - rp->blue_last.tv_sec);
432 		if (t > 1) {
433 			rp->blue_pmark = 1;
434 			microtime(&rp->blue_last);
435 		} else {
436 			t = t * 1000000 + (now.tv_usec - rp->blue_last.tv_usec);
437 			if (t > rp->blue_hold_time) {
438 				rp->blue_pmark--;
439 				if (rp->blue_pmark < 0) rp->blue_pmark = 0;
440 				microtime(&rp->blue_last);
441 			}
442 		}
443 	}
444 
445 	/* see if we mark or drop early (only when more than one packet is already queued) */
446 	droptype = DTYPE_NODROP;
447 	if (drop_early(rp) && qlen(q) > 1) {
448 		/* mark or drop by blue */
449 		if ((rp->blue_flags & BLUEF_ECN) &&
450 		    mark_ecn(m, pktattr, rp->blue_flags)) {
451 			/* successfully marked.  do not drop. */
452 #ifdef BLUE_STATS
453 			rp->blue_stats.marked_packets++;
454 #endif
455 		} else {
456 			/* unforced drop by blue */
457 			droptype = DTYPE_EARLY;
458 		}
459 	}
460 
461 	/*
462 	 * if the queue length hits the hard limit, it's a forced drop.
463 	 */
464 	if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q))
465 		droptype = DTYPE_FORCED;
466 
467 	/* if successful or forced drop, enqueue this packet. */
468 	if (droptype != DTYPE_EARLY)
469 		_addq(q, m);
470 
471 	if (droptype != DTYPE_NODROP) {
472 		if (droptype == DTYPE_EARLY) {
473 			/* drop the incoming packet */
474 #ifdef BLUE_STATS
475 			rp->blue_stats.drop_unforced++;
476 #endif
477 		} else {
478 			struct timeval now;
479 			int t;
480 			/* forced drop, select a victim packet in the queue. */
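			/*
			 * The incoming packet was already enqueued above, so
			 * a randomly chosen packet is dropped instead, and
			 * the marking probability is raised by one eighth of
			 * blue_max_pmark, at most once per blue_hold_time
			 * usec.
			 */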
481 			m = _getq_random(q);
482 			microtime(&now);
483 			t = (now.tv_sec - rp->blue_last.tv_sec);
484 			t = t * 1000000 + (now.tv_usec - rp->blue_last.tv_usec);
485 			if (t > rp->blue_hold_time) {
486 				rp->blue_pmark += rp->blue_max_pmark >> 3;
487 				if (rp->blue_pmark > rp->blue_max_pmark)
488 					rp->blue_pmark = rp->blue_max_pmark;
489 				microtime(&rp->blue_last);
490 			}
491 #ifdef BLUE_STATS
492 			rp->blue_stats.drop_forced++;
493 #endif
494 		}
495 #ifdef BLUE_STATS
496 		rp->blue_stats.drop_packets++;
497 		rp->blue_stats.drop_bytes += m->m_pkthdr.len;
498 #endif
499 		m_freem(m);
500 		return (-1);
501 	}
502 	/* successfully queued */
503 	return (0);
504 }
505 
506 /*
507  * early-drop probability is kept in blue_pmark
508  *
509  */
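/*
 * blue_pmark is compared against a uniform random value in
 * [0, blue_max_pmark); for example, with blue_max_pmark = 1000 a
 * blue_pmark of 250 marks or drops roughly one packet in four.
 */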
510 static int
511 drop_early(rp)
512 	blue_t *rp;
513 {
514 	if ((random() % rp->blue_max_pmark) < rp->blue_pmark) {
515 		/* drop or mark */
516 		return (1);
517 	}
518 	/* no drop/mark */
519 	return (0);
520 }
521 
522 /*
523  * try to set the CE (congestion experienced) bit in the packet.
524  *    returns 1 if successfully marked, 0 otherwise.
525  */
526 static int
527 mark_ecn(m, pktattr, flags)
528 	struct mbuf *m;
529 	struct altq_pktattr *pktattr;
530 	int flags;
531 {
532 	struct mbuf *m0;
533 
534 	if (pktattr == NULL ||
535 	    (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
536 		return (0);
537 
538 	/* verify that pattr_hdr is within the mbuf data */
539 	for (m0 = m; m0 != NULL; m0 = m0->m_next)
540 		if ((pktattr->pattr_hdr >= m0->m_data) &&
541 		    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
542 			break;
543 	if (m0 == NULL) {
544 		/* ick, pattr_hdr is stale */
545 		pktattr->pattr_af = AF_UNSPEC;
546 		return (0);
547 	}
548 
549 	switch (pktattr->pattr_af) {
550 	case AF_INET:
551 		if (flags & BLUEF_ECN4) {
552 			struct ip *ip = (struct ip *)pktattr->pattr_hdr;
553 			u_int8_t otos;
554 			int sum;
555 
556 			if (ip->ip_v != 4)
557 				return (0);	/* version mismatch! */
558 			if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT)
559 				return (0);	/* not-ECT */
560 			if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE)
561 				return (1);	/* already marked */
562 
563 			/*
564 			 * ecn-capable but not marked,
565 			 * mark CE and update checksum
566 			 */
567 			otos = ip->ip_tos;
568 			ip->ip_tos |= IPTOS_ECN_CE;
569 			/*
570 			 * update checksum (from RFC1624)
571 			 *	   HC' = ~(~HC + ~m + m')
572 			 */
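			/*
			 * Only the TOS byte changes, so the old and new TOS
			 * values can stand in for the 16-bit words m and m';
			 * the unchanged high byte cancels out of ~m + m'.
			 */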
573 			sum = ~ntohs(ip->ip_sum) & 0xffff;
574 			sum += (~otos & 0xffff) + ip->ip_tos;
575 			sum = (sum >> 16) + (sum & 0xffff);
576 			sum += (sum >> 16);  /* add carry */
577 			ip->ip_sum = htons(~sum & 0xffff);
578 			return (1);
579 		}
580 		break;
581 #ifdef INET6
582 	case AF_INET6:
583 		if (flags & BLUEF_ECN6) {
584 			struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
585 			u_int32_t flowlabel;
586 
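			/*
			 * ip6_flow carries version (4 bits), traffic class
			 * (8 bits) and flow label (20 bits); after ntohl()
			 * the ECN bits of the traffic class sit at bits
			 * 20-21, hence the IPTOS_ECN_* values shifted by 20.
			 */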
587 			flowlabel = ntohl(ip6->ip6_flow);
588 			if ((flowlabel >> 28) != 6)
589 				return (0);	/* version mismatch! */
590 			if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
591 			    (IPTOS_ECN_NOTECT << 20))
592 				return (0);	/* not-ECT */
593 			if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
594 			    (IPTOS_ECN_CE << 20))
595 				return (1);	/* already marked */
596 			/*
597 			 * ecn-capable but not marked,  mark CE
598 			 */
599 			flowlabel |= (IPTOS_ECN_CE << 20);
600 			ip6->ip6_flow = htonl(flowlabel);
601 			return (1);
602 		}
603 		break;
604 #endif  /* INET6 */
605 	}
606 
607 	/* not marked */
608 	return (0);
609 }
610 
611 /*
612  * dequeue routine:
613  *	must be called in splnet.
614  *
615  *	returns: mbuf dequeued.
616  *		 NULL when no packet is available in the queue.
617  */
618 
619 static struct mbuf *
620 blue_dequeue(ifq, op)
621 	struct ifaltq *ifq;
622 	int op;
623 {
624 	blue_queue_t *rqp = (blue_queue_t *)ifq->altq_disc;
625 	struct mbuf *m = NULL;
626 
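	/*
	 * ALTDQ_POLL returns the packet at the head of the queue without
	 * removing it; ALTDQ_REMOVE dequeues it for transmission.
	 */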
627 	if (op == ALTDQ_POLL)
628 		return (qhead(rqp->rq_q));
629 
630 	m = blue_getq(rqp->rq_blue, rqp->rq_q);
631 	if (m != NULL)
632 		ifq->ifq_len--;
633 	return m;
634 }
635 
636 struct mbuf *blue_getq(rp, q)
637 	blue_t *rp;
638 	class_queue_t *q;
639 {
640 	struct mbuf *m;
641 
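	/*
	 * When the queue is empty, record the start of the idle period so
	 * that the next enqueue can decay the marking probability
	 * (see blue_addq).
	 */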
642 	if ((m = _getq(q)) == NULL) {
643 		if (rp->blue_idle == 0) {
644 			rp->blue_idle = 1;
645 			microtime(&rp->blue_last);
646 		}
647 		return NULL;
648 	}
649 
650 	rp->blue_idle = 0;
651 #ifdef BLUE_STATS
652 	rp->blue_stats.xmit_packets++;
653 	rp->blue_stats.xmit_bytes += m->m_pkthdr.len;
654 #endif
655 	return (m);
656 }
657 
658 static int
659 blue_request(ifq, req, arg)
660 	struct ifaltq *ifq;
661 	int req;
662 	void *arg;
663 {
664 	blue_queue_t *rqp = (blue_queue_t *)ifq->altq_disc;
665 
666 	switch (req) {
667 	case ALTRQ_PURGE:
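		/*
		 * Discard every queued packet and reset the interface
		 * queue length accordingly.
		 */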
668 		_flushq(rqp->rq_q);
669 		if (ALTQ_IS_ENABLED(ifq))
670 			ifq->ifq_len = 0;
671 		break;
672 	}
673 	return (0);
674 }
675 
676 
677 #ifdef KLD_MODULE
678 
679 static struct altqsw blue_sw =
680 	{"blue", blueopen, blueclose, blueioctl};
681 
682 ALTQ_MODULE(altq_blue, ALTQT_BLUE, &blue_sw);
683 
684 #endif /* KLD_MODULE */
685 
686 #endif /* ALTQ_BLUE */
687