xref: /dragonfly/sys/net/altq/altq_rmclass.c (revision dcb5d66b)
1 /*	@(#)rm_class.c  1.48     97/12/05 SMI	*/
2 /*	$KAME: altq_rmclass.c,v 1.18 2003/11/06 06:32:53 kjc Exp $	*/
3 
4 /*
5  * Copyright (c) 1991-1997 Regents of the University of California.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *      This product includes software developed by the Network Research
19  *      Group at Lawrence Berkeley Laboratory.
20  * 4. Neither the name of the University nor of the Laboratory may be used
21  *    to endorse or promote products derived from this software without
22  *    specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * LBL code modified by speer@eng.sun.com, May 1997.
37  * For questions and/or comments, please send mail to cbq@ee.lbl.gov
38  */
39 
40 #include "opt_altq.h"
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 
44 #ifdef ALTQ_CBQ	/* cbq is enabled by ALTQ_CBQ option in opt_altq.h */
45 
46 #include <sys/param.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/socket.h>
50 #include <sys/systm.h>
51 #include <sys/callout.h>
52 #include <sys/errno.h>
53 #include <sys/time.h>
54 #include <sys/thread.h>
55 #include <sys/thread2.h>
56 
57 #include <net/if.h>
58 #include <net/netmsg2.h>
59 #include <net/netisr2.h>
60 
61 #include <net/altq/altq.h>
62 #include <net/altq/altq_rmclass.h>
63 #include <net/altq/altq_rmclass_debug.h>
64 #include <net/altq/altq_red.h>
65 #include <net/altq/altq_rio.h>
66 
67 #ifdef CBQ_TRACE
68 static struct cbqtrace cbqtrace_buffer[NCBQTRACE+1];
69 static struct cbqtrace *cbqtrace_ptr = NULL;
70 static int cbqtrace_count;
71 #endif
72 
73 /*
74  * Local Macros
75  */
76 
77 #define	reset_cutoff(ifd)	{ ifd->cutoff_ = RM_MAXDEPTH; }
78 
79 /*
80  * Local routines.
81  */
82 
83 static int	rmc_satisfied(struct rm_class *, struct timeval *);
84 static void	rmc_wrr_set_weights(struct rm_ifdat *);
85 static void	rmc_depth_compute(struct rm_class *);
86 static void	rmc_depth_recompute(rm_class_t *);
87 
88 static struct mbuf *_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
89 static struct mbuf *_rmc_prr_dequeue_next(struct rm_ifdat *, int);
90 
91 static int	_rmc_addq(rm_class_t *, struct mbuf *);
92 static void	_rmc_dropq(rm_class_t *);
93 static struct mbuf *_rmc_getq(rm_class_t *);
94 static struct mbuf *_rmc_pollq(rm_class_t *);
95 
96 static int	rmc_under_limit(struct rm_class *, struct timeval *);
97 static void	rmc_tl_satisfied(struct rm_ifdat *, struct timeval *);
98 static void	rmc_drop_action(struct rm_class *);
99 static void	rmc_restart(void *);
100 static void	rmc_restart_dispatch(netmsg_t);
101 static void	rmc_root_overlimit(struct rm_class *, struct rm_class *);
102 
103 #define	BORROW_OFFTIME
104 /*
105  * BORROW_OFFTIME (experimental):
106  * borrow the offtime of the class being borrowed from.
107  * the reason is that when its own offtime is set, the class is unable
108  * to borrow much, especially when cutoff is taking effect.
109  * but when the borrowed-from class is overloaded (avgidle is close to minidle),
110  * use the borrowing class's offtime to avoid overload.
111  */
112 #define	ADJUST_CUTOFF
113 /*
114  * ADJUST_CUTOFF (experimental):
115  * if no underlimit class is found due to cutoff, increase cutoff and
116  * retry the scheduling loop.
117  * also, don't invoke delay_actions while cutoff is taking effect,
118  * since a sleeping class won't have a chance to be scheduled in the
119  * next loop.
120  *
121  * now the heuristics for setting the top-level variable (cutoff_) become:
122  *	1. if a packet arrives for a not-overlimit class, set cutoff
123  *	   to the depth of the class.
124  *	2. if cutoff is i, and a packet arrives for an overlimit class
125  *	   with an underlimit ancestor at a lower level than i (say j),
126  *	   then set cutoff to j.
127  *	3. when scheduling a packet, if there is no underlimit class
128  *	   due to the current cutoff level, increase cutoff by 1 and
129  *	   then try to schedule again.
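 *
 * as an illustrative (purely hypothetical) example: with leaves at
 * depth 0, their parents at depth 1 and the root at depth 2, a packet
 * arriving for an underlimit leaf sets cutoff_ to 0 (rule 1); a packet
 * for an overlimit leaf whose depth-1 parent is underlimit lowers
 * cutoff_ to 1 when it is currently higher (rule 2); and when nothing
 * is sendable at the current cutoff, the scheduler raises cutoff_ one
 * level at a time toward the root (rule 3).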
130  */
131 
132 /*
133  * rm_class_t *
134  * rmc_newclass(...) - Create a new resource management class at priority
135  * 'pri' on the interface given by 'ifd'.
136  *
137  * nsecPerByte  is the data rate of the interface in nanoseconds/byte.
138  *              E.g., 800 for a 10Mb/s ethernet.  If the class gets less
139  *              than 100% of the bandwidth, this number should be the
140  *              'effective' rate for the class.  Let f be the
141  *              bandwidth fraction allocated to this class, and let
142  *              nsPerByte be the data rate of the output link in
143  *              nanoseconds/byte.  Then nsecPerByte is set to
144  *              nsPerByte / f.  E.g., 1600 (= 800 / .5)
145  *              for a class that gets 50% of an ethernet's bandwidth.
146  *
147  * action       the routine to call when the class is over limit.
148  *
149  * maxq         max allowable queue size for class (in packets).
150  *
151  * parent       parent class pointer.
152  *
153  * borrow       class to borrow from (should be either 'parent' or null).
154  *
155  * maxidle      max value allowed for class 'idle' time estimate (this
156  *              parameter determines how large an initial burst of packets
157  *              can be before the overlimit action is invoked).
158  *
159  * offtime      how long 'delay' action will delay when class goes over
160  *              limit (this parameter determines the steady-state burst
161  *              size when a class is running over its limit).
162  *
163  * Maxidle and offtime have to be computed from the following:  If the
164  * average packet size is s, the bandwidth fraction allocated to this
165  * class is f, we want to allow b packet bursts, and the gain of the
166  * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
167  *
168  *   ptime = s * nsPerByte * (1 - f) / f
169  *   maxidle = ptime * (1 - g^b) / g^b
170  *   minidle = -ptime * (1 / (f - 1))
171  *   offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
172  *
173  * Operationally, it's convenient to specify maxidle & offtime in units
174  * independent of the link bandwidth so the maxidle & offtime passed to
175  * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
176  * (The constant factor is a scale factor needed to make the parameters
177  * integers.  This scaling also means that the 'unscaled' values of
178  * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
179  * not nanoseconds.)  Also note that the 'idle' filter computation keeps
180  * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
181  * maxidle also must be scaled upward by this value.  Thus, the passed
182  * values for maxidle and offtime can be computed as follows:
183  *
184  * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
185  * offtime = offtime * 8 / (1000 * nsecPerByte)
186  *
187  * When USE_HRTIME is employed, then maxidle and offtime become:
188  * 	maxidle = maxidle * (8.0 / nsecPerByte);
189  * 	offtime = offtime * (8.0 / nsecPerByte);
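 *
 * As a purely illustrative example of the scaling above (hypothetical
 * numbers, not using USE_HRTIME, and assuming RM_FILTER_GAIN is 5 so
 * that g = 31/32): for f = 0.5 on a 10Mb/s link (nsPerByte = 800,
 * nsecPerByte = 1600), an average packet size s = 1000 bytes and a
 * burst allowance b = 16,
 *
 *   ptime   = 1000 * 800 * (1 - .5) / .5                 ~= 800000 ns
 *   maxidle = 800000 * (1 - (31/32)^16) / (31/32)^16     ~= 530000 ns
 *   offtime = 800000 * (1 + 32 * (1 - (31/32)^15) / (31/32)^15)
 *                                                        ~= 16.4 ms
 *
 * and the scaled values passed to this routine would be roughly
 *
 *   maxidle ~= 530000 * 2^5 * 8 / (1000 * 1600)  ~= 85
 *   offtime ~= 16400000 * 8 / (1000 * 1600)      ~= 82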
190  */
191 struct rm_class *
192 rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
193 	     void (*action)(rm_class_t *, rm_class_t *), int maxq,
194 	     struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
195 	     int minidle, u_int offtime, int pktsize, int flags)
196 {
197 	struct rm_class *cl;
198 	struct rm_class *peer;
199 
200 	if (pri >= RM_MAXPRIO)
201 		return (NULL);
202 #ifndef ALTQ_RED
203 	if (flags & RMCF_RED) {
204 #ifdef ALTQ_DEBUG
205 		kprintf("rmc_newclass: RED not configured for CBQ!\n");
206 #endif
207 		return (NULL);
208 	}
209 #endif
210 #ifndef ALTQ_RIO
211 	if (flags & RMCF_RIO) {
212 #ifdef ALTQ_DEBUG
213 		kprintf("rmc_newclass: RIO not configured for CBQ!\n");
214 #endif
215 		return (NULL);
216 	}
217 #endif
218 
219 	cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
220 	callout_init(&cl->callout_);
221 	netmsg_init(&cl->callout_nmsg_, NULL, &netisr_adone_rport,
222 	    MSGF_PRIORITY, rmc_restart_dispatch);
223 	cl->callout_nmsg_.lmsg.u.ms_resultp = cl;
224 
225 	cl->q_ = kmalloc(sizeof(*cl->q_), M_ALTQ, M_WAITOK | M_ZERO);
226 
227 	/*
228 	 * Class initialization.
229 	 */
230 	cl->children_ = NULL;
231 	cl->parent_ = parent;
232 	cl->borrow_ = borrow;
233 	cl->leaf_ = 1;
234 	cl->ifdat_ = ifd;
235 	cl->pri_ = pri;
236 	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
237 	cl->depth_ = 0;
238 	cl->qthresh_ = 0;
239 	cl->ns_per_byte_ = nsecPerByte;
240 
241 	qlimit(cl->q_) = maxq;
242 	qtype(cl->q_) = Q_DROPHEAD;
243 	qlen(cl->q_) = 0;
244 	cl->flags_ = flags;
245 
246 #if 1 /* minidle is also scaled in ALTQ */
247 	cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
248 	if (cl->minidle_ > 0)
249 		cl->minidle_ = 0;
250 #else
251 	cl->minidle_ = minidle;
252 #endif
253 	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
254 	if (cl->maxidle_ == 0)
255 		cl->maxidle_ = 1;
256 #if 1 /* offtime is also scaled in ALTQ */
257 	cl->avgidle_ = cl->maxidle_;
258 	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
259 	if (cl->offtime_ == 0)
260 		cl->offtime_ = 1;
261 #else
262 	cl->avgidle_ = 0;
263 	cl->offtime_ = (offtime * nsecPerByte) / 8;
264 #endif
265 	cl->overlimit = action;
266 
267 #ifdef ALTQ_RED
268 	if (flags & (RMCF_RED|RMCF_RIO)) {
269 		int red_flags, red_pkttime;
270 
271 		red_flags = 0;
272 		if (flags & RMCF_ECN)
273 			red_flags |= REDF_ECN;
274 #ifdef ALTQ_RIO
275 		if (flags & RMCF_CLEARDSCP)
276 			red_flags |= RIOF_CLEARDSCP;
277 #endif
278 		red_pkttime = nsecPerByte * pktsize  / 1000;
279 
280 		if (flags & RMCF_RED) {
281 			cl->red_ = red_alloc(0, 0,
282 			    qlimit(cl->q_) * 10/100,
283 			    qlimit(cl->q_) * 30/100,
284 			    red_flags, red_pkttime);
285 			if (cl->red_ != NULL)
286 				qtype(cl->q_) = Q_RED;
287 		}
288 #ifdef ALTQ_RIO
289 		else {
290 			cl->red_ = (red_t *)rio_alloc(0, NULL,
291 						      red_flags, red_pkttime);
292 			if (cl->red_ != NULL)
293 				qtype(cl->q_) = Q_RIO;
294 		}
295 #endif
296 	}
297 #endif /* ALTQ_RED */
298 
299 	/*
300 	 * put the class into the class tree
301 	 */
302 	crit_enter();
303 	if ((peer = ifd->active_[pri]) != NULL) {
304 		/* find the last class at this pri */
305 		cl->peer_ = peer;
306 		while (peer->peer_ != ifd->active_[pri])
307 			peer = peer->peer_;
308 		peer->peer_ = cl;
309 	} else {
310 		ifd->active_[pri] = cl;
311 		cl->peer_ = cl;
312 	}
313 
314 	if (cl->parent_) {
315 		cl->next_ = parent->children_;
316 		parent->children_ = cl;
317 		parent->leaf_ = 0;
318 	}
319 
320 	/*
321 	 * Compute the depth of this class and its ancestors in the class
322 	 * hierarchy.
323 	 */
324 	rmc_depth_compute(cl);
325 
326 	/*
327 	 * If CBQ's WRR is enabled, then initialize the class WRR state.
328 	 */
329 	if (ifd->wrr_) {
330 		ifd->num_[pri]++;
331 		ifd->alloc_[pri] += cl->allotment_;
332 		rmc_wrr_set_weights(ifd);
333 	}
334 	crit_exit();
335 	return (cl);
336 }
337 
338 int
339 rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
340 	     int minidle, u_int offtime, int pktsize)
341 {
342 	struct rm_ifdat *ifd;
343 	u_int old_allotment;
344 
345 	ifd = cl->ifdat_;
346 	old_allotment = cl->allotment_;
347 
348 	crit_enter();
349 	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
350 	cl->qthresh_ = 0;
351 	cl->ns_per_byte_ = nsecPerByte;
352 
353 	qlimit(cl->q_) = maxq;
354 
355 #if 1 /* minidle is also scaled in ALTQ */
356 	cl->minidle_ = (minidle * nsecPerByte) / 8;
357 	if (cl->minidle_ > 0)
358 		cl->minidle_ = 0;
359 #else
360 	cl->minidle_ = minidle;
361 #endif
362 	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
363 	if (cl->maxidle_ == 0)
364 		cl->maxidle_ = 1;
365 #if 1 /* offtime is also scaled in ALTQ */
366 	cl->avgidle_ = cl->maxidle_;
367 	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
368 	if (cl->offtime_ == 0)
369 		cl->offtime_ = 1;
370 #else
371 	cl->avgidle_ = 0;
372 	cl->offtime_ = (offtime * nsecPerByte) / 8;
373 #endif
374 
375 	/*
376 	 * If CBQ's WRR is enabled, then initialize the class WRR state.
377 	 */
378 	if (ifd->wrr_) {
379 		ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
380 		rmc_wrr_set_weights(ifd);
381 	}
382 	crit_exit();
383 	return (0);
384 }
385 
386 /*
387  * static void
388  * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
389  *	the appropriate run robin weights for the CBQ weighted round robin
390  *	algorithm.
391  *
392  *	Returns: NONE
393  */
394 
395 static void
396 rmc_wrr_set_weights(struct rm_ifdat *ifd)
397 {
398 	int i;
399 	struct rm_class *cl, *clh;
400 
401 	for (i = 0; i < RM_MAXPRIO; i++) {
402 		/*
403 		 * This is inverted from that of the simulator to
404 		 * maintain precision.
405 		 */
406 		if (ifd->num_[i] == 0)
407 			ifd->M_[i] = 0;
408 		else
409 			ifd->M_[i] = ifd->alloc_[i] /
410 				(ifd->num_[i] * ifd->maxpkt_);
411 		/*
412 		 * Compute the weighted allotment for each class.
413 		 * This takes the expensive div instruction out
414 		 * of the main loop for the wrr scheduling path.
415 		 * These only get recomputed when a class comes or
416 		 * goes.
417 		 */
418 		if (ifd->active_[i] != NULL) {
419 			clh = cl = ifd->active_[i];
420 			do {
421 				/* safe-guard for slow link or alloc_ == 0 */
422 				if (ifd->M_[i] == 0)
423 					cl->w_allotment_ = 0;
424 				else
425 					cl->w_allotment_ = cl->allotment_ /
426 						ifd->M_[i];
427 				cl = cl->peer_;
428 			} while ((cl != NULL) && (cl != clh));
429 		}
430 	}
431 }
432 
433 int
434 rmc_get_weight(struct rm_ifdat *ifd, int pri)
435 {
436 	if ((pri >= 0) && (pri < RM_MAXPRIO))
437 		return (ifd->M_[pri]);
438 	else
439 		return (0);
440 }
441 
442 /*
443  * static void
444  * rmc_depth_compute(struct rm_class *cl) - This function computes the
445  *	appropriate depth of class 'cl' and its ancestors.
446  *
447  *	Returns:	NONE
448  */
449 
450 static void
451 rmc_depth_compute(struct rm_class *cl)
452 {
453 	rm_class_t *t = cl, *p;
454 
455 	/*
456 	 * Recompute the depth for the branch of the tree.
457 	 */
458 	while (t != NULL) {
459 		p = t->parent_;
460 		if (p && (t->depth_ >= p->depth_)) {
461 			p->depth_ = t->depth_ + 1;
462 			t = p;
463 		} else
464 			t = NULL;
465 	}
466 }
467 
468 /*
469  * static void
470  * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
471  *	the depth of the tree after a class has been deleted.
472  *
473  *	Returns:	NONE
474  */
475 
476 static void
477 rmc_depth_recompute(rm_class_t *cl)
478 {
479 #if 1 /* ALTQ */
480 	rm_class_t *p, *t;
481 
482 	p = cl;
483 	while (p != NULL) {
484 		if ((t = p->children_) == NULL) {
485 			p->depth_ = 0;
486 		} else {
487 			int cdepth = 0;
488 
489 			while (t != NULL) {
490 				if (t->depth_ > cdepth)
491 					cdepth = t->depth_;
492 				t = t->next_;
493 			}
494 
495 			if (p->depth_ == cdepth + 1)
496 				/* no change to this parent */
497 				return;
498 
499 			p->depth_ = cdepth + 1;
500 		}
501 
502 		p = p->parent_;
503 	}
504 #else
505 	rm_class_t	*t;
506 
507 	if (cl->depth_ >= 1) {
508 		if (cl->children_ == NULL) {
509 			cl->depth_ = 0;
510 		} else if ((t = cl->children_) != NULL) {
511 			while (t != NULL) {
512 				if (t->children_ != NULL)
513 					rmc_depth_recompute(t);
514 				t = t->next_;
515 			}
516 		} else
517 			rmc_depth_compute(cl);
518 	}
519 #endif
520 }
521 
522 /*
523  * void
524  * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
525  *	function deletes a class from the link-sharing structure and frees
526  *	all resources associated with the class.
527  *
528  *	Returns: NONE
529  */
530 
531 void
532 rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
533 {
534 	struct rm_class *p, *head, *previous;
535 	struct netmsg_base smsg;
536 	struct ifaltq_subque *ifsq =
537 	    &ifd->ifq_->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
538 
539 	KKASSERT(cl->children_ == NULL);
540 
541 	ALTQ_SQ_ASSERT_LOCKED(ifsq);
542 	ALTQ_SQ_UNLOCK(ifsq);
543 	callout_stop_sync(&cl->callout_);
544 	/* Make sure that cl->callout_nmsg_ stops. */
545 	netmsg_init(&smsg, NULL, &curthread->td_msgport, 0,
546 	    netmsg_sync_handler);
547 	lwkt_domsg(netisr_cpuport(0), &smsg.lmsg, 0);
548 	ALTQ_SQ_LOCK(ifsq);
549 
550 	crit_enter();
551 
552 	if (ifd->pollcache_ == cl)
553 		ifd->pollcache_ = NULL;
554 
555 	/*
556 	 * Free packets in the packet queue.
557 	 * XXX - this may not be a desired behavior.  Packets should be
558 	 *		re-queued.
559 	 */
560 	rmc_dropall(cl);
561 
562 	/*
563 	 * If the class has a parent, then remove the class from the
564 	 * parent's children chain.
565 	 */
566 	if (cl->parent_ != NULL) {
567 		head = cl->parent_->children_;
568 		p = previous = head;
569 		if (head->next_ == NULL) {
570 			KKASSERT(head == cl);
571 			cl->parent_->children_ = NULL;
572 			cl->parent_->leaf_ = 1;
573 		} else while (p != NULL) {
574 			if (p == cl) {
575 				if (cl == head)
576 					cl->parent_->children_ = cl->next_;
577 				else
578 					previous->next_ = cl->next_;
579 				cl->next_ = NULL;
580 				p = NULL;
581 			} else {
582 				previous = p;
583 				p = p->next_;
584 			}
585 		}
586 	}
587 
588 	/*
589 	 * Delete class from class priority peer list.
590 	 */
591 	if ((p = ifd->active_[cl->pri_]) != NULL) {
592 		/*
593 		 * If there is more than one member of this priority
594 		 * level, then look for class(cl) in the priority level.
595 		 */
596 		if (p != p->peer_) {
597 			while (p->peer_ != cl)
598 				p = p->peer_;
599 			p->peer_ = cl->peer_;
600 
601 			if (ifd->active_[cl->pri_] == cl)
602 				ifd->active_[cl->pri_] = cl->peer_;
603 		} else {
604 			KKASSERT(p == cl);
605 			ifd->active_[cl->pri_] = NULL;
606 		}
607 	}
608 
609 	/*
610 	 * Recompute the WRR weights.
611 	 */
612 	if (ifd->wrr_) {
613 		ifd->alloc_[cl->pri_] -= cl->allotment_;
614 		ifd->num_[cl->pri_]--;
615 		rmc_wrr_set_weights(ifd);
616 	}
617 
618 	/*
619 	 * Re-compute the depth of the tree.
620 	 */
621 #if 1 /* ALTQ */
622 	rmc_depth_recompute(cl->parent_);
623 #else
624 	rmc_depth_recompute(ifd->root_);
625 #endif
626 
627 	crit_exit();
628 
629 	/*
630 	 * Free the class structure.
631 	 */
632 	if (cl->red_ != NULL) {
633 #ifdef ALTQ_RIO
634 		if (q_is_rio(cl->q_))
635 			rio_destroy((rio_t *)cl->red_);
636 #endif
637 #ifdef ALTQ_RED
638 		if (q_is_red(cl->q_))
639 			red_destroy(cl->red_);
640 #endif
641 	}
642 	kfree(cl->q_, M_ALTQ);
643 	kfree(cl, M_ALTQ);
644 }
645 
646 /*
647  * void
648  * rmc_init(...) - Initialize the resource management data structures
649  *	associated with the output portion of interface 'ifp'.  'ifd' is
650  *	where the structures will be built (for backwards compatibility, the
651  *	structures aren't kept in the ifnet struct).  'nsecPerByte'
652  *	gives the link speed (inverse of bandwidth) in nanoseconds/byte.
653  *	'restart' is the driver-specific routine that the generic 'delay
654  *	until under limit' action will call to restart output.  `maxq'
655  *	is the queue size of the 'link' & 'default' classes.  'maxqueued'
656  *	is the maximum number of packets that the resource management
657  *	code will allow to be queued 'downstream' (this is typically 1).
658  *
659  *	Returns:	NONE
660  */
661 
662 void
663 rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, u_int nsecPerByte,
664          void (*restart)(struct ifaltq *), int maxq, int maxqueued, u_int maxidle,
665 	 int minidle, u_int offtime, int flags)
666 {
667 	int i, mtu;
668 
669 	/*
670 	 * Initialize the CBQ tracing/debug facility.
671 	 */
672 	CBQTRACEINIT();
673 
674 	bzero(ifd, sizeof (*ifd));
675 	mtu = ifq->altq_ifp->if_mtu;
676 	ifd->ifq_ = ifq;
677 	ifd->restart = restart;
678 	ifd->maxqueued_ = maxqueued;
679 	ifd->ns_per_byte_ = nsecPerByte;
680 	ifd->maxpkt_ = mtu;
681 	ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
682 	ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
683 #if 1
684 	ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
685 	if (mtu * nsecPerByte > 10 * 1000000)
686 		ifd->maxiftime_ /= 4;
687 #endif
688 
689 	reset_cutoff(ifd);
690 	CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);
691 
692 	/*
693 	 * Initialize the CBQ's WRR state.
694 	 */
695 	for (i = 0; i < RM_MAXPRIO; i++) {
696 		ifd->alloc_[i] = 0;
697 		ifd->M_[i] = 0;
698 		ifd->num_[i] = 0;
699 		ifd->na_[i] = 0;
700 		ifd->active_[i] = NULL;
701 	}
702 
703 	/*
704 	 * Initialize current packet state.
705 	 */
706 	ifd->qi_ = 0;
707 	ifd->qo_ = 0;
708 	for (i = 0; i < RM_MAXQUEUED; i++) {
709 		ifd->class_[i] = NULL;
710 		ifd->curlen_[i] = 0;
711 		ifd->borrowed_[i] = NULL;
712 	}
713 
714 	/*
715 	 * Create the root class of the link-sharing structure.
716 	 */
717 	ifd->root_ = rmc_newclass(0, ifd, nsecPerByte, rmc_root_overlimit,
718 				  maxq, 0, 0, maxidle, minidle, offtime, 0, 0);
719 	if (ifd->root_ == NULL) {
720 		kprintf("rmc_init: root class not allocated\n");
721 		return ;
722 	}
723 	ifd->root_->depth_ = 0;
724 }
725 
726 /*
727  * void
728  * rmc_queue_packet(struct rm_class *cl, struct mbuf *m) - Add packet given by
729  *	mbuf 'm' to queue for resource class 'cl'.  This routine is called
730  *	by a driver's if_output routine.  This routine must be called with
731  *	output packet completion interrupts locked out (to avoid racing with
732  *	rmc_dequeue_next).
733  *
734  *	Returns:	0 on successful queueing
735  *			-1 when packet drop occurs
736  */
737 int
738 rmc_queue_packet(struct rm_class *cl, struct mbuf *m)
739 {
740 	struct timeval now;
741 	struct rm_ifdat *ifd = cl->ifdat_;
742 	int cpri = cl->pri_;
743 	int is_empty = qempty(cl->q_);
744 
745 	RM_GETTIME(now);
746 	if (ifd->cutoff_ > 0) {
747 		if (TV_LT(&cl->undertime_, &now)) {
748 			if (ifd->cutoff_ > cl->depth_)
749 				ifd->cutoff_ = cl->depth_;
750 			CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
751 		}
752 #if 1 /* ALTQ */
753 		else {
754 			/*
755 			 * the class is overlimit. if the class has
756 			 * underlimit ancestors, set cutoff to the lowest
757 			 * depth among them.
758 			 */
759 			struct rm_class *borrow = cl->borrow_;
760 
761 			while (borrow != NULL &&
762 			       borrow->depth_ < ifd->cutoff_) {
763 				if (TV_LT(&borrow->undertime_, &now)) {
764 					ifd->cutoff_ = borrow->depth_;
765 					CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
766 					break;
767 				}
768 				borrow = borrow->borrow_;
769 			}
770 		}
771 #else /* !ALTQ */
772 		else if ((ifd->cutoff_ > 1) && cl->borrow_) {
773 			if (TV_LT(&cl->borrow_->undertime_, &now)) {
774 				ifd->cutoff_ = cl->borrow_->depth_;
775 				CBQTRACE(rmc_queue_packet, 'ffob',
776 					 cl->borrow_->depth_);
777 			}
778 		}
779 #endif /* !ALTQ */
780 	}
781 
782 	if (_rmc_addq(cl, m) < 0)
783 		/* failed */
784 		return (-1);
785 
786 	if (is_empty) {
787 		CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
788 		ifd->na_[cpri]++;
789 	}
790 
791 	if (qlen(cl->q_) > qlimit(cl->q_)) {
792 		/* note: qlimit can be set to 0 or 1 */
793 		rmc_drop_action(cl);
794 		return (-1);
795 	}
796 	return (0);
797 }
798 
799 /*
800  * void
801  * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
802  *	classes to see if there are satified.
803  */
804 
805 static void
806 rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now)
807 {
808 	int i;
809 	rm_class_t *p, *bp;
810 
811 	for (i = RM_MAXPRIO - 1; i >= 0; i--) {
812 		if ((bp = ifd->active_[i]) != NULL) {
813 			p = bp;
814 			do {
815 				if (!rmc_satisfied(p, now)) {
816 					ifd->cutoff_ = p->depth_;
817 					return;
818 				}
819 				p = p->peer_;
820 			} while (p != bp);
821 		}
822 	}
823 
824 	reset_cutoff(ifd);
825 }
826 
827 /*
828  * rmc_satisfied - Return 1 if the class is satisfied, 0 otherwise.
829  */
830 
831 static int
832 rmc_satisfied(struct rm_class *cl, struct timeval *now)
833 {
834 	rm_class_t *p;
835 
836 	if (cl == NULL)
837 		return (1);
838 	if (TV_LT(now, &cl->undertime_))
839 		return (1);
840 	if (cl->depth_ == 0) {
841 		if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
842 			return (0);
843 		else
844 			return (1);
845 	}
846 	if (cl->children_ != NULL) {
847 		p = cl->children_;
848 		while (p != NULL) {
849 			if (!rmc_satisfied(p, now))
850 				return (0);
851 			p = p->next_;
852 		}
853 	}
854 
855 	return (1);
856 }
857 
858 /*
859  * Return 1 if class 'cl' is under limit or can borrow from a parent,
860  * 0 if overlimit.  As a side-effect, this routine will invoke the
861  * class overlimit action if the class is overlimit.
862  */
863 
864 static int
865 rmc_under_limit(struct rm_class *cl, struct timeval *now)
866 {
867 	rm_class_t *p = cl;
868 	rm_class_t *top;
869 	struct rm_ifdat *ifd = cl->ifdat_;
870 
871 	ifd->borrowed_[ifd->qi_] = NULL;
872 	/*
873 	 * If cl is the root class, then always return that it is
874 	 * underlimit.  Otherwise, check to see if the class is underlimit.
875 	 */
876 	if (cl->parent_ == NULL)
877 		return (1);
878 
879 	if (cl->sleeping_) {
880 		if (TV_LT(now, &cl->undertime_))
881 			return (0);
882 
883 		callout_stop(&cl->callout_);
884 		cl->sleeping_ = 0;
885 		cl->undertime_.tv_sec = 0;
886 		return (1);
887 	}
888 
889 	top = NULL;
890 	while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
891 		if (((cl = cl->borrow_) == NULL) ||
892 		    (cl->depth_ > ifd->cutoff_)) {
893 #ifdef ADJUST_CUTOFF
894 			if (cl != NULL)
895 				/* cutoff is taking effect, just
896 				   return false without calling
897 				   the delay action. */
898 				return (0);
899 #endif
900 #ifdef BORROW_OFFTIME
901 			/*
902 			 * check if the class can borrow offtime too.
903 			 * borrow offtime from the top of the borrow
904 			 * chain if the top class is not overloaded.
905 			 */
906 			if (cl != NULL) {
907 				/* cutoff is taking effect, use this class as top. */
908 				top = cl;
909 				CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
910 			}
911 			if (top != NULL && top->avgidle_ == top->minidle_)
912 				top = NULL;
913 			p->overtime_ = *now;
914 			(p->overlimit)(p, top);
915 #else
916 			p->overtime_ = *now;
917 			(p->overlimit)(p, NULL);
918 #endif
919 			return (0);
920 		}
921 		top = cl;
922 	}
923 
924 	if (cl != p)
925 		ifd->borrowed_[ifd->qi_] = cl;
926 	return (1);
927 }
928 
929 /*
930  * _rmc_wrr_dequeue_next() - This is the scheduler for WRR, as opposed to
931  *	packet-by-packet round robin.
932  *
933  * The heart of the weighted round-robin scheduler, which decides which
934  * class next gets to send a packet.  Highest priority first, then
935  * weighted round-robin within priorities.
936  *
937  * Each able-to-send class gets to send until its byte allocation is
938  * exhausted.  Thus, the active pointer is only changed after a class has
939  * exhausted its allocation.
940  *
941  * If the scheduler finds no class that is underlimit or able to borrow,
942  * then the first class found that had a nonzero queue and is allowed to
943  * borrow gets to send.
944  */
945 
946 static struct mbuf *
947 _rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
948 {
949 	struct rm_class *cl = NULL, *first = NULL;
950 	u_int deficit;
951 	int cpri;
952 	struct mbuf *m;
953 	struct timeval now;
954 
955 	RM_GETTIME(now);
956 
957 	/*
958 	 * if the driver polls the top of the queue and then removes
959 	 * the polled packet, we must return the same packet.
960 	 */
961 	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
962 		cl = ifd->pollcache_;
963 		cpri = cl->pri_;
964 		if (ifd->efficient_) {
965 			/* check if this class is overlimit */
966 			if (cl->undertime_.tv_sec != 0 &&
967 			    rmc_under_limit(cl, &now) == 0)
968 				first = cl;
969 		}
970 		ifd->pollcache_ = NULL;
971 		goto _wrr_out;
972 	}
973 	/* mode == ALTDQ_POLL || pollcache == NULL */
974 	ifd->pollcache_ = NULL;
975 	ifd->borrowed_[ifd->qi_] = NULL;
976 #ifdef ADJUST_CUTOFF
977  _again:
978 #endif
979 	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
980 		if (ifd->na_[cpri] == 0)
981 			continue;
982 		deficit = 0;
983 		/*
984 		 * Loop through twice for a priority level, if some class
985 		 * was unable to send a packet the first round because
986 		 * of the weighted round-robin mechanism.
987 		 * During the second loop at this level, deficit==2.
988 		 * (This second loop is not needed if for every class,
989 		 * "M[cl->pri_])" times "cl->allotment" is greater than
990 		 * the byte size for the largest packet in the class.)
991 		 */
992  _wrr_loop:
993 		cl = ifd->active_[cpri];
994 		KKASSERT(cl != NULL);
995 		do {
996 			if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
997 				cl->bytes_alloc_ += cl->w_allotment_;
998 			if (!qempty(cl->q_)) {
999 				if ((cl->undertime_.tv_sec == 0) ||
1000 				    rmc_under_limit(cl, &now)) {
1001 					if (cl->bytes_alloc_ > 0 || deficit > 1)
1002 						goto _wrr_out;
1003 
1004 					/* underlimit but no alloc */
1005 					deficit = 1;
1006 #if 1
1007 					ifd->borrowed_[ifd->qi_] = NULL;
1008 #endif
1009 				}
1010 				else if (first == NULL && cl->borrow_ != NULL)
1011 					first = cl; /* borrowing candidate */
1012 			}
1013 
1014 			cl->bytes_alloc_ = 0;
1015 			cl = cl->peer_;
1016 		} while (cl != ifd->active_[cpri]);
1017 
1018 		if (deficit == 1) {
1019 			/* first loop found an underlimit class with deficit */
1020 			/* Loop on same priority level, with new deficit.  */
1021 			deficit = 2;
1022 			goto _wrr_loop;
1023 		}
1024 	}
1025 
1026 #ifdef ADJUST_CUTOFF
1027 	/*
1028 	 * no underlimit class found.  if cutoff is taking effect,
1029 	 * increase cutoff and try again.
1030 	 */
1031 	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1032 		ifd->cutoff_++;
1033 		CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
1034 		goto _again;
1035 	}
1036 #endif /* ADJUST_CUTOFF */
1037 	/*
1038 	 * If LINK_EFFICIENCY is turned on, then the first overlimit
1039 	 * class we encounter will send a packet if all the classes
1040 	 * of the link-sharing structure are overlimit.
1041 	 */
1042 	reset_cutoff(ifd);
1043 	CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);
1044 
1045 	if (!ifd->efficient_ || first == NULL)
1046 		return (NULL);
1047 
1048 	cl = first;
1049 	cpri = cl->pri_;
1050 #if 0	/* too time-consuming for nothing */
1051 	if (cl->sleeping_)
1052 		callout_stop(&cl->callout_);
1053 	cl->sleeping_ = 0;
1054 	cl->undertime_.tv_sec = 0;
1055 #endif
1056 	ifd->borrowed_[ifd->qi_] = cl->borrow_;
1057 	ifd->cutoff_ = cl->borrow_->depth_;
1058 
1059 	/*
1060 	 * Dequeue the packet and do the bookkeeping...
1061 	 */
1062  _wrr_out:
1063 	if (op == ALTDQ_REMOVE) {
1064 		m = _rmc_getq(cl);
1065 		if (m == NULL)
1066 			panic("_rmc_wrr_dequeue_next");
1067 		if (qempty(cl->q_))
1068 			ifd->na_[cpri]--;
1069 
1070 		/*
1071 		 * Update class statistics and link data.
1072 		 */
1073 		if (cl->bytes_alloc_ > 0)
1074 			cl->bytes_alloc_ -= m_pktlen(m);
1075 
1076 		if ((cl->bytes_alloc_ <= 0) || first == cl)
1077 			ifd->active_[cl->pri_] = cl->peer_;
1078 		else
1079 			ifd->active_[cl->pri_] = cl;
1080 
1081 		ifd->class_[ifd->qi_] = cl;
1082 		ifd->curlen_[ifd->qi_] = m_pktlen(m);
1083 		ifd->now_[ifd->qi_] = now;
1084 		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1085 		ifd->queued_++;
1086 	} else {
1087 		/* mode == ALTDQ_POLL */
1088 		m = _rmc_pollq(cl);
1089 #ifdef foo
1090 		/*
1091 		 * Don't use poll cache; the poll/dequeue
1092 		 * model is no longer applicable to SMP
1093 		 * system.  e.g.
1094 		 *    CPU-A            CPU-B
1095 		 *      :                :
1096 		 *    poll               :
1097 		 *      :              poll
1098 		 *    dequeue (+)        :
1099 		 *
1100 		 * The dequeue at (+) will hit the poll
1101 		 * cache set by CPU-B.
1102 		 */
1103 		ifd->pollcache_ = cl;
1104 #endif
1105 	}
1106 	return (m);
1107 }
1108 
1109 /*
1110  * Dequeue & return next packet from the highest priority class that
1111  * has a packet to send & has enough allocation to send it.  This
1112  * routine is called by a driver whenever it needs a new packet to
1113  * output.
1114  */
1115 static struct mbuf *
1116 _rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
1117 {
1118 	struct mbuf *m;
1119 	int cpri;
1120 	struct rm_class *cl, *first = NULL;
1121 	struct timeval now;
1122 
1123 	RM_GETTIME(now);
1124 
1125 	/*
1126 	 * if the driver polls the top of the queue and then removes
1127 	 * the polled packet, we must return the same packet.
1128 	 */
1129 	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
1130 		cl = ifd->pollcache_;
1131 		cpri = cl->pri_;
1132 		ifd->pollcache_ = NULL;
1133 		goto _prr_out;
1134 	}
1135 	/* mode == ALTDQ_POLL || pollcache == NULL */
1136 	ifd->pollcache_ = NULL;
1137 	ifd->borrowed_[ifd->qi_] = NULL;
1138 #ifdef ADJUST_CUTOFF
1139  _again:
1140 #endif
1141 	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
1142 		if (ifd->na_[cpri] == 0)
1143 			continue;
1144 		cl = ifd->active_[cpri];
1145 		KKASSERT(cl != NULL);
1146 		do {
1147 			if (!qempty(cl->q_)) {
1148 				if ((cl->undertime_.tv_sec == 0) ||
1149 				    rmc_under_limit(cl, &now))
1150 					goto _prr_out;
1151 				if (first == NULL && cl->borrow_ != NULL)
1152 					first = cl;
1153 			}
1154 			cl = cl->peer_;
1155 		} while (cl != ifd->active_[cpri]);
1156 	}
1157 
1158 #ifdef ADJUST_CUTOFF
1159 	/*
1160 	 * no underlimit class found.  if cutoff is taking effect, increase
1161 	 * cutoff and try again.
1162 	 */
1163 	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1164 		ifd->cutoff_++;
1165 		goto _again;
1166 	}
1167 #endif /* ADJUST_CUTOFF */
1168 	/*
1169 	 * If LINK_EFFICIENCY is turned on, then the first overlimit
1170 	 * class we encounter will send a packet if all the classes
1171 	 * of the link-sharing structure are overlimit.
1172 	 */
1173 	reset_cutoff(ifd);
1174 	if (!ifd->efficient_ || first == NULL)
1175 		return (NULL);
1176 
1177 	cl = first;
1178 	cpri = cl->pri_;
1179 #if 0	/* too time-consuming for nothing */
1180 	if (cl->sleeping_)
1181 		callout_stop(&cl->callout_);
1182 	cl->sleeping_ = 0;
1183 	cl->undertime_.tv_sec = 0;
1184 #endif
1185 	ifd->borrowed_[ifd->qi_] = cl->borrow_;
1186 	ifd->cutoff_ = cl->borrow_->depth_;
1187 
1188 	/*
1189 	 * Dequeue the packet and do the bookkeeping...
1190 	 */
1191  _prr_out:
1192 	if (op == ALTDQ_REMOVE) {
1193 		m = _rmc_getq(cl);
1194 		if (m == NULL)
1195 			panic("_rmc_prr_dequeue_next");
1196 		if (qempty(cl->q_))
1197 			ifd->na_[cpri]--;
1198 
1199 		ifd->active_[cpri] = cl->peer_;
1200 
1201 		ifd->class_[ifd->qi_] = cl;
1202 		ifd->curlen_[ifd->qi_] = m_pktlen(m);
1203 		ifd->now_[ifd->qi_] = now;
1204 		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1205 		ifd->queued_++;
1206 	} else {
1207 		/* mode == ALTDQ_POLL */
1208 		m = _rmc_pollq(cl);
1209 #ifdef foo
1210 		/*
1211 		 * Don't use poll cache; the poll/dequeue
1212 		 * model is no longer applicable to SMP
1213 		 * system.  e.g.
1214 		 *    CPU-A            CPU-B
1215 		 *      :                :
1216 		 *    poll               :
1217 		 *      :              poll
1218 		 *    dequeue (+)        :
1219 		 *
1220 		 * The dequeue at (+) will hit the poll
1221 		 * cache set by CPU-B.
1222 		 */
1223 		ifd->pollcache_ = cl;
1224 #endif
1225 	}
1226 	return (m);
1227 }
1228 
1229 /*
1230  * struct mbuf *
1231  * rmc_dequeue_next(struct rm_ifdat *ifd, struct timeval *now) - this function
1232  *	is invoked by the packet driver to get the next packet to be
1233  *	dequeued and output on the link.  If WRR is enabled, then the
1234  *	WRR dequeue next routine will determine the next packet to be sent.
1235  *	Otherwise, packet-by-packet round robin is invoked.
1236  *
1237  *	Returns:	NULL, if a packet is not available or if all
1238  *			classes are overlimit.
1239  *
1240  *			Otherwise, Pointer to the next packet.
1241  */
1242 
1243 struct mbuf *
1244 rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
1245 {
1246 	if (ifd->queued_ >= ifd->maxqueued_)
1247 		return (NULL);
1248 	else if (ifd->wrr_)
1249 		return (_rmc_wrr_dequeue_next(ifd, mode));
1250 	else
1251 		return (_rmc_prr_dequeue_next(ifd, mode));
1252 }
1253 
1254 /*
1255  * Update the utilization estimate for the packet that just completed.
1256  * The packet's class & the parent(s) of that class all get their
1257  * estimators updated.  This routine is called by the driver's output-
1258  * packet-completion interrupt service routine.
1259  */
1260 
1261 /*
1262  * a macro to approximate "divide by 1000" that gives 0.000999,
1263  * if a value has enough effective digits.
1264  * (on pentium, mul takes 9 cycles but div takes 46!)
1265  */
1266 #define	NSEC_TO_USEC(t)	(((t) >> 10) + ((t) >> 16) + ((t) >> 17))
1267 void
1268 rmc_update_class_util(struct rm_ifdat *ifd)
1269 {
1270 	int idle, avgidle, pktlen;
1271 	int pkt_time, tidle;
1272 	rm_class_t *cl, *borrowed;
1273 	rm_class_t *borrows;
1274 	struct timeval *nowp;
1275 
1276 	/*
1277 	 * Get the most recent completed class.
1278 	 */
1279 	if ((cl = ifd->class_[ifd->qo_]) == NULL)
1280 		return;
1281 
1282 	pktlen = ifd->curlen_[ifd->qo_];
1283 	borrowed = ifd->borrowed_[ifd->qo_];
1284 	borrows = borrowed;
1285 
1286 	PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1287 
1288 	/*
1289 	 * Run estimator on class and its ancestors.
1290 	 */
1291 	/*
1292 	 * rm_update_class_util is designed to be called when the
1293 	 * transfer is completed from an xmit complete interrupt,
1294 	 * but most drivers don't implement an upcall for that.
1295 	 * so, just use estimated completion time.
1296 	 * as a result, ifd->qi_ and ifd->qo_ are always synced.
1297 	 */
1298 	nowp = &ifd->now_[ifd->qo_];
1299 	/* get pkt_time (for link) in usec */
1300 #if 1  /* use approximation */
1301 	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
1302 	pkt_time = NSEC_TO_USEC(pkt_time);
1303 #else
1304 	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
1305 #endif
1306 #if 1 /* ALTQ4PPP */
1307 	if (TV_LT(nowp, &ifd->ifnow_)) {
1308 		int iftime;
1309 
1310 		/*
1311 		 * make sure the estimated completion time does not go
1312 		 * too far.  it can happen when the link layer supports
1313 		 * data compression or the interface speed is set to
1314 		 * a much lower value.
1315 		 */
1316 		TV_DELTA(&ifd->ifnow_, nowp, iftime);
1317 		if (iftime+pkt_time < ifd->maxiftime_) {
1318 			TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1319 		} else {
1320 			TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
1321 		}
1322 	} else {
1323 		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1324 	}
1325 #else
1326 	if (TV_LT(nowp, &ifd->ifnow_)) {
1327 		TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1328 	} else {
1329 		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1330 	}
1331 #endif
1332 
1333 	while (cl != NULL) {
1334 		TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
1335 		if (idle >= 2000000)
1336 			/*
1337 			 * this class is idle enough, reset avgidle.
1338 			 * (TV_DELTA returns 2000000 us when delta is large.)
1339 			 */
1340 			cl->avgidle_ = cl->maxidle_;
1341 
1342 		/* get pkt_time (for class) in usec */
1343 #if 1  /* use approximation */
1344 		pkt_time = pktlen * cl->ns_per_byte_;
1345 		pkt_time = NSEC_TO_USEC(pkt_time);
1346 #else
1347 		pkt_time = pktlen * cl->ns_per_byte_ / 1000;
1348 #endif
1349 		idle -= pkt_time;
1350 
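		/*
		 * Exponentially weighted moving average of the idle time.
		 * Since avgidle_ is kept scaled up by 2^RM_FILTER_GAIN,
		 * the update below is equivalent to
		 *	avg = (1 - 1/2^RM_FILTER_GAIN) * avg
		 *	      + (1/2^RM_FILTER_GAIN) * idle
		 * on the unscaled estimate.
		 */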
1351 		avgidle = cl->avgidle_;
1352 		avgidle += idle - (avgidle >> RM_FILTER_GAIN);
1353 		cl->avgidle_ = avgidle;
1354 
1355 		/* Are we overlimit ? */
1356 		if (avgidle <= 0) {
1357 			CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
1358 #if 1 /* ALTQ */
1359 			/*
1360 			 * need some lower bound for avgidle, otherwise
1361 			 * a borrowing class gets unbounded penalty.
1362 			 */
1363 			if (avgidle < cl->minidle_)
1364 				avgidle = cl->avgidle_ = cl->minidle_;
1365 #endif
1366 			/* set next idle to make avgidle 0 */
1367 			tidle = pkt_time +
1368 				(((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
1369 			TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
1370 			++cl->stats_.over;
1371 		} else {
1372 			cl->avgidle_ =
1373 			    (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
1374 			cl->undertime_.tv_sec = 0;
1375 			if (cl->sleeping_) {
1376 				callout_stop(&cl->callout_);
1377 				cl->sleeping_ = 0;
1378 			}
1379 		}
1380 
1381 		if (borrows != NULL) {
1382 			if (borrows != cl)
1383 				++cl->stats_.borrows;
1384 			else
1385 				borrows = NULL;
1386 		}
1387 		cl->last_ = ifd->ifnow_;
1388 		cl->last_pkttime_ = pkt_time;
1389 
1390 #if 1
1391 		if (cl->parent_ == NULL) {
1392 			/* take stats of root class */
1393 			PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1394 		}
1395 #endif
1396 
1397 		cl = cl->parent_;
1398 	}
1399 
1400 	/*
1401 	 * Check to see if cutoff needs to be set to a new level.
1402 	 */
1403 	cl = ifd->class_[ifd->qo_];
1404 	if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
1405 #if 1 /* ALTQ */
1406 		if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
1407 			rmc_tl_satisfied(ifd, nowp);
1408 			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
1409 		} else {
1410 			ifd->cutoff_ = borrowed->depth_;
1411 			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
1412 		}
1413 #else /* !ALTQ */
1414 		if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) {
1415 			reset_cutoff(ifd);
1416 #ifdef notdef
1417 			rmc_tl_satisfied(ifd, &now);
1418 #endif
1419 			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
1420 		} else {
1421 			ifd->cutoff_ = borrowed->depth_;
1422 			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
1423 		}
1424 #endif /* !ALTQ */
1425 	}
1426 
1427 	/*
1428 	 * Release class slot
1429 	 */
1430 	ifd->borrowed_[ifd->qo_] = NULL;
1431 	ifd->class_[ifd->qo_] = NULL;
1432 	ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
1433 	ifd->queued_--;
1434 }
1435 
1436 /*
1437  * void
1438  * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
1439  *	over-limit action routines.  These get invoked by rmc_under_limit()
1440  *	if a class with packets to send is over its bandwidth limit & can't
1441  *	borrow from a parent class.
1442  *
1443  *	Returns: NONE
1444  */
1445 
1446 static void
1447 rmc_drop_action(struct rm_class *cl)
1448 {
1449 	struct rm_ifdat *ifd = cl->ifdat_;
1450 
1451 	KKASSERT(qlen(cl->q_) > 0);
1452 	_rmc_dropq(cl);
1453 	if (qempty(cl->q_))
1454 		ifd->na_[cl->pri_]--;
1455 }
1456 
1457 void
1458 rmc_dropall(struct rm_class *cl)
1459 {
1460 	struct rm_ifdat *ifd = cl->ifdat_;
1461 
1462 	if (!qempty(cl->q_)) {
1463 		_flushq(cl->q_);
1464 
1465 		ifd->na_[cl->pri_]--;
1466 	}
1467 }
1468 
1469 /*
1470  * void
1471  * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
1472  *	delay action routine.  It is invoked via rmc_under_limit when the
1473  *	packet is discovered to be overlimit.
1474  *
1475  *	If the delay action is the result of the borrow class being overlimit, then
1476  *	delay for the offtime of the borrowing class that is overlimit.
1477  *
1478  *	Returns: NONE
1479  */
1480 
1481 void
1482 rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
1483 {
1484 	int delay, t, extradelay;
1485 
1486 	cl->stats_.overactions++;
1487 	TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
1488 #ifndef BORROW_OFFTIME
1489 	delay += cl->offtime_;
1490 #endif
1491 
1492 	if (!cl->sleeping_) {
1493 		CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
1494 #ifdef BORROW_OFFTIME
1495 		if (borrow != NULL)
1496 			extradelay = borrow->offtime_;
1497 		else
1498 #endif
1499 			extradelay = cl->offtime_;
1500 
1501 #ifdef ALTQ
1502 		/*
1503 		 * XXX recalculate suspend time:
1504 		 * current undertime is (tidle + pkt_time) calculated
1505 		 * from the last transmission.
1506 		 *	tidle: time required to bring avgidle back to 0
1507 		 *	pkt_time: target waiting time for this class
1508 		 * we need to replace pkt_time by offtime
1509 		 */
1510 		extradelay -= cl->last_pkttime_;
1511 #endif
1512 		if (extradelay > 0) {
1513 			TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
1514 			delay += extradelay;
1515 		}
1516 
1517 		cl->sleeping_ = 1;
1518 		cl->stats_.delays++;
1519 
1520 		/*
1521 		 * Since packets are phased randomly with respect to the
1522 		 * clock, 1 tick (the next clock tick) can be an arbitrarily
1523 		 * short time so we have to wait for at least two ticks.
1524 		 * NOTE:  If there's no other traffic, we need the timer as
1525 		 * a 'backstop' to restart this class.
1526 		 */
1527 		if (delay > ustick * 2)
1528 			t = (delay + ustick - 1) / ustick;
1529 		else
1530 			t = 2;
1531 		callout_reset_bycpu(&cl->callout_, t, rmc_restart, cl, 0);
1532 	}
1533 }
1534 
1535 /*
1536  * void
1537  * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
1538  *	called by the system timer code & is responsible for checking if the
1539  *	class is still sleeping (it might have been restarted as a side
1540  *	effect of the queue scan on a packet arrival) and, if so, restarting
1541  *	output for the class.  Inspecting the class state & restarting output
1542  *	require locking the class structure.  In general the driver is
1543  *	responsible for locking but this is the only routine that is not
1544  *	called directly or indirectly from the interface driver so it has
1545  *	to know about system locking conventions.  Under bsd, locking is done
1546  *	by raising IPL to splimp so that's what's implemented here.  On a
1547  *	different system this would probably need to be changed.
1548  *
1549  *	Since this function is called from an independent timeout, we
1550  *	have to set up the lock conditions expected for the ALTQ operation.
1551  *	Note that the restart will probably fall through to an if_start.
1552  *
1553  *	Returns:	NONE
1554  */
1555 
1556 static void
1557 rmc_restart_dispatch(netmsg_t nmsg)
1558 {
1559 	struct rm_class *cl = nmsg->lmsg.u.ms_resultp;
1560 	struct rm_ifdat *ifd = cl->ifdat_;
1561 	struct ifaltq_subque *ifsq =
1562 	    &ifd->ifq_->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
1563 
1564 	ASSERT_NETISR0;
1565 
1566 	crit_enter();
1567 	lwkt_replymsg(&nmsg->lmsg, 0);	/* reply ASAP */
1568 	crit_exit();
1569 
1570 	ALTQ_SQ_LOCK(ifsq);
1571 	if (cl->sleeping_) {
1572 		cl->sleeping_ = 0;
1573 		cl->undertime_.tv_sec = 0;
1574 
1575 		if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
1576 			CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
1577 			(ifd->restart)(ifd->ifq_);
1578 		}
1579 	}
1580 	ALTQ_SQ_UNLOCK(ifsq);
1581 }
1582 
1583 static void
1584 rmc_restart(void *xcl)
1585 {
1586 	struct rm_class *cl = xcl;
1587 	struct lwkt_msg *lmsg = &cl->callout_nmsg_.lmsg;
1588 
1589 	KASSERT(mycpuid == 0, ("not on cpu0"));
1590 	crit_enter();
1591 	if (lmsg->ms_flags & MSGF_DONE)
1592 		lwkt_sendmsg_oncpu(netisr_cpuport(0), lmsg);
1593 	crit_exit();
1594 }
1595 
1596 /*
1597  * void
1598  * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
1599  *	handling routine for the root class of the link sharing structure.
1600  *
1601  *	Returns: NONE
1602  */
1603 
1604 static void
1605 rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
1606 {
1607         panic("rmc_root_overlimit");
1608 }
1609 
1610 /*
1611  * Packet Queue handling routines.  Eventually, this is to localize the
1612  *	effects on the code of whether queues are red queues or droptail
1613  *	queues.
1614  */
1615 
1616 static int
1617 _rmc_addq(rm_class_t *cl, struct mbuf *m)
1618 {
1619 #ifdef ALTQ_RIO
1620 	if (q_is_rio(cl->q_))
1621 		return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
1622 #endif
1623 #ifdef ALTQ_RED
1624 	if (q_is_red(cl->q_))
1625 		return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
1626 #endif /* ALTQ_RED */
1627 
1628 	if (cl->flags_ & RMCF_CLEARDSCP)
1629 		write_dsfield(m, cl->pktattr_, 0);
1630 
1631 	_addq(cl->q_, m);
1632 	return (0);
1633 }
1634 
1635 /* note: _rmc_dropq is not called for red */
1636 static void
1637 _rmc_dropq(rm_class_t *cl)
1638 {
1639 	struct mbuf *m;
1640 
1641 	if ((m = _getq(cl->q_)) != NULL)
1642 		m_freem(m);
1643 }
1644 
1645 static struct mbuf *
1646 _rmc_getq(rm_class_t *cl)
1647 {
1648 #ifdef ALTQ_RIO
1649 	if (q_is_rio(cl->q_))
1650 		return rio_getq((rio_t *)cl->red_, cl->q_);
1651 #endif
1652 #ifdef ALTQ_RED
1653 	if (q_is_red(cl->q_))
1654 		return red_getq(cl->red_, cl->q_);
1655 #endif
1656 	return _getq(cl->q_);
1657 }
1658 
1659 static struct mbuf *
1660 _rmc_pollq(rm_class_t *cl)
1661 {
1662 	return qhead(cl->q_);
1663 }
1664 
1665 #ifdef CBQ_TRACE
1666 /*
1667  * DDB hook to trace cbq events:
1668  *  the last 1024 events are held in a circular buffer.
1669  *  use "call cbqtrace_dump(N)" to display 20 events from the Nth event.
1670  */
1671 void		cbqtrace_dump(int);
1672 static char	*rmc_funcname(void *);
1673 
1674 static struct rmc_funcs {
1675 	void	*func;
1676 	char	*name;
1677 } rmc_funcs[] = {
1678 	rmc_init,		"rmc_init",
1679 	rmc_queue_packet,	"rmc_queue_packet",
1680 	rmc_under_limit,	"rmc_under_limit",
1681 	rmc_update_class_util,	"rmc_update_class_util",
1682 	rmc_delay_action,	"rmc_delay_action",
1683 	rmc_restart,		"rmc_restart",
1684 	_rmc_wrr_dequeue_next,	"_rmc_wrr_dequeue_next",
1685 	NULL,			NULL
1686 };
1687 
1688 static char *
1689 rmc_funcname(void *func)
1690 {
1691 	struct rmc_funcs *fp;
1692 
1693 	for (fp = rmc_funcs; fp->func != NULL; fp++) {
1694 		if (fp->func == func)
1695 			return (fp->name);
1696 	}
1697 
1698 	return ("unknown");
1699 }
1700 
1701 void
1702 cbqtrace_dump(int counter)
1703 {
1704 	int i, *p;
1705 	char *cp;
1706 
1707 	counter = counter % NCBQTRACE;
1708 	p = (int *)&cbqtrace_buffer[counter];
1709 
1710 	for (i=0; i<20; i++) {
1711 		kprintf("[0x%x] ", *p++);
1712 		kprintf("%s: ", rmc_funcname((void *)*p++));
1713 		cp = (char *)p++;
1714 		kprintf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
1715 		kprintf("%d\n",*p++);
1716 
1717 		if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
1718 			p = (int *)cbqtrace_buffer;
1719 	}
1720 }
1721 #endif /* CBQ_TRACE */
1722 #endif /* ALTQ_CBQ */
1723