1 /* @(#)rm_class.c 1.48 97/12/05 SMI */
2 /* $KAME: altq_rmclass.c,v 1.18 2003/11/06 06:32:53 kjc Exp $ */
3
4 /*
5 * Copyright (c) 1991-1997 Regents of the University of California.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the Network Research
19 * Group at Lawrence Berkeley Laboratory.
20 * 4. Neither the name of the University nor of the Laboratory may be used
21 * to endorse or promote products derived from this software without
22 * specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36  * LBL code modified by speer@eng.sun.com, May 1997.
37 * For questions and/or comments, please send mail to cbq@ee.lbl.gov
38 */
39
40 #include "opt_altq.h"
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43
44 #ifdef ALTQ_CBQ /* cbq is enabled by ALTQ_CBQ option in opt_altq.h */
45
46 #include <sys/param.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/socket.h>
50 #include <sys/systm.h>
51 #include <sys/callout.h>
52 #include <sys/errno.h>
53 #include <sys/time.h>
54 #include <sys/thread.h>
55 #include <sys/thread2.h>
56
57 #include <net/if.h>
58 #include <net/ifq_var.h>
59 #include <net/netmsg2.h>
60 #include <net/netisr2.h>
61
62 #include <net/altq/altq.h>
63 #include <net/altq/altq_rmclass.h>
64 #include <net/altq/altq_rmclass_debug.h>
65 #include <net/altq/altq_red.h>
66 #include <net/altq/altq_rio.h>
67
68 #ifdef CBQ_TRACE
69 static struct cbqtrace cbqtrace_buffer[NCBQTRACE+1];
70 static struct cbqtrace *cbqtrace_ptr = NULL;
71 static int cbqtrace_count;
72 #endif
73
74 /*
75 * Local Macros
76 */
77
78 #define reset_cutoff(ifd) { ifd->cutoff_ = RM_MAXDEPTH; }
79
80 /*
81 * Local routines.
82 */
83
84 static int rmc_satisfied(struct rm_class *, struct timeval *);
85 static void rmc_wrr_set_weights(struct rm_ifdat *);
86 static void rmc_depth_compute(struct rm_class *);
87 static void rmc_depth_recompute(rm_class_t *);
88
89 static struct mbuf *_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
90 static struct mbuf *_rmc_prr_dequeue_next(struct rm_ifdat *, int);
91
92 static int _rmc_addq(rm_class_t *, struct mbuf *);
93 static void _rmc_dropq(rm_class_t *);
94 static struct mbuf *_rmc_getq(rm_class_t *);
95 static struct mbuf *_rmc_pollq(rm_class_t *);
96
97 static int rmc_under_limit(struct rm_class *, struct timeval *);
98 static void rmc_tl_satisfied(struct rm_ifdat *, struct timeval *);
99 static void rmc_drop_action(struct rm_class *);
100 static void rmc_restart(void *);
101 static void rmc_restart_dispatch(netmsg_t);
102 static void rmc_root_overlimit(struct rm_class *, struct rm_class *);
103
104 #define BORROW_OFFTIME
105 /*
106 * BORROW_OFFTIME (experimental):
107 * borrow the offtime of the class borrowing from.
108 * the reason is that when its own offtime is set, the class is unable
109 * to borrow much, especially when cutoff is taking effect.
110 * but when the borrowed class is overloaded (advidle is close to minidle),
111 * use the borrowing class's offtime to avoid overload.
112 */
113 #define ADJUST_CUTOFF
114 /*
115 * ADJUST_CUTOFF (experimental):
116 * if no underlimit class is found due to cutoff, increase cutoff and
117 * retry the scheduling loop.
118 * also, don't invoke delay_actions while cutoff is taking effect,
119 * since a sleeping class won't have a chance to be scheduled in the
120 * next loop.
121 *
122  * now the heuristics for setting the top-level variable (cutoff_) become:
123 * 1. if a packet arrives for a not-overlimit class, set cutoff
124 * to the depth of the class.
125 * 2. if cutoff is i, and a packet arrives for an overlimit class
126 * with an underlimit ancestor at a lower level than i (say j),
127 * then set cutoff to j.
128 * 3. at scheduling a packet, if there is no underlimit class
129 * due to the current cutoff level, increase cutoff by 1 and
130 * then try to schedule again.
131 */
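/*
 * A worked illustration of the three cutoff rules above (hypothetical
 * two-agency hierarchy, added for illustration; not from the original
 * comments):
 *
 *	root (depth 2)
 *	    agency A (depth 1)
 *	        leaf A1 (depth 0)
 *	    agency B (depth 1)
 *
 *	- a packet arrives for A1 while A1 is underlimit:
 *	  rule 1 sets cutoff_ to 0, so no class may borrow.
 *	- a packet arrives for A1 while A1 is overlimit but agency A is
 *	  underlimit: rule 2 sets cutoff_ to 1, so A1 may borrow from A
 *	  but not from root.
 *	- the dequeue loop finds nothing underlimit at the current cutoff:
 *	  rule 3 increases cutoff_ by one and retries.
 */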
132
133 /*
134 * rm_class_t *
135 * rmc_newclass(...) - Create a new resource management class at priority
136 * 'pri' on the interface given by 'ifd'.
137 *
138 * nsecPerByte is the data rate of the interface in nanoseconds/byte.
139 * E.g., 800 for a 10Mb/s ethernet. If the class gets less
140 * than 100% of the bandwidth, this number should be the
141 * 'effective' rate for the class. Let f be the
142 * bandwidth fraction allocated to this class, and let
143 * nsPerByte be the data rate of the output link in
144 * nanoseconds/byte. Then nsecPerByte is set to
145 * nsPerByte / f. E.g., 1600 (= 800 / .5)
146 * for a class that gets 50% of an ethernet's bandwidth.
147 *
148 * action the routine to call when the class is over limit.
149 *
150 * maxq max allowable queue size for class (in packets).
151 *
152 * parent parent class pointer.
153 *
154 * borrow class to borrow from (should be either 'parent' or null).
155 *
156 * maxidle max value allowed for class 'idle' time estimate (this
157 * parameter determines how large an initial burst of packets
158  *		can be before overlimit action is invoked.)
159 *
160 * offtime how long 'delay' action will delay when class goes over
161 * limit (this parameter determines the steady-state burst
162 * size when a class is running over its limit).
163 *
164 * Maxidle and offtime have to be computed from the following: If the
165 * average packet size is s, the bandwidth fraction allocated to this
166 * class is f, we want to allow b packet bursts, and the gain of the
167 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
168 *
169 * ptime = s * nsPerByte * (1 - f) / f
170 * maxidle = ptime * (1 - g^b) / g^b
171 * minidle = -ptime * (1 / (f - 1))
172  *	offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
173 *
174 * Operationally, it's convenient to specify maxidle & offtime in units
175 * independent of the link bandwidth so the maxidle & offtime passed to
176 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
177 * (The constant factor is a scale factor needed to make the parameters
178 * integers. This scaling also means that the 'unscaled' values of
179 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
180 * not nanoseconds.) Also note that the 'idle' filter computation keeps
181 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
182 * maxidle also must be scaled upward by this value. Thus, the passed
183 * values for maxidle and offtime can be computed as follows:
184 *
185 * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
186 * offtime = offtime * 8 / (1000 * nsecPerByte)
187 *
188 * When USE_HRTIME is employed, then maxidle and offtime become:
189  *	    maxidle = maxidle * (8.0 / nsecPerByte);
190 * offtime = offtime * (8.0 / nsecPerByte);
191 */
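/*
 * A short worked example of the rate parameters (hypothetical numbers,
 * added for illustration): a 100Mb/s link carries 12.5 Mbytes/s, i.e.
 * 80 nanoseconds/byte.  A class allotted 25% of that link is created with
 *
 *	nsecPerByte = 80 / 0.25 = 320
 *
 * so that, with RM_NS_PER_SEC being one second in nanoseconds,
 *
 *	allotment_ = RM_NS_PER_SEC / nsecPerByte = 3125000 bytes/sec (~25Mb/s)
 *
 * which is the byte rate the WRR code below distributes for this class.
 */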
192 struct rm_class *
193 rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
194 void (*action)(rm_class_t *, rm_class_t *), int maxq,
195 struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
196 int minidle, u_int offtime, int pktsize, int flags)
197 {
198 struct rm_class *cl;
199 struct rm_class *peer;
200
201 if (pri >= RM_MAXPRIO)
202 return (NULL);
203 #ifndef ALTQ_RED
204 if (flags & RMCF_RED) {
205 #ifdef ALTQ_DEBUG
206 kprintf("rmc_newclass: RED not configured for CBQ!\n");
207 #endif
208 return (NULL);
209 }
210 #endif
211 #ifndef ALTQ_RIO
212 if (flags & RMCF_RIO) {
213 #ifdef ALTQ_DEBUG
214 kprintf("rmc_newclass: RIO not configured for CBQ!\n");
215 #endif
216 return (NULL);
217 }
218 #endif
219
220 cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
221 callout_init(&cl->callout_);
222 netmsg_init(&cl->callout_nmsg_, NULL, &netisr_adone_rport,
223 MSGF_PRIORITY, rmc_restart_dispatch);
224 cl->callout_nmsg_.lmsg.u.ms_resultp = cl;
225
226 cl->q_ = kmalloc(sizeof(*cl->q_), M_ALTQ, M_WAITOK | M_ZERO);
227
228 /*
229 * Class initialization.
230 */
231 cl->children_ = NULL;
232 cl->parent_ = parent;
233 cl->borrow_ = borrow;
234 cl->leaf_ = 1;
235 cl->ifdat_ = ifd;
236 cl->pri_ = pri;
237 cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
238 cl->depth_ = 0;
239 cl->qthresh_ = 0;
240 cl->ns_per_byte_ = nsecPerByte;
241
242 qlimit(cl->q_) = maxq;
243 qtype(cl->q_) = Q_DROPHEAD;
244 qlen(cl->q_) = 0;
245 cl->flags_ = flags;
246
247 #if 1 /* minidle is also scaled in ALTQ */
248 cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
249 if (cl->minidle_ > 0)
250 cl->minidle_ = 0;
251 #else
252 cl->minidle_ = minidle;
253 #endif
254 cl->maxidle_ = (maxidle * nsecPerByte) / 8;
255 if (cl->maxidle_ == 0)
256 cl->maxidle_ = 1;
257 #if 1 /* offtime is also scaled in ALTQ */
258 cl->avgidle_ = cl->maxidle_;
259 cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
260 if (cl->offtime_ == 0)
261 cl->offtime_ = 1;
262 #else
263 cl->avgidle_ = 0;
264 cl->offtime_ = (offtime * nsecPerByte) / 8;
265 #endif
266 cl->overlimit = action;
267
268 #ifdef ALTQ_RED
269 if (flags & (RMCF_RED|RMCF_RIO)) {
270 int red_flags, red_pkttime;
271
272 red_flags = 0;
273 if (flags & RMCF_ECN)
274 red_flags |= REDF_ECN;
275 #ifdef ALTQ_RIO
276 if (flags & RMCF_CLEARDSCP)
277 red_flags |= RIOF_CLEARDSCP;
278 #endif
279 red_pkttime = nsecPerByte * pktsize / 1000;
280
281 if (flags & RMCF_RED) {
282 cl->red_ = red_alloc(0, 0,
283 qlimit(cl->q_) * 10/100,
284 qlimit(cl->q_) * 30/100,
285 red_flags, red_pkttime);
286 if (cl->red_ != NULL)
287 qtype(cl->q_) = Q_RED;
288 }
289 #ifdef ALTQ_RIO
290 else {
291 cl->red_ = (red_t *)rio_alloc(0, NULL,
292 red_flags, red_pkttime);
293 if (cl->red_ != NULL)
294 qtype(cl->q_) = Q_RIO;
295 }
296 #endif
297 }
298 #endif /* ALTQ_RED */
299
300 /*
301 * put the class into the class tree
302 */
303 crit_enter();
304 if ((peer = ifd->active_[pri]) != NULL) {
305 /* find the last class at this pri */
306 cl->peer_ = peer;
307 while (peer->peer_ != ifd->active_[pri])
308 peer = peer->peer_;
309 peer->peer_ = cl;
310 } else {
311 ifd->active_[pri] = cl;
312 cl->peer_ = cl;
313 }
314
315 if (cl->parent_) {
316 cl->next_ = parent->children_;
317 parent->children_ = cl;
318 parent->leaf_ = 0;
319 }
320
321 /*
322 * Compute the depth of this class and its ancestors in the class
323 * hierarchy.
324 */
325 rmc_depth_compute(cl);
326
327 /*
328 * If CBQ's WRR is enabled, then initialize the class WRR state.
329 */
330 if (ifd->wrr_) {
331 ifd->num_[pri]++;
332 ifd->alloc_[pri] += cl->allotment_;
333 rmc_wrr_set_weights(ifd);
334 }
335 crit_exit();
336 return (cl);
337 }
338
339 int
340 rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
341 int minidle, u_int offtime, int pktsize)
342 {
343 struct rm_ifdat *ifd;
344 u_int old_allotment;
345
346 ifd = cl->ifdat_;
347 old_allotment = cl->allotment_;
348
349 crit_enter();
350 cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
351 cl->qthresh_ = 0;
352 cl->ns_per_byte_ = nsecPerByte;
353
354 qlimit(cl->q_) = maxq;
355
356 #if 1 /* minidle is also scaled in ALTQ */
357 cl->minidle_ = (minidle * nsecPerByte) / 8;
358 if (cl->minidle_ > 0)
359 cl->minidle_ = 0;
360 #else
361 cl->minidle_ = minidle;
362 #endif
363 cl->maxidle_ = (maxidle * nsecPerByte) / 8;
364 if (cl->maxidle_ == 0)
365 cl->maxidle_ = 1;
366 #if 1 /* offtime is also scaled in ALTQ */
367 cl->avgidle_ = cl->maxidle_;
368 cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
369 if (cl->offtime_ == 0)
370 cl->offtime_ = 1;
371 #else
372 cl->avgidle_ = 0;
373 cl->offtime_ = (offtime * nsecPerByte) / 8;
374 #endif
375
376 /*
377 * If CBQ's WRR is enabled, then initialize the class WRR state.
378 */
379 if (ifd->wrr_) {
380 ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
381 rmc_wrr_set_weights(ifd);
382 }
383 crit_exit();
384 return (0);
385 }
386
387 /*
388 * static void
389 * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
390  *	the appropriate round robin weights for the CBQ weighted round robin
391 * algorithm.
392 *
393 * Returns: NONE
394 */
395
396 static void
397 rmc_wrr_set_weights(struct rm_ifdat *ifd)
398 {
399 int i;
400 struct rm_class *cl, *clh;
401
402 for (i = 0; i < RM_MAXPRIO; i++) {
403 /*
404 * This is inverted from that of the simulator to
405 * maintain precision.
406 */
407 if (ifd->num_[i] == 0)
408 ifd->M_[i] = 0;
409 else
410 ifd->M_[i] = ifd->alloc_[i] /
411 (ifd->num_[i] * ifd->maxpkt_);
412 /*
413 * Compute the weighted allotment for each class.
414 * This takes the expensive div instruction out
415 * of the main loop for the wrr scheduling path.
416 * These only get recomputed when a class comes or
417 * goes.
418 */
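/*
 * Rough worked example (hypothetical numbers): two classes at this
 * priority with maxpkt_ = 1500 and allotments of 3125000 and
 * 1250000 bytes/sec give alloc_[i] = 4375000 and
 *
 *	M_[i]        = 4375000 / (2 * 1500) = 1458
 *	w_allotment_ = 3125000 / 1458       = 2143	(the 25Mb/s class)
 *	w_allotment_ = 1250000 / 1458       =  857	(the 10Mb/s class)
 *
 * so each WRR round hands out bytes in proportion to the allotments.
 */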
419 if (ifd->active_[i] != NULL) {
420 clh = cl = ifd->active_[i];
421 do {
422 /* safe-guard for slow link or alloc_ == 0 */
423 if (ifd->M_[i] == 0)
424 cl->w_allotment_ = 0;
425 else
426 cl->w_allotment_ = cl->allotment_ /
427 ifd->M_[i];
428 cl = cl->peer_;
429 } while ((cl != NULL) && (cl != clh));
430 }
431 }
432 }
433
434 int
435 rmc_get_weight(struct rm_ifdat *ifd, int pri)
436 {
437 if ((pri >= 0) && (pri < RM_MAXPRIO))
438 return (ifd->M_[pri]);
439 else
440 return (0);
441 }
442
443 /*
444 * static void
445 * rmc_depth_compute(struct rm_class *cl) - This function computes the
446 * appropriate depth of class 'cl' and its ancestors.
447 *
448 * Returns: NONE
449 */
450
451 static void
452 rmc_depth_compute(struct rm_class *cl)
453 {
454 rm_class_t *t = cl, *p;
455
456 /*
457 * Recompute the depth for the branch of the tree.
458 */
459 while (t != NULL) {
460 p = t->parent_;
461 if (p && (t->depth_ >= p->depth_)) {
462 p->depth_ = t->depth_ + 1;
463 t = p;
464 } else
465 t = NULL;
466 }
467 }
468
469 /*
470 * static void
471 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
472 * the depth of the tree after a class has been deleted.
473 *
474 * Returns: NONE
475 */
476
477 static void
478 rmc_depth_recompute(rm_class_t *cl)
479 {
480 #if 1 /* ALTQ */
481 rm_class_t *p, *t;
482
483 p = cl;
484 while (p != NULL) {
485 if ((t = p->children_) == NULL) {
486 p->depth_ = 0;
487 } else {
488 int cdepth = 0;
489
490 while (t != NULL) {
491 if (t->depth_ > cdepth)
492 cdepth = t->depth_;
493 t = t->next_;
494 }
495
496 if (p->depth_ == cdepth + 1)
497 /* no change to this parent */
498 return;
499
500 p->depth_ = cdepth + 1;
501 }
502
503 p = p->parent_;
504 }
505 #else
506 rm_class_t *t;
507
508 if (cl->depth_ >= 1) {
509 if (cl->children_ == NULL) {
510 cl->depth_ = 0;
511 } else if ((t = cl->children_) != NULL) {
512 while (t != NULL) {
513 if (t->children_ != NULL)
514 rmc_depth_recompute(t);
515 t = t->next_;
516 }
517 } else
518 rmc_depth_compute(cl);
519 }
520 #endif
521 }
522
523 /*
524 * void
525 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
526 * function deletes a class from the link-sharing structure and frees
527 * all resources associated with the class.
528 *
529 * Returns: NONE
530 */
531
532 void
533 rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
534 {
535 struct rm_class *p, *head, *previous;
536 struct netmsg_base smsg;
537 struct ifaltq_subque *ifsq =
538 &ifd->ifq_->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
539
540 KKASSERT(cl->children_ == NULL);
541
542 ALTQ_SQ_ASSERT_LOCKED(ifsq);
543 ALTQ_SQ_UNLOCK(ifsq);
544 callout_cancel(&cl->callout_);
545 /* Make sure that cl->callout_nmsg_ stops. */
546 netmsg_init(&smsg, NULL, &curthread->td_msgport, 0,
547 netmsg_sync_handler);
548 lwkt_domsg(netisr_cpuport(0), &smsg.lmsg, 0);
549 callout_terminate(&cl->callout_);
550 ALTQ_SQ_LOCK(ifsq);
551
552 crit_enter();
553
554 if (ifd->pollcache_ == cl)
555 ifd->pollcache_ = NULL;
556
557 /*
558 * Free packets in the packet queue.
559 * XXX - this may not be a desired behavior. Packets should be
560 * re-queued.
561 */
562 rmc_dropall(cl);
563
564 /*
565 * If the class has a parent, then remove the class from the
566 	 * parent's children chain.
567 */
568 if (cl->parent_ != NULL) {
569 head = cl->parent_->children_;
570 p = previous = head;
571 if (head->next_ == NULL) {
572 KKASSERT(head == cl);
573 cl->parent_->children_ = NULL;
574 cl->parent_->leaf_ = 1;
575 } else while (p != NULL) {
576 if (p == cl) {
577 if (cl == head)
578 cl->parent_->children_ = cl->next_;
579 else
580 previous->next_ = cl->next_;
581 cl->next_ = NULL;
582 p = NULL;
583 } else {
584 previous = p;
585 p = p->next_;
586 }
587 }
588 }
589
590 /*
591 * Delete class from class priority peer list.
592 */
593 if ((p = ifd->active_[cl->pri_]) != NULL) {
594 /*
595 * If there is more than one member of this priority
596 * level, then look for class(cl) in the priority level.
597 */
598 if (p != p->peer_) {
599 while (p->peer_ != cl)
600 p = p->peer_;
601 p->peer_ = cl->peer_;
602
603 if (ifd->active_[cl->pri_] == cl)
604 ifd->active_[cl->pri_] = cl->peer_;
605 } else {
606 KKASSERT(p == cl);
607 ifd->active_[cl->pri_] = NULL;
608 }
609 }
610
611 /*
612 * Recompute the WRR weights.
613 */
614 if (ifd->wrr_) {
615 ifd->alloc_[cl->pri_] -= cl->allotment_;
616 ifd->num_[cl->pri_]--;
617 rmc_wrr_set_weights(ifd);
618 }
619
620 /*
621 * Re-compute the depth of the tree.
622 */
623 #if 1 /* ALTQ */
624 rmc_depth_recompute(cl->parent_);
625 #else
626 rmc_depth_recompute(ifd->root_);
627 #endif
628
629 crit_exit();
630
631 /*
632 * Free the class structure.
633 */
634 if (cl->red_ != NULL) {
635 #ifdef ALTQ_RIO
636 if (q_is_rio(cl->q_))
637 rio_destroy((rio_t *)cl->red_);
638 #endif
639 #ifdef ALTQ_RED
640 if (q_is_red(cl->q_))
641 red_destroy(cl->red_);
642 #endif
643 }
644 kfree(cl->q_, M_ALTQ);
645 kfree(cl, M_ALTQ);
646 }
647
648 /*
649 * void
650 * rmc_init(...) - Initialize the resource management data structures
651 * associated with the output portion of interface 'ifp'. 'ifd' is
652 * where the structures will be built (for backwards compatibility, the
653 * structures aren't kept in the ifnet struct). 'nsecPerByte'
654 * gives the link speed (inverse of bandwidth) in nanoseconds/byte.
655 * 'restart' is the driver-specific routine that the generic 'delay
656 * until under limit' action will call to restart output. `maxq'
657 * is the queue size of the 'link' & 'default' classes. 'maxqueued'
658 * is the maximum number of packets that the resource management
659 * code will allow to be queued 'downstream' (this is typically 1).
660 *
661 * Returns: NONE
662 */
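/*
 * Minimal usage sketch (hypothetical caller such as a queueing
 * discipline's attach routine; everything but the rmc_* calls is
 * illustrative):
 *
 *	struct rm_ifdat *ifd;
 *	struct rm_class *cl;
 *
 *	ifd = kmalloc(sizeof(*ifd), M_ALTQ, M_WAITOK | M_ZERO);
 *
 *	// 100Mb/s link => 80 ns/byte, 50-packet queues, 1 packet downstream
 *	rmc_init(ifq, ifd, 80, my_restart, 50, 1, maxidle, minidle,
 *	    offtime, 0);
 *
 *	// a leaf class at priority 1 given 25% of the link
 *	cl = rmc_newclass(1, ifd, 320, rmc_delay_action, 50, ifd->root_,
 *	    ifd->root_, maxidle, minidle, offtime, pktsize, 0);
 *
 * Packets are then handed in with rmc_queue_packet(cl, m) and pulled out
 * with rmc_dequeue_next(ifd, ALTDQ_REMOVE).
 */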
663
664 void
665 rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, u_int nsecPerByte,
666 void (*restart)(struct ifaltq *), int maxq, int maxqueued, u_int maxidle,
667 int minidle, u_int offtime, int flags)
668 {
669 int i, mtu;
670
671 /*
672 * Initialize the CBQ tracing/debug facility.
673 */
674 CBQTRACEINIT();
675
676 bzero(ifd, sizeof (*ifd));
677 mtu = ifq->altq_ifp->if_mtu;
678 ifd->ifq_ = ifq;
679 ifd->restart = restart;
680 ifd->maxqueued_ = maxqueued;
681 ifd->ns_per_byte_ = nsecPerByte;
682 ifd->maxpkt_ = mtu;
683 ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
684 ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
685 #if 1
686 ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
687 if (mtu * nsecPerByte > 10 * 1000000)
688 ifd->maxiftime_ /= 4;
689 #endif
690
691 reset_cutoff(ifd);
692 CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);
693
694 /*
695 * Initialize the CBQ's WRR state.
696 */
697 for (i = 0; i < RM_MAXPRIO; i++) {
698 ifd->alloc_[i] = 0;
699 ifd->M_[i] = 0;
700 ifd->num_[i] = 0;
701 ifd->na_[i] = 0;
702 ifd->active_[i] = NULL;
703 }
704
705 /*
706 * Initialize current packet state.
707 */
708 ifd->qi_ = 0;
709 ifd->qo_ = 0;
710 for (i = 0; i < RM_MAXQUEUED; i++) {
711 ifd->class_[i] = NULL;
712 ifd->curlen_[i] = 0;
713 ifd->borrowed_[i] = NULL;
714 }
715
716 /*
717 * Create the root class of the link-sharing structure.
718 */
719 ifd->root_ = rmc_newclass(0, ifd, nsecPerByte, rmc_root_overlimit,
720 maxq, 0, 0, maxidle, minidle, offtime, 0, 0);
721 if (ifd->root_ == NULL) {
722 kprintf("rmc_init: root class not allocated\n");
723 return ;
724 }
725 ifd->root_->depth_ = 0;
726 }
727
728 /*
729 * void
730 * rmc_queue_packet(struct rm_class *cl, struct mbuf *m) - Add packet given by
731 * mbuf 'm' to queue for resource class 'cl'. This routine is called
732 * by a driver's if_output routine. This routine must be called with
733 * output packet completion interrupts locked out (to avoid racing with
734 * rmc_dequeue_next).
735 *
736 * Returns: 0 on successful queueing
737 * -1 when packet drop occurs
738 */
739 int
740 rmc_queue_packet(struct rm_class *cl, struct mbuf *m)
741 {
742 struct timeval now;
743 struct rm_ifdat *ifd = cl->ifdat_;
744 int cpri = cl->pri_;
745 int is_empty = qempty(cl->q_);
746
747 RM_GETTIME(now);
748 if (ifd->cutoff_ > 0) {
749 if (TV_LT(&cl->undertime_, &now)) {
750 if (ifd->cutoff_ > cl->depth_)
751 ifd->cutoff_ = cl->depth_;
752 CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
753 }
754 #if 1 /* ALTQ */
755 else {
756 /*
757 * the class is overlimit. if the class has
758 * underlimit ancestors, set cutoff to the lowest
759 * depth among them.
760 */
761 struct rm_class *borrow = cl->borrow_;
762
763 while (borrow != NULL &&
764 borrow->depth_ < ifd->cutoff_) {
765 if (TV_LT(&borrow->undertime_, &now)) {
766 ifd->cutoff_ = borrow->depth_;
767 CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
768 break;
769 }
770 borrow = borrow->borrow_;
771 }
772 }
773 #else /* !ALTQ */
774 else if ((ifd->cutoff_ > 1) && cl->borrow_) {
775 if (TV_LT(&cl->borrow_->undertime_, &now)) {
776 ifd->cutoff_ = cl->borrow_->depth_;
777 CBQTRACE(rmc_queue_packet, 'ffob',
778 cl->borrow_->depth_);
779 }
780 }
781 #endif /* !ALTQ */
782 }
783
784 if (_rmc_addq(cl, m) < 0)
785 /* failed */
786 return (-1);
787
788 if (is_empty) {
789 CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
790 ifd->na_[cpri]++;
791 }
792
793 if (qlen(cl->q_) > qlimit(cl->q_)) {
794 /* note: qlimit can be set to 0 or 1 */
795 rmc_drop_action(cl);
796 return (-1);
797 }
798 return (0);
799 }
800
801 /*
802 * void
803 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
804  *	classes to see if they are satisfied.
805 */
806
807 static void
808 rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now)
809 {
810 int i;
811 rm_class_t *p, *bp;
812
813 for (i = RM_MAXPRIO - 1; i >= 0; i--) {
814 if ((bp = ifd->active_[i]) != NULL) {
815 p = bp;
816 do {
817 if (!rmc_satisfied(p, now)) {
818 ifd->cutoff_ = p->depth_;
819 return;
820 }
821 p = p->peer_;
822 } while (p != bp);
823 }
824 }
825
826 reset_cutoff(ifd);
827 }
828
829 /*
830  * rmc_satisfied - Return 1 if the class is satisfied, 0 otherwise.
831 */
832
833 static int
834 rmc_satisfied(struct rm_class *cl, struct timeval *now)
835 {
836 rm_class_t *p;
837
838 if (cl == NULL)
839 return (1);
840 if (TV_LT(now, &cl->undertime_))
841 return (1);
842 if (cl->depth_ == 0) {
843 if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
844 return (0);
845 else
846 return (1);
847 }
848 if (cl->children_ != NULL) {
849 p = cl->children_;
850 while (p != NULL) {
851 if (!rmc_satisfied(p, now))
852 return (0);
853 p = p->next_;
854 }
855 }
856
857 return (1);
858 }
859
860 /*
861 * Return 1 if class 'cl' is under limit or can borrow from a parent,
862 * 0 if overlimit. As a side-effect, this routine will invoke the
863  * class overlimit action if the class is overlimit.
864 */
865
866 static int
867 rmc_under_limit(struct rm_class *cl, struct timeval *now)
868 {
869 rm_class_t *p = cl;
870 rm_class_t *top;
871 struct rm_ifdat *ifd = cl->ifdat_;
872
873 ifd->borrowed_[ifd->qi_] = NULL;
874 /*
875 * If cl is the root class, then always return that it is
876 * underlimit. Otherwise, check to see if the class is underlimit.
877 */
878 if (cl->parent_ == NULL)
879 return (1);
880
881 if (cl->sleeping_) {
882 if (TV_LT(now, &cl->undertime_))
883 return (0);
884
885 callout_stop(&cl->callout_);
886 cl->sleeping_ = 0;
887 cl->undertime_.tv_sec = 0;
888 return (1);
889 }
890
891 top = NULL;
892 while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
893 if (((cl = cl->borrow_) == NULL) ||
894 (cl->depth_ > ifd->cutoff_)) {
895 #ifdef ADJUST_CUTOFF
896 if (cl != NULL)
897 /* cutoff is taking effect, just
898 return false without calling
899 the delay action. */
900 return (0);
901 #endif
902 #ifdef BORROW_OFFTIME
903 /*
904 * check if the class can borrow offtime too.
905 * borrow offtime from the top of the borrow
906 * chain if the top class is not overloaded.
907 */
908 if (cl != NULL) {
909 /* cutoff is taking effect, use this class as top. */
910 top = cl;
911 CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
912 }
913 if (top != NULL && top->avgidle_ == top->minidle_)
914 top = NULL;
915 p->overtime_ = *now;
916 (p->overlimit)(p, top);
917 #else
918 p->overtime_ = *now;
919 (p->overlimit)(p, NULL);
920 #endif
921 return (0);
922 }
923 top = cl;
924 }
925
926 if (cl != p)
927 ifd->borrowed_[ifd->qi_] = cl;
928 return (1);
929 }
930
931 /*
932  * _rmc_wrr_dequeue_next() - This is the scheduler for WRR, as opposed to
933  *	packet-by-packet round robin.
934 *
935 * The heart of the weighted round-robin scheduler, which decides which
936 * class next gets to send a packet. Highest priority first, then
937  *	weighted round-robin within priorities.
938 *
939 * Each able-to-send class gets to send until its byte allocation is
940 * exhausted. Thus, the active pointer is only changed after a class has
941 * exhausted its allocation.
942 *
943 * If the scheduler finds no class that is underlimit or able to borrow,
944 * then the first class found that had a nonzero queue and is allowed to
945 * borrow gets to send.
946 */
947
948 static struct mbuf *
949 _rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
950 {
951 struct rm_class *cl = NULL, *first = NULL;
952 u_int deficit;
953 int cpri;
954 struct mbuf *m;
955 struct timeval now;
956
957 RM_GETTIME(now);
958
959 /*
960 * if the driver polls the top of the queue and then removes
961 * the polled packet, we must return the same packet.
962 */
963 if (op == ALTDQ_REMOVE && ifd->pollcache_) {
964 cl = ifd->pollcache_;
965 cpri = cl->pri_;
966 if (ifd->efficient_) {
967 /* check if this class is overlimit */
968 if (cl->undertime_.tv_sec != 0 &&
969 rmc_under_limit(cl, &now) == 0)
970 first = cl;
971 }
972 ifd->pollcache_ = NULL;
973 goto _wrr_out;
974 }
975 /* mode == ALTDQ_POLL || pollcache == NULL */
976 ifd->pollcache_ = NULL;
977 ifd->borrowed_[ifd->qi_] = NULL;
978 #ifdef ADJUST_CUTOFF
979 _again:
980 #endif
981 for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
982 if (ifd->na_[cpri] == 0)
983 continue;
984 deficit = 0;
985 /*
986 * Loop through twice for a priority level, if some class
987 * was unable to send a packet the first round because
988 * of the weighted round-robin mechanism.
989 * During the second loop at this level, deficit==2.
990 * (This second loop is not needed if for every class,
991 		 * "M[cl->pri_]" times "cl->allotment" is greater than
992 * the byte size for the largest packet in the class.)
993 */
994 _wrr_loop:
995 cl = ifd->active_[cpri];
996 KKASSERT(cl != NULL);
997 do {
998 if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
999 cl->bytes_alloc_ += cl->w_allotment_;
1000 if (!qempty(cl->q_)) {
1001 if ((cl->undertime_.tv_sec == 0) ||
1002 rmc_under_limit(cl, &now)) {
1003 if (cl->bytes_alloc_ > 0 || deficit > 1)
1004 goto _wrr_out;
1005
1006 /* underlimit but no alloc */
1007 deficit = 1;
1008 #if 1
1009 ifd->borrowed_[ifd->qi_] = NULL;
1010 #endif
1011 }
1012 else if (first == NULL && cl->borrow_ != NULL)
1013 first = cl; /* borrowing candidate */
1014 }
1015
1016 cl->bytes_alloc_ = 0;
1017 cl = cl->peer_;
1018 } while (cl != ifd->active_[cpri]);
1019
1020 if (deficit == 1) {
1021 /* first loop found an underlimit class with deficit */
1022 /* Loop on same priority level, with new deficit. */
1023 deficit = 2;
1024 goto _wrr_loop;
1025 }
1026 }
1027
1028 #ifdef ADJUST_CUTOFF
1029 /*
1030 * no underlimit class found. if cutoff is taking effect,
1031 * increase cutoff and try again.
1032 */
1033 if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1034 ifd->cutoff_++;
1035 CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
1036 goto _again;
1037 }
1038 #endif /* ADJUST_CUTOFF */
1039 /*
1040 * If LINK_EFFICIENCY is turned on, then the first overlimit
1041 * class we encounter will send a packet if all the classes
1042 * of the link-sharing structure are overlimit.
1043 */
1044 reset_cutoff(ifd);
1045 CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);
1046
1047 if (!ifd->efficient_ || first == NULL)
1048 return (NULL);
1049
1050 cl = first;
1051 cpri = cl->pri_;
1052 #if 0 /* too time-consuming for nothing */
1053 if (cl->sleeping_)
1054 callout_stop(&cl->callout_);
1055 cl->sleeping_ = 0;
1056 cl->undertime_.tv_sec = 0;
1057 #endif
1058 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1059 ifd->cutoff_ = cl->borrow_->depth_;
1060
1061 /*
1062 	 * Dequeue the packet and do the bookkeeping...
1063 */
1064 _wrr_out:
1065 if (op == ALTDQ_REMOVE) {
1066 m = _rmc_getq(cl);
1067 if (m == NULL)
1068 panic("_rmc_wrr_dequeue_next");
1069 if (qempty(cl->q_))
1070 ifd->na_[cpri]--;
1071
1072 /*
1073 * Update class statistics and link data.
1074 */
1075 if (cl->bytes_alloc_ > 0)
1076 cl->bytes_alloc_ -= m_pktlen(m);
1077
1078 if ((cl->bytes_alloc_ <= 0) || first == cl)
1079 ifd->active_[cl->pri_] = cl->peer_;
1080 else
1081 ifd->active_[cl->pri_] = cl;
1082
1083 ifd->class_[ifd->qi_] = cl;
1084 ifd->curlen_[ifd->qi_] = m_pktlen(m);
1085 ifd->now_[ifd->qi_] = now;
1086 ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1087 ifd->queued_++;
1088 } else {
1089 		/* mode == ALTDQ_POLL */
1090 m = _rmc_pollq(cl);
1091 #ifdef foo
1092 /*
1093 * Don't use poll cache; the poll/dequeue
1094 * model is no longer applicable to SMP
1095 * system. e.g.
1096 * CPU-A CPU-B
1097 * : :
1098 * poll :
1099 * : poll
1100 * dequeue (+) :
1101 *
1102 * The dequeue at (+) will hit the poll
1103 * cache set by CPU-B.
1104 */
1105 ifd->pollcache_ = cl;
1106 #endif
1107 }
1108 return (m);
1109 }
1110
1111 /*
1112 * Dequeue & return next packet from the highest priority class that
1113 * has a packet to send & has enough allocation to send it. This
1114 * routine is called by a driver whenever it needs a new packet to
1115 * output.
1116 */
1117 static struct mbuf *
1118 _rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
1119 {
1120 struct mbuf *m;
1121 int cpri;
1122 struct rm_class *cl, *first = NULL;
1123 struct timeval now;
1124
1125 RM_GETTIME(now);
1126
1127 /*
1128 * if the driver polls the top of the queue and then removes
1129 * the polled packet, we must return the same packet.
1130 */
1131 if (op == ALTDQ_REMOVE && ifd->pollcache_) {
1132 cl = ifd->pollcache_;
1133 cpri = cl->pri_;
1134 ifd->pollcache_ = NULL;
1135 goto _prr_out;
1136 }
1137 /* mode == ALTDQ_POLL || pollcache == NULL */
1138 ifd->pollcache_ = NULL;
1139 ifd->borrowed_[ifd->qi_] = NULL;
1140 #ifdef ADJUST_CUTOFF
1141 _again:
1142 #endif
1143 for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
1144 if (ifd->na_[cpri] == 0)
1145 continue;
1146 cl = ifd->active_[cpri];
1147 KKASSERT(cl != NULL);
1148 do {
1149 if (!qempty(cl->q_)) {
1150 if ((cl->undertime_.tv_sec == 0) ||
1151 rmc_under_limit(cl, &now))
1152 goto _prr_out;
1153 if (first == NULL && cl->borrow_ != NULL)
1154 first = cl;
1155 }
1156 cl = cl->peer_;
1157 } while (cl != ifd->active_[cpri]);
1158 }
1159
1160 #ifdef ADJUST_CUTOFF
1161 /*
1162 * no underlimit class found. if cutoff is taking effect, increase
1163 * cutoff and try again.
1164 */
1165 if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1166 ifd->cutoff_++;
1167 goto _again;
1168 }
1169 #endif /* ADJUST_CUTOFF */
1170 /*
1171 * If LINK_EFFICIENCY is turned on, then the first overlimit
1172 * class we encounter will send a packet if all the classes
1173 * of the link-sharing structure are overlimit.
1174 */
1175 reset_cutoff(ifd);
1176 if (!ifd->efficient_ || first == NULL)
1177 return (NULL);
1178
1179 cl = first;
1180 cpri = cl->pri_;
1181 #if 0 /* too time-consuming for nothing */
1182 if (cl->sleeping_)
1183 callout_stop(&cl->callout_);
1184 cl->sleeping_ = 0;
1185 cl->undertime_.tv_sec = 0;
1186 #endif
1187 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1188 ifd->cutoff_ = cl->borrow_->depth_;
1189
1190 /*
1191 	 * Dequeue the packet and do the bookkeeping...
1192 */
1193 _prr_out:
1194 if (op == ALTDQ_REMOVE) {
1195 m = _rmc_getq(cl);
1196 if (m == NULL)
1197 panic("_rmc_prr_dequeue_next");
1198 if (qempty(cl->q_))
1199 ifd->na_[cpri]--;
1200
1201 ifd->active_[cpri] = cl->peer_;
1202
1203 ifd->class_[ifd->qi_] = cl;
1204 ifd->curlen_[ifd->qi_] = m_pktlen(m);
1205 ifd->now_[ifd->qi_] = now;
1206 ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1207 ifd->queued_++;
1208 } else {
1209 /* mode == ALTDQ_POLL */
1210 m = _rmc_pollq(cl);
1211 #ifdef foo
1212 /*
1213 * Don't use poll cache; the poll/dequeue
1214 * model is no longer applicable to SMP
1215 * system. e.g.
1216 * CPU-A CPU-B
1217 * : :
1218 * poll :
1219 * : poll
1220 * dequeue (+) :
1221 *
1222 * The dequeue at (+) will hit the poll
1223 * cache set by CPU-B.
1224 */
1225 ifd->pollcache_ = cl;
1226 #endif
1227 }
1228 return (m);
1229 }
1230
1231 /*
1232 * struct mbuf *
1233 * rmc_dequeue_next(struct rm_ifdat *ifd, struct timeval *now) - this function
1234 * is invoked by the packet driver to get the next packet to be
1235 * dequeued and output on the link. If WRR is enabled, then the
1236  *	WRR dequeue next routine will determine the next packet to be sent.
1237 * Otherwise, packet-by-packet round robin is invoked.
1238 *
1239 * Returns: NULL, if a packet is not available or if all
1240 * classes are overlimit.
1241 *
1242 * Otherwise, Pointer to the next packet.
1243 */
1244
1245 struct mbuf *
1246 rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
1247 {
1248 if (ifd->queued_ >= ifd->maxqueued_)
1249 return (NULL);
1250 else if (ifd->wrr_)
1251 return (_rmc_wrr_dequeue_next(ifd, mode));
1252 else
1253 return (_rmc_prr_dequeue_next(ifd, mode));
1254 }
1255
1256 /*
1257 * Update the utilization estimate for the packet that just completed.
1258 * The packet's class & the parent(s) of that class all get their
1259 * estimators updated. This routine is called by the driver's output-
1260 * packet-completion interrupt service routine.
1261 */
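/*
 * Sketch of the estimator update performed in the loop below: for each
 * class on the path to the root,
 *
 *	idle     = (ifnow_ - cl->last_) - pkt_time;	(gap minus xmit time)
 *	avgidle += idle - (avgidle >> RM_FILTER_GAIN);	(EWMA of idle time)
 *
 * avgidle <= 0 means the class has been sending faster than its allotted
 * rate; it is then marked overlimit until cl->undertime_.
 */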
1262
1263 /*
1264  * a macro to approximate "divide by 1000"; it actually multiplies by about
1265  * 0.000999 when the value has enough effective digits.
1266 * (on pentium, mul takes 9 cycles but div takes 46!)
1267 */
1268 #define NSEC_TO_USEC(t) (((t) >> 10) + ((t) >> 16) + ((t) >> 17))
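/*
 * Quick check of the shift approximation above (added for illustration):
 *
 *	1/1024 + 1/65536 + 1/131072 ~= 0.0009995
 *
 * i.e. within about 0.05% of a true divide by 1000, using only shifts and
 * adds.  For example, NSEC_TO_USEC(1200000) = 1171 + 18 + 9 = 1198 versus
 * the exact 1200.
 */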
1269 void
1270 rmc_update_class_util(struct rm_ifdat *ifd)
1271 {
1272 int idle, avgidle, pktlen;
1273 int pkt_time, tidle;
1274 rm_class_t *cl, *borrowed;
1275 rm_class_t *borrows;
1276 struct timeval *nowp;
1277
1278 /*
1279 * Get the most recent completed class.
1280 */
1281 if ((cl = ifd->class_[ifd->qo_]) == NULL)
1282 return;
1283
1284 pktlen = ifd->curlen_[ifd->qo_];
1285 borrowed = ifd->borrowed_[ifd->qo_];
1286 borrows = borrowed;
1287
1288 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1289
1290 /*
1291 * Run estimator on class and its ancestors.
1292 */
1293 /*
1294 	 * rmc_update_class_util is designed to be called when the
1295 * transfer is completed from a xmit complete interrupt,
1296 * but most drivers don't implement an upcall for that.
1297 * so, just use estimated completion time.
1298 * as a result, ifd->qi_ and ifd->qo_ are always synced.
1299 */
1300 nowp = &ifd->now_[ifd->qo_];
1301 /* get pkt_time (for link) in usec */
1302 #if 1 /* use approximation */
1303 pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
1304 pkt_time = NSEC_TO_USEC(pkt_time);
1305 #else
1306 pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
1307 #endif
1308 #if 1 /* ALTQ4PPP */
1309 if (TV_LT(nowp, &ifd->ifnow_)) {
1310 int iftime;
1311
1312 /*
1313 * make sure the estimated completion time does not go
1314 * too far. it can happen when the link layer supports
1315 * data compression or the interface speed is set to
1316 * a much lower value.
1317 */
1318 TV_DELTA(&ifd->ifnow_, nowp, iftime);
1319 if (iftime+pkt_time < ifd->maxiftime_) {
1320 TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1321 } else {
1322 TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
1323 }
1324 } else {
1325 TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1326 }
1327 #else
1328 if (TV_LT(nowp, &ifd->ifnow_)) {
1329 TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1330 } else {
1331 TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1332 }
1333 #endif
1334
1335 while (cl != NULL) {
1336 TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
1337 if (idle >= 2000000)
1338 /*
1339 * this class is idle enough, reset avgidle.
1340 * (TV_DELTA returns 2000000 us when delta is large.)
1341 */
1342 cl->avgidle_ = cl->maxidle_;
1343
1344 /* get pkt_time (for class) in usec */
1345 #if 1 /* use approximation */
1346 pkt_time = pktlen * cl->ns_per_byte_;
1347 pkt_time = NSEC_TO_USEC(pkt_time);
1348 #else
1349 pkt_time = pktlen * cl->ns_per_byte_ / 1000;
1350 #endif
1351 idle -= pkt_time;
1352
1353 avgidle = cl->avgidle_;
1354 avgidle += idle - (avgidle >> RM_FILTER_GAIN);
1355 cl->avgidle_ = avgidle;
1356
1357 /* Are we overlimit ? */
1358 if (avgidle <= 0) {
1359 CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
1360 #if 1 /* ALTQ */
1361 /*
1362 * need some lower bound for avgidle, otherwise
1363 * a borrowing class gets unbounded penalty.
1364 */
1365 if (avgidle < cl->minidle_)
1366 avgidle = cl->avgidle_ = cl->minidle_;
1367 #endif
1368 /* set next idle to make avgidle 0 */
1369 tidle = pkt_time +
1370 (((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
1371 TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
1372 ++cl->stats_.over;
1373 } else {
1374 cl->avgidle_ =
1375 (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
1376 cl->undertime_.tv_sec = 0;
1377 if (cl->sleeping_) {
1378 callout_stop(&cl->callout_);
1379 cl->sleeping_ = 0;
1380 }
1381 }
1382
1383 if (borrows != NULL) {
1384 if (borrows != cl)
1385 ++cl->stats_.borrows;
1386 else
1387 borrows = NULL;
1388 }
1389 cl->last_ = ifd->ifnow_;
1390 cl->last_pkttime_ = pkt_time;
1391
1392 #if 1
1393 if (cl->parent_ == NULL) {
1394 /* take stats of root class */
1395 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1396 }
1397 #endif
1398
1399 cl = cl->parent_;
1400 }
1401
1402 /*
1403 * Check to see if cutoff needs to set to a new level.
1404 */
1405 cl = ifd->class_[ifd->qo_];
1406 if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
1407 #if 1 /* ALTQ */
1408 if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
1409 rmc_tl_satisfied(ifd, nowp);
1410 CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
1411 } else {
1412 ifd->cutoff_ = borrowed->depth_;
1413 CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
1414 }
1415 #else /* !ALTQ */
1416 if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) {
1417 reset_cutoff(ifd);
1418 #ifdef notdef
1419 rmc_tl_satisfied(ifd, &now);
1420 #endif
1421 CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
1422 } else {
1423 ifd->cutoff_ = borrowed->depth_;
1424 CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
1425 }
1426 #endif /* !ALTQ */
1427 }
1428
1429 /*
1430 * Release class slot
1431 */
1432 ifd->borrowed_[ifd->qo_] = NULL;
1433 ifd->class_[ifd->qo_] = NULL;
1434 ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
1435 ifd->queued_--;
1436 }
1437
1438 /*
1439 * void
1440 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
1441 * over-limit action routines. These get invoked by rmc_under_limit()
1442  *	if a class with packets to send is over its bandwidth limit & can't
1443 * borrow from a parent class.
1444 *
1445 * Returns: NONE
1446 */
1447
1448 static void
1449 rmc_drop_action(struct rm_class *cl)
1450 {
1451 struct rm_ifdat *ifd = cl->ifdat_;
1452
1453 KKASSERT(qlen(cl->q_) > 0);
1454 _rmc_dropq(cl);
1455 if (qempty(cl->q_))
1456 ifd->na_[cl->pri_]--;
1457 }
1458
1459 void
1460 rmc_dropall(struct rm_class *cl)
1461 {
1462 struct rm_ifdat *ifd = cl->ifdat_;
1463
1464 if (!qempty(cl->q_)) {
1465 _flushq(cl->q_);
1466
1467 ifd->na_[cl->pri_]--;
1468 }
1469 }
1470
1471 /*
1472 * void
1473 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
1474 * delay action routine. It is invoked via rmc_under_limit when the
1475  *	packet is discovered to be overlimit.
1476 *
1477 * If the delay action is result of borrow class being overlimit, then
1478 * delay for the offtime of the borrowing class that is overlimit.
1479 *
1480 * Returns: NONE
1481 */
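/*
 * Rough worked example of the suspend time computed below (hypothetical
 * numbers; ustick is the length of a clock tick in microseconds):
 *
 *	undertime_ - overtime_ = 1500 usec	(TV_DELTA -> delay)
 *	offtime_               = 4000 usec
 *	last_pkttime_          =  800 usec
 *
 * With BORROW_OFFTIME defined, no borrow class and the ALTQ adjustment
 * applied, extradelay = 4000 - 800 = 3200 and delay becomes 4700 usec.
 * At hz = 100 (ustick = 10000) that is under two ticks, so the callout is
 * armed for the 2-tick minimum; at hz = 10000 (ustick = 100) it rounds up
 * to 47 ticks.
 */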
1482
1483 void
1484 rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
1485 {
1486 int delay, t, extradelay;
1487
1488 cl->stats_.overactions++;
1489 TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
1490 #ifndef BORROW_OFFTIME
1491 delay += cl->offtime_;
1492 #endif
1493
1494 if (!cl->sleeping_) {
1495 CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
1496 #ifdef BORROW_OFFTIME
1497 if (borrow != NULL)
1498 extradelay = borrow->offtime_;
1499 else
1500 #endif
1501 extradelay = cl->offtime_;
1502
1503 #ifdef ALTQ
1504 /*
1505 * XXX recalculate suspend time:
1506 * current undertime is (tidle + pkt_time) calculated
1507 * from the last transmission.
1508 * tidle: time required to bring avgidle back to 0
1509 * pkt_time: target waiting time for this class
1510 * we need to replace pkt_time by offtime
1511 */
1512 extradelay -= cl->last_pkttime_;
1513 #endif
1514 if (extradelay > 0) {
1515 TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
1516 delay += extradelay;
1517 }
1518
1519 cl->sleeping_ = 1;
1520 cl->stats_.delays++;
1521
1522 /*
1523 * Since packets are phased randomly with respect to the
1524 * clock, 1 tick (the next clock tick) can be an arbitrarily
1525 * short time so we have to wait for at least two ticks.
1526 * NOTE: If there's no other traffic, we need the timer as
1527 * a 'backstop' to restart this class.
1528 */
1529 if (delay > ustick * 2)
1530 t = (delay + ustick - 1) / ustick;
1531 else
1532 t = 2;
1533 callout_reset_bycpu(&cl->callout_, t, rmc_restart, cl, 0);
1534 }
1535 }
1536
1537 /*
1538 * void
1539 * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
1540  *	called by the system timer code & is responsible for checking if the
1541 * class is still sleeping (it might have been restarted as a side
1542 * effect of the queue scan on a packet arrival) and, if so, restarting
1543 * output for the class. Inspecting the class state & restarting output
1544 * require locking the class structure. In general the driver is
1545 * responsible for locking but this is the only routine that is not
1546  *	called directly or indirectly from the interface driver so it has to
1547 * know about system locking conventions. Under bsd, locking is done
1548 * by raising IPL to splimp so that's what's implemented here. On a
1549 * different system this would probably need to be changed.
1550 *
1551  *	Since this function is called from an independent timeout, we
1552 * have to set up the lock conditions expected for the ALTQ operation.
1553 * Note that the restart will probably fall through to an if_start.
1554 *
1555 * Returns: NONE
1556 */
1557
1558 static void
1559 rmc_restart_dispatch(netmsg_t nmsg)
1560 {
1561 struct rm_class *cl = nmsg->lmsg.u.ms_resultp;
1562 struct rm_ifdat *ifd = cl->ifdat_;
1563 struct ifaltq_subque *ifsq =
1564 &ifd->ifq_->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
1565
1566 ASSERT_NETISR0;
1567
1568 crit_enter();
1569 lwkt_replymsg(&nmsg->lmsg, 0); /* reply ASAP */
1570 crit_exit();
1571
1572 ALTQ_SQ_LOCK(ifsq);
1573 if (cl->sleeping_) {
1574 cl->sleeping_ = 0;
1575 cl->undertime_.tv_sec = 0;
1576
1577 if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
1578 CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
1579 (ifd->restart)(ifd->ifq_);
1580 }
1581 }
1582 ALTQ_SQ_UNLOCK(ifsq);
1583 }
1584
1585 static void
1586 rmc_restart(void *xcl)
1587 {
1588 struct rm_class *cl = xcl;
1589 struct lwkt_msg *lmsg = &cl->callout_nmsg_.lmsg;
1590
1591 KASSERT(mycpuid == 0, ("not on cpu0"));
1592 crit_enter();
1593 if (lmsg->ms_flags & MSGF_DONE)
1594 lwkt_sendmsg_oncpu(netisr_cpuport(0), lmsg);
1595 crit_exit();
1596 }
1597
1598 /*
1599 * void
1600  * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
1601 * handling routine for the root class of the link sharing structure.
1602 *
1603 * Returns: NONE
1604 */
1605
1606 static void
1607 rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
1608 {
1609 panic("rmc_root_overlimit");
1610 }
1611
1612 /*
1613 * Packet Queue handling routines. Eventually, this is to localize the
1614 * effects on the code whether queues are red queues or droptail
1615 * queues.
1616 */
1617
1618 static int
1619 _rmc_addq(rm_class_t *cl, struct mbuf *m)
1620 {
1621 #ifdef ALTQ_RIO
1622 if (q_is_rio(cl->q_))
1623 return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
1624 #endif
1625 #ifdef ALTQ_RED
1626 if (q_is_red(cl->q_))
1627 return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
1628 #endif /* ALTQ_RED */
1629
1630 if (cl->flags_ & RMCF_CLEARDSCP)
1631 write_dsfield(m, cl->pktattr_, 0);
1632
1633 _addq(cl->q_, m);
1634 return (0);
1635 }
1636
1637 /* note: _rmc_dropq is not called for red */
1638 static void
1639 _rmc_dropq(rm_class_t *cl)
1640 {
1641 struct mbuf *m;
1642
1643 if ((m = _getq(cl->q_)) != NULL)
1644 m_freem(m);
1645 }
1646
1647 static struct mbuf *
1648 _rmc_getq(rm_class_t *cl)
1649 {
1650 #ifdef ALTQ_RIO
1651 if (q_is_rio(cl->q_))
1652 return rio_getq((rio_t *)cl->red_, cl->q_);
1653 #endif
1654 #ifdef ALTQ_RED
1655 if (q_is_red(cl->q_))
1656 return red_getq(cl->red_, cl->q_);
1657 #endif
1658 return _getq(cl->q_);
1659 }
1660
1661 static struct mbuf *
1662 _rmc_pollq(rm_class_t *cl)
1663 {
1664 return qhead(cl->q_);
1665 }
1666
1667 #ifdef CBQ_TRACE
1668 /*
1669 * DDB hook to trace cbq events:
1670 * the last 1024 events are held in a circular buffer.
1671 * use "call cbqtrace_dump(N)" to display 20 events from Nth event.
1672 */
1673 void cbqtrace_dump(int);
1674 static char *rmc_funcname(void *);
1675
1676 static struct rmc_funcs {
1677 void *func;
1678 char *name;
1679 } rmc_funcs[] = {
1680 rmc_init, "rmc_init",
1681 rmc_queue_packet, "rmc_queue_packet",
1682 rmc_under_limit, "rmc_under_limit",
1683 rmc_update_class_util, "rmc_update_class_util",
1684 rmc_delay_action, "rmc_delay_action",
1685 rmc_restart, "rmc_restart",
1686 _rmc_wrr_dequeue_next, "_rmc_wrr_dequeue_next",
1687 NULL, NULL
1688 };
1689
1690 static char *
1691 rmc_funcname(void *func)
1692 {
1693 struct rmc_funcs *fp;
1694
1695 for (fp = rmc_funcs; fp->func != NULL; fp++) {
1696 if (fp->func == func)
1697 return (fp->name);
1698 }
1699
1700 return ("unknown");
1701 }
1702
1703 void
1704 cbqtrace_dump(int counter)
1705 {
1706 int i, *p;
1707 char *cp;
1708
1709 counter = counter % NCBQTRACE;
1710 p = (int *)&cbqtrace_buffer[counter];
1711
1712 for (i=0; i<20; i++) {
1713 kprintf("[0x%x] ", *p++);
1714 kprintf("%s: ", rmc_funcname((void *)*p++));
1715 cp = (char *)p++;
1716 kprintf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
1717 kprintf("%d\n",*p++);
1718
1719 if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
1720 p = (int *)cbqtrace_buffer;
1721 }
1722 }
1723 #endif /* CBQ_TRACE */
1724 #endif /* ALTQ_CBQ */
1725