1 /* $NetBSD: altq_hfsc.c,v 1.26 2016/04/20 08:58:48 knakahara Exp $ */
2 /* $KAME: altq_hfsc.c,v 1.26 2005/04/13 03:44:24 suz Exp $ */
3
4 /*
5 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
6 *
7 * Permission to use, copy, modify, and distribute this software and
8 * its documentation is hereby granted (including for commercial or
9 * for-profit use), provided that both the copyright notice and this
10 * permission notice appear in all copies of the software, derivative
11 * works, or modified versions, and any portions thereof.
12 *
13 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
14 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
15 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
16 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
21 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
22 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
25 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26 * DAMAGE.
27 *
28 * Carnegie Mellon encourages (but does not require) users of this
29 * software to return any improvements or extensions that they make,
30 * and to grant Carnegie Mellon the rights to redistribute these
31 * changes without encumbrance.
32 */
33 /*
34 * H-FSC is described in Proceedings of SIGCOMM'97,
35 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
36 * Real-Time and Priority Service"
37 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
38 *
39 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
40 * when a class has an upperlimit, the fit-time is computed from the
41 * upperlimit service curve. the link-sharing scheduler does not schedule
42 * a class whose fit-time exceeds the current time.
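 *
 * in this implementation the fit-time is kept in cl_f (the larger of
 * cl_myf from the upperlimit curve and cl_cfmin from the children);
 * actlist_firstfit() below skips classes whose cl_f is still in the
 * future.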
43 */
44
45 #include <sys/cdefs.h>
46 __KERNEL_RCSID(0, "$NetBSD: altq_hfsc.c,v 1.26 2016/04/20 08:58:48 knakahara Exp $");
47
48 #ifdef _KERNEL_OPT
49 #include "opt_altq.h"
50 #include "opt_inet.h"
51 #include "pf.h"
52 #endif
53
54 #ifdef ALTQ_HFSC /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */
55
56 #include <sys/param.h>
57 #include <sys/malloc.h>
58 #include <sys/mbuf.h>
59 #include <sys/socket.h>
60 #include <sys/systm.h>
61 #include <sys/errno.h>
62 #include <sys/queue.h>
63 #if 1 /* ALTQ3_COMPAT */
64 #include <sys/sockio.h>
65 #include <sys/proc.h>
66 #include <sys/kernel.h>
67 #endif /* ALTQ3_COMPAT */
68 #include <sys/kauth.h>
69
70 #include <net/if.h>
71 #include <netinet/in.h>
72
73 #if NPF > 0
74 #include <net/pfvar.h>
75 #endif
76 #include <altq/altq.h>
77 #include <altq/altq_hfsc.h>
78 #ifdef ALTQ3_COMPAT
79 #include <altq/altq_conf.h>
80 #endif
81
82 /*
83 * function prototypes
84 */
85 static int hfsc_clear_interface(struct hfsc_if *);
86 static int hfsc_request(struct ifaltq *, int, void *);
87 static void hfsc_purge(struct hfsc_if *);
88 static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
89 struct service_curve *, struct service_curve *, struct service_curve *,
90 struct hfsc_class *, int, int, int);
91 static int hfsc_class_destroy(struct hfsc_class *);
92 static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
93 static int hfsc_enqueue(struct ifaltq *, struct mbuf *);
94 static struct mbuf *hfsc_dequeue(struct ifaltq *, int);
95
96 static int hfsc_addq(struct hfsc_class *, struct mbuf *);
97 static struct mbuf *hfsc_getq(struct hfsc_class *);
98 static struct mbuf *hfsc_pollq(struct hfsc_class *);
99 static void hfsc_purgeq(struct hfsc_class *);
100
101 static void update_cfmin(struct hfsc_class *);
102 static void set_active(struct hfsc_class *, int);
103 static void set_passive(struct hfsc_class *);
104
105 static void init_ed(struct hfsc_class *, int);
106 static void update_ed(struct hfsc_class *, int);
107 static void update_d(struct hfsc_class *, int);
108 static void init_vf(struct hfsc_class *, int);
109 static void update_vf(struct hfsc_class *, int, u_int64_t);
110 static ellist_t *ellist_alloc(void);
111 static void ellist_destroy(ellist_t *);
112 static void ellist_insert(struct hfsc_class *);
113 static void ellist_remove(struct hfsc_class *);
114 static void ellist_update(struct hfsc_class *);
115 struct hfsc_class *ellist_get_mindl(ellist_t *, u_int64_t);
116 static actlist_t *actlist_alloc(void);
117 static void actlist_destroy(actlist_t *);
118 static void actlist_insert(struct hfsc_class *);
119 static void actlist_remove(struct hfsc_class *);
120 static void actlist_update(struct hfsc_class *);
121
122 static struct hfsc_class *actlist_firstfit(struct hfsc_class *,
123 u_int64_t);
124
125 static inline u_int64_t seg_x2y(u_int64_t, u_int64_t);
126 static inline u_int64_t seg_y2x(u_int64_t, u_int64_t);
127 static inline u_int64_t m2sm(u_int);
128 static inline u_int64_t m2ism(u_int);
129 static inline u_int64_t d2dx(u_int);
130 static u_int sm2m(u_int64_t);
131 static u_int dx2d(u_int64_t);
132
133 static void sc2isc(struct service_curve *, struct internal_sc *);
134 static void rtsc_init(struct runtime_sc *, struct internal_sc *,
135 u_int64_t, u_int64_t);
136 static u_int64_t rtsc_y2x(struct runtime_sc *, u_int64_t);
137 static u_int64_t rtsc_x2y(struct runtime_sc *, u_int64_t);
138 static void rtsc_min(struct runtime_sc *, struct internal_sc *,
139 u_int64_t, u_int64_t);
140
141 static void get_class_stats(struct hfsc_classstats *,
142 struct hfsc_class *);
143 static struct hfsc_class *clh_to_clp(struct hfsc_if *, u_int32_t);
144
145
146 #ifdef ALTQ3_COMPAT
147 static struct hfsc_if *hfsc_attach(struct ifaltq *, u_int);
148 static void hfsc_detach(struct hfsc_if *);
149 static int hfsc_class_modify(struct hfsc_class *, struct service_curve *,
150 struct service_curve *, struct service_curve *);
151
152 static int hfsccmd_if_attach(struct hfsc_attach *);
153 static int hfsccmd_if_detach(struct hfsc_interface *);
154 static int hfsccmd_add_class(struct hfsc_add_class *);
155 static int hfsccmd_delete_class(struct hfsc_delete_class *);
156 static int hfsccmd_modify_class(struct hfsc_modify_class *);
157 static int hfsccmd_add_filter(struct hfsc_add_filter *);
158 static int hfsccmd_delete_filter(struct hfsc_delete_filter *);
159 static int hfsccmd_class_stats(struct hfsc_class_stats *);
160
161 altqdev_decl(hfsc);
162 #endif /* ALTQ3_COMPAT */
163
164 /*
165 * macros
166 */
167 #define is_a_parent_class(cl) ((cl)->cl_children != NULL)
168
169 #define HT_INFINITY 0xffffffffffffffffLL /* infinite time value */
170
171 #ifdef ALTQ3_COMPAT
172 /* hif_list keeps all hfsc_if's allocated. */
173 static struct hfsc_if *hif_list = NULL;
174 #endif /* ALTQ3_COMPAT */
175
176 #if NPF > 0
177 int
178 hfsc_pfattach(struct pf_altq *a)
179 {
180 struct ifnet *ifp;
181 int s, error;
182
183 if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
184 return (EINVAL);
185 s = splnet();
186 error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
187 hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
188 splx(s);
189 return (error);
190 }
191
192 int
193 hfsc_add_altq(struct pf_altq *a)
194 {
195 struct hfsc_if *hif;
196 struct ifnet *ifp;
197
198 if ((ifp = ifunit(a->ifname)) == NULL)
199 return (EINVAL);
200 if (!ALTQ_IS_READY(&ifp->if_snd))
201 return (ENODEV);
202
203 hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK|M_ZERO);
204 if (hif == NULL)
205 return (ENOMEM);
206
207 hif->hif_eligible = ellist_alloc();
208 if (hif->hif_eligible == NULL) {
209 free(hif, M_DEVBUF);
210 return (ENOMEM);
211 }
212
213 hif->hif_ifq = &ifp->if_snd;
214
215 /* keep the state in pf_altq */
216 a->altq_disc = hif;
217
218 return (0);
219 }
220
221 int
222 hfsc_remove_altq(struct pf_altq *a)
223 {
224 struct hfsc_if *hif;
225
226 if ((hif = a->altq_disc) == NULL)
227 return (EINVAL);
228 a->altq_disc = NULL;
229
230 (void)hfsc_clear_interface(hif);
231 (void)hfsc_class_destroy(hif->hif_rootclass);
232
233 ellist_destroy(hif->hif_eligible);
234
235 free(hif, M_DEVBUF);
236
237 return (0);
238 }
239
240 int
241 hfsc_add_queue(struct pf_altq *a)
242 {
243 struct hfsc_if *hif;
244 struct hfsc_class *cl, *parent;
245 struct hfsc_opts *opts;
246 struct service_curve rtsc, lssc, ulsc;
247
248 if ((hif = a->altq_disc) == NULL)
249 return (EINVAL);
250
251 opts = &a->pq_u.hfsc_opts;
252
253 if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
254 hif->hif_rootclass == NULL)
255 parent = NULL;
256 else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
257 return (EINVAL);
258
259 if (a->qid == 0)
260 return (EINVAL);
261
262 if (clh_to_clp(hif, a->qid) != NULL)
263 return (EBUSY);
264
265 rtsc.m1 = opts->rtsc_m1;
266 rtsc.d = opts->rtsc_d;
267 rtsc.m2 = opts->rtsc_m2;
268 lssc.m1 = opts->lssc_m1;
269 lssc.d = opts->lssc_d;
270 lssc.m2 = opts->lssc_m2;
271 ulsc.m1 = opts->ulsc_m1;
272 ulsc.d = opts->ulsc_d;
273 ulsc.m2 = opts->ulsc_m2;
274
275 cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
276 parent, a->qlimit, opts->flags, a->qid);
277 if (cl == NULL)
278 return (ENOMEM);
279
280 return (0);
281 }
282
283 int
284 hfsc_remove_queue(struct pf_altq *a)
285 {
286 struct hfsc_if *hif;
287 struct hfsc_class *cl;
288
289 if ((hif = a->altq_disc) == NULL)
290 return (EINVAL);
291
292 if ((cl = clh_to_clp(hif, a->qid)) == NULL)
293 return (EINVAL);
294
295 return (hfsc_class_destroy(cl));
296 }
297
298 int
299 hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
300 {
301 struct hfsc_if *hif;
302 struct hfsc_class *cl;
303 struct hfsc_classstats stats;
304 int error = 0;
305
306 if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
307 return (EBADF);
308
309 if ((cl = clh_to_clp(hif, a->qid)) == NULL)
310 return (EINVAL);
311
312 if (*nbytes < sizeof(stats))
313 return (EINVAL);
314
315 get_class_stats(&stats, cl);
316
317 if ((error = copyout((void *)&stats, ubuf, sizeof(stats))) != 0)
318 return (error);
319 *nbytes = sizeof(stats);
320 return (0);
321 }
322 #endif /* NPF > 0 */
323
324 /*
325 * bring the interface back to the initial state by discarding
326 * all the filters and classes except the root class.
327 */
328 static int
329 hfsc_clear_interface(struct hfsc_if *hif)
330 {
331 struct hfsc_class *cl;
332
333 #ifdef ALTQ3_COMPAT
334 /* free the filters for this interface */
335 acc_discard_filters(&hif->hif_classifier, NULL, 1);
336 #endif
337
338 /* clear out the classes */
339 while (hif->hif_rootclass != NULL &&
340 (cl = hif->hif_rootclass->cl_children) != NULL) {
341 /*
342 * remove the first leaf class found in the hierarchy
343 * then start over
344 */
345 for (; cl != NULL; cl = hfsc_nextclass(cl)) {
346 if (!is_a_parent_class(cl)) {
347 (void)hfsc_class_destroy(cl);
348 break;
349 }
350 }
351 }
352
353 return (0);
354 }
355
356 static int
357 hfsc_request(struct ifaltq *ifq, int req, void *arg)
358 {
359 struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
360
361 switch (req) {
362 case ALTRQ_PURGE:
363 hfsc_purge(hif);
364 break;
365 }
366 return (0);
367 }
368
369 /* discard all the queued packets on the interface */
370 static void
371 hfsc_purge(struct hfsc_if *hif)
372 {
373 struct hfsc_class *cl;
374
375 for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
376 if (!qempty(cl->cl_q))
377 hfsc_purgeq(cl);
378 if (ALTQ_IS_ENABLED(hif->hif_ifq))
379 hif->hif_ifq->ifq_len = 0;
380 }
381
382 struct hfsc_class *
383 hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
384 struct service_curve *fsc, struct service_curve *usc,
385 struct hfsc_class *parent, int qlimit, int flags, int qid)
386 {
387 struct hfsc_class *cl, *p;
388 int i, s;
389
390 if (hif->hif_classes >= HFSC_MAX_CLASSES)
391 return (NULL);
392
393 #ifndef ALTQ_RED
394 if (flags & HFCF_RED) {
395 #ifdef ALTQ_DEBUG
396 printf("hfsc_class_create: RED not configured for HFSC!\n");
397 #endif
398 return (NULL);
399 }
400 #endif
401
402 cl = malloc(sizeof(struct hfsc_class), M_DEVBUF, M_WAITOK|M_ZERO);
403 if (cl == NULL)
404 return (NULL);
405
406 cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_WAITOK|M_ZERO);
407 if (cl->cl_q == NULL)
408 goto err_ret;
409
410 cl->cl_actc = actlist_alloc();
411 if (cl->cl_actc == NULL)
412 goto err_ret;
413
414 if (qlimit == 0)
415 qlimit = 50; /* use default */
416 qlimit(cl->cl_q) = qlimit;
417 qtype(cl->cl_q) = Q_DROPTAIL;
418 qlen(cl->cl_q) = 0;
419 cl->cl_flags = flags;
420 #ifdef ALTQ_RED
421 if (flags & (HFCF_RED|HFCF_RIO)) {
422 int red_flags, red_pkttime;
423 u_int m2;
424
425 m2 = 0;
426 if (rsc != NULL && rsc->m2 > m2)
427 m2 = rsc->m2;
428 if (fsc != NULL && fsc->m2 > m2)
429 m2 = fsc->m2;
430 if (usc != NULL && usc->m2 > m2)
431 m2 = usc->m2;
432
433 red_flags = 0;
434 if (flags & HFCF_ECN)
435 red_flags |= REDF_ECN;
436 #ifdef ALTQ_RIO
437 if (flags & HFCF_CLEARDSCP)
438 red_flags |= RIOF_CLEARDSCP;
439 #endif
440 if (m2 < 8)
441 red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
442 else
443 red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
444 * 1000 * 1000 * 1000 / (m2 / 8);
445 if (flags & HFCF_RED) {
446 cl->cl_red = red_alloc(0, 0,
447 qlimit(cl->cl_q) * 10/100,
448 qlimit(cl->cl_q) * 30/100,
449 red_flags, red_pkttime);
450 if (cl->cl_red != NULL)
451 qtype(cl->cl_q) = Q_RED;
452 }
453 #ifdef ALTQ_RIO
454 else {
455 cl->cl_red = (red_t *)rio_alloc(0, NULL,
456 red_flags, red_pkttime);
457 if (cl->cl_red != NULL)
458 qtype(cl->cl_q) = Q_RIO;
459 }
460 #endif
461 }
462 #endif /* ALTQ_RED */
463
464 if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
465 cl->cl_rsc = malloc(sizeof(struct internal_sc), M_DEVBUF,
466 M_WAITOK|M_ZERO);
467 if (cl->cl_rsc == NULL)
468 goto err_ret;
469 sc2isc(rsc, cl->cl_rsc);
470 rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
471 rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
472 }
473 if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
474 cl->cl_fsc = malloc(sizeof(struct internal_sc), M_DEVBUF,
475 M_WAITOK|M_ZERO);
476 if (cl->cl_fsc == NULL)
477 goto err_ret;
478 sc2isc(fsc, cl->cl_fsc);
479 rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
480 }
481 if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
482 cl->cl_usc = malloc(sizeof(struct internal_sc), M_DEVBUF,
483 M_WAITOK|M_ZERO);
484 if (cl->cl_usc == NULL)
485 goto err_ret;
486 sc2isc(usc, cl->cl_usc);
487 rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
488 }
489
490 cl->cl_id = hif->hif_classid++;
491 cl->cl_handle = qid;
492 cl->cl_hif = hif;
493 cl->cl_parent = parent;
494
495 s = splnet();
496 hif->hif_classes++;
497
498 /*
499 * find a free slot in the class table. if the slot matching
500 * the lower bits of qid is free, use this slot. otherwise,
501 * use the first free slot.
502 */
503 i = qid % HFSC_MAX_CLASSES;
504 if (hif->hif_class_tbl[i] == NULL)
505 hif->hif_class_tbl[i] = cl;
506 else {
507 for (i = 0; i < HFSC_MAX_CLASSES; i++)
508 if (hif->hif_class_tbl[i] == NULL) {
509 hif->hif_class_tbl[i] = cl;
510 break;
511 }
512 if (i == HFSC_MAX_CLASSES) {
513 splx(s);
514 goto err_ret;
515 }
516 }
517
518 if (flags & HFCF_DEFAULTCLASS)
519 hif->hif_defaultclass = cl;
520
521 if (parent == NULL) {
522 /* this is root class */
523 hif->hif_rootclass = cl;
524 } else {
525 /* add this class to the children list of the parent */
526 if ((p = parent->cl_children) == NULL)
527 parent->cl_children = cl;
528 else {
529 while (p->cl_siblings != NULL)
530 p = p->cl_siblings;
531 p->cl_siblings = cl;
532 }
533 }
534 splx(s);
535
536 return (cl);
537
538 err_ret:
539 if (cl->cl_actc != NULL)
540 actlist_destroy(cl->cl_actc);
541 if (cl->cl_red != NULL) {
542 #ifdef ALTQ_RIO
543 if (q_is_rio(cl->cl_q))
544 rio_destroy((rio_t *)cl->cl_red);
545 #endif
546 #ifdef ALTQ_RED
547 if (q_is_red(cl->cl_q))
548 red_destroy(cl->cl_red);
549 #endif
550 }
551 if (cl->cl_fsc != NULL)
552 free(cl->cl_fsc, M_DEVBUF);
553 if (cl->cl_rsc != NULL)
554 free(cl->cl_rsc, M_DEVBUF);
555 if (cl->cl_usc != NULL)
556 free(cl->cl_usc, M_DEVBUF);
557 if (cl->cl_q != NULL)
558 free(cl->cl_q, M_DEVBUF);
559 free(cl, M_DEVBUF);
560 return (NULL);
561 }
562
563 static int
564 hfsc_class_destroy(struct hfsc_class *cl)
565 {
566 int i, s;
567
568 if (cl == NULL)
569 return (0);
570
571 if (is_a_parent_class(cl))
572 return (EBUSY);
573
574 s = splnet();
575
576 #ifdef ALTQ3_COMPAT
577 /* delete filters referencing to this class */
578 acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);
579 #endif /* ALTQ3_COMPAT */
580
581 if (!qempty(cl->cl_q))
582 hfsc_purgeq(cl);
583
584 if (cl->cl_parent == NULL) {
585 /* this is root class */
586 } else {
587 struct hfsc_class *p = cl->cl_parent->cl_children;
588
589 if (p == cl)
590 cl->cl_parent->cl_children = cl->cl_siblings;
591 else do {
592 if (p->cl_siblings == cl) {
593 p->cl_siblings = cl->cl_siblings;
594 break;
595 }
596 } while ((p = p->cl_siblings) != NULL);
597 ASSERT(p != NULL);
598 }
599
600 for (i = 0; i < HFSC_MAX_CLASSES; i++)
601 if (cl->cl_hif->hif_class_tbl[i] == cl) {
602 cl->cl_hif->hif_class_tbl[i] = NULL;
603 break;
604 }
605
606 cl->cl_hif->hif_classes--;
607 splx(s);
608
609 actlist_destroy(cl->cl_actc);
610
611 if (cl->cl_red != NULL) {
612 #ifdef ALTQ_RIO
613 if (q_is_rio(cl->cl_q))
614 rio_destroy((rio_t *)cl->cl_red);
615 #endif
616 #ifdef ALTQ_RED
617 if (q_is_red(cl->cl_q))
618 red_destroy(cl->cl_red);
619 #endif
620 }
621
622 if (cl == cl->cl_hif->hif_rootclass)
623 cl->cl_hif->hif_rootclass = NULL;
624 if (cl == cl->cl_hif->hif_defaultclass)
625 cl->cl_hif->hif_defaultclass = NULL;
626
627 if (cl->cl_usc != NULL)
628 free(cl->cl_usc, M_DEVBUF);
629 if (cl->cl_fsc != NULL)
630 free(cl->cl_fsc, M_DEVBUF);
631 if (cl->cl_rsc != NULL)
632 free(cl->cl_rsc, M_DEVBUF);
633 free(cl->cl_q, M_DEVBUF);
634 free(cl, M_DEVBUF);
635
636 return (0);
637 }
638
639 /*
640 * hfsc_nextclass returns the next class in the tree.
641 * usage:
642 * for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
643 * do_something;
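 *
 * the walk is a pre-order depth-first traversal: each class is
 * returned before any of its descendants.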
644 */
645 static struct hfsc_class *
646 hfsc_nextclass(struct hfsc_class *cl)
647 {
648 if (cl->cl_children != NULL)
649 cl = cl->cl_children;
650 else if (cl->cl_siblings != NULL)
651 cl = cl->cl_siblings;
652 else {
653 while ((cl = cl->cl_parent) != NULL)
654 if (cl->cl_siblings) {
655 cl = cl->cl_siblings;
656 break;
657 }
658 }
659
660 return (cl);
661 }
662
663 /*
664 * hfsc_enqueue is an enqueue function to be registered to
665 * (*altq_enqueue) in struct ifaltq.
666 */
667 static int
668 hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m)
669 {
670 struct altq_pktattr pktattr;
671 struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
672 struct hfsc_class *cl;
673 struct m_tag *t;
674 int len;
675
676 /* grab class set by classifier */
677 if ((m->m_flags & M_PKTHDR) == 0) {
678 /* should not happen */
679 printf("altq: packet for %s does not have pkthdr\n",
680 ifq->altq_ifp->if_xname);
681 m_freem(m);
682 return (ENOBUFS);
683 }
684 cl = NULL;
685 if ((t = m_tag_find(m, PACKET_TAG_ALTQ_QID, NULL)) != NULL)
686 cl = clh_to_clp(hif, ((struct altq_tag *)(t+1))->qid);
687 #ifdef ALTQ3_COMPAT
688 else if ((ifq->altq_flags & ALTQF_CLASSIFY))
689 cl = m->m_pkthdr.pattr_class;
690 #endif
691 if (cl == NULL || is_a_parent_class(cl)) {
692 cl = hif->hif_defaultclass;
693 if (cl == NULL) {
694 m_freem(m);
695 return (ENOBUFS);
696 }
697 }
698 #ifdef ALTQ3_COMPAT
699 if (m->m_pkthdr.pattr_af != AF_UNSPEC) {
700 pktattr.pattr_class = m->m_pkthdr.pattr_class;
701 pktattr.pattr_af = m->m_pkthdr.pattr_af;
702 pktattr.pattr_hdr = m->m_pkthdr.pattr_hdr;
703
704 cl->cl_pktattr = &pktattr; /* save proto hdr used by ECN */
705 } else
706 #endif
707 cl->cl_pktattr = NULL;
708 len = m_pktlen(m);
709 if (hfsc_addq(cl, m) != 0) {
710 /* drop occurred. mbuf was freed in hfsc_addq. */
711 PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
712 return (ENOBUFS);
713 }
714 IFQ_INC_LEN(ifq);
715 cl->cl_hif->hif_packets++;
716
717 /* successfully queued. */
718 if (qlen(cl->cl_q) == 1)
719 set_active(cl, m_pktlen(m));
720
721 return (0);
722 }
723
724 /*
725 * hfsc_dequeue is a dequeue function to be registered to
726 * (*altq_dequeue) in struct ifaltq.
727 *
728 * note: ALTDQ_POLL returns the next packet without removing the packet
729 * from the queue. ALTDQ_REMOVE is a normal dequeue operation.
730 * ALTDQ_REMOVE must return the same packet if called immediately
731 * after ALTDQ_POLL.
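 *
 * a driver start routine therefore typically does, in sketch form
 * (the IFQ_POLL/IFQ_DEQUEUE macros end up calling this function):
 *	IFQ_POLL(&ifp->if_snd, m);
 *	if (m == NULL or the hardware ring is full)
 *		return;
 *	IFQ_DEQUEUE(&ifp->if_snd, m);
 *	(hand m to the hardware)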
732 */
733 static struct mbuf *
734 hfsc_dequeue(struct ifaltq *ifq, int op)
735 {
736 struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
737 struct hfsc_class *cl;
738 struct mbuf *m;
739 int len, next_len;
740 int realtime = 0;
741 u_int64_t cur_time;
742
743 if (hif->hif_packets == 0)
744 /* no packet in the tree */
745 return (NULL);
746
747 cur_time = read_machclk();
748
749 if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
750
751 cl = hif->hif_pollcache;
752 hif->hif_pollcache = NULL;
753 /* check if the class was scheduled by real-time criteria */
754 if (cl->cl_rsc != NULL)
755 realtime = (cl->cl_e <= cur_time);
756 } else {
757 /*
758 * if there are eligible classes, use real-time criteria.
759 * find the class with the minimum deadline among
760 * the eligible classes.
761 */
762 if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time))
763 != NULL) {
764 realtime = 1;
765 } else {
766 #ifdef ALTQ_DEBUG
767 int fits = 0;
768 #endif
769 /*
770 * use link-sharing criteria
771 * get the class with the minimum vt in the hierarchy
772 */
773 cl = hif->hif_rootclass;
774 while (is_a_parent_class(cl)) {
775
776 cl = actlist_firstfit(cl, cur_time);
777 if (cl == NULL) {
778 #ifdef ALTQ_DEBUG
779 if (fits > 0)
780 printf("%d fit but none found\n",fits);
781 #endif
782 return (NULL);
783 }
784 /*
785 * update parent's cl_cvtmin.
786 * don't update if the new vt is smaller.
787 */
788 if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
789 cl->cl_parent->cl_cvtmin = cl->cl_vt;
790 #ifdef ALTQ_DEBUG
791 fits++;
792 #endif
793 }
794 }
795
796 if (op == ALTDQ_POLL) {
797 hif->hif_pollcache = cl;
798 m = hfsc_pollq(cl);
799 return (m);
800 }
801 }
802
803 m = hfsc_getq(cl);
804 if (m == NULL)
805 panic("hfsc_dequeue:");
806 len = m_pktlen(m);
807 cl->cl_hif->hif_packets--;
808 IFQ_DEC_LEN(ifq);
809 PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);
810
811 update_vf(cl, len, cur_time);
812 if (realtime)
813 cl->cl_cumul += len;
814
815 if (!qempty(cl->cl_q)) {
816 if (cl->cl_rsc != NULL) {
817 /* update ed */
818 next_len = m_pktlen(qhead(cl->cl_q));
819
820 if (realtime)
821 update_ed(cl, next_len);
822 else
823 update_d(cl, next_len);
824 }
825 } else {
826 /* the class becomes passive */
827 set_passive(cl);
828 }
829
830 return (m);
831 }
832
833 static int
834 hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
835 {
836
837 #ifdef ALTQ_RIO
838 if (q_is_rio(cl->cl_q))
839 return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
840 m, cl->cl_pktattr);
841 #endif
842 #ifdef ALTQ_RED
843 if (q_is_red(cl->cl_q))
844 return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
845 #endif
846 if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
847 m_freem(m);
848 return (-1);
849 }
850
851 if (cl->cl_flags & HFCF_CLEARDSCP)
852 write_dsfield(m, cl->cl_pktattr, 0);
853
854 _addq(cl->cl_q, m);
855
856 return (0);
857 }
858
859 static struct mbuf *
860 hfsc_getq(struct hfsc_class *cl)
861 {
862 #ifdef ALTQ_RIO
863 if (q_is_rio(cl->cl_q))
864 return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
865 #endif
866 #ifdef ALTQ_RED
867 if (q_is_red(cl->cl_q))
868 return red_getq(cl->cl_red, cl->cl_q);
869 #endif
870 return _getq(cl->cl_q);
871 }
872
873 static struct mbuf *
874 hfsc_pollq(struct hfsc_class *cl)
875 {
876 return qhead(cl->cl_q);
877 }
878
879 static void
880 hfsc_purgeq(struct hfsc_class *cl)
881 {
882 struct mbuf *m;
883
884 if (qempty(cl->cl_q))
885 return;
886
887 while ((m = _getq(cl->cl_q)) != NULL) {
888 PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
889 m_freem(m);
890 cl->cl_hif->hif_packets--;
891 IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
892 }
893 ASSERT(qlen(cl->cl_q) == 0);
894
895 update_vf(cl, 0, 0); /* remove cl from the actlist */
896 set_passive(cl);
897 }
898
899 static void
900 set_active(struct hfsc_class *cl, int len)
901 {
902 if (cl->cl_rsc != NULL)
903 init_ed(cl, len);
904 if (cl->cl_fsc != NULL)
905 init_vf(cl, len);
906
907 cl->cl_stats.period++;
908 }
909
910 static void
911 set_passive(struct hfsc_class *cl)
912 {
913 if (cl->cl_rsc != NULL)
914 ellist_remove(cl);
915
916 /*
917 * the actlist is now handled in update_vf(), so update_vf(cl, 0, 0)
918 * must be called explicitly to remove a class from the actlist
919 */
920 }
921
922 static void
923 init_ed(struct hfsc_class *cl, int next_len)
924 {
925 u_int64_t cur_time;
926
927 cur_time = read_machclk();
928
929 /* update the deadline curve */
930 rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);
931
932 /*
933 * update the eligible curve.
934 * for concave, it is equal to the deadline curve.
935 * for convex, it is a linear curve with slope m2.
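 * (implemented below by zeroing dx and dy, which drops the first
 * segment of the copied deadline curve and leaves only the m2 line.)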
936 */
937 cl->cl_eligible = cl->cl_deadline;
938 if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
939 cl->cl_eligible.dx = 0;
940 cl->cl_eligible.dy = 0;
941 }
942
943 /* compute e and d */
944 cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
945 cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
946
947 ellist_insert(cl);
948 }
949
950 static void
951 update_ed(struct hfsc_class *cl, int next_len)
952 {
953 cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
954 cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
955
956 ellist_update(cl);
957 }
958
959 static void
960 update_d(struct hfsc_class *cl, int next_len)
961 {
962 cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
963 }
964
965 static void
966 init_vf(struct hfsc_class *cl, int len)
967 {
968 struct hfsc_class *max_cl, *p;
969 u_int64_t vt, f, cur_time;
970 int go_active;
971
972 cur_time = 0;
973 go_active = 1;
974 for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
975
976 if (go_active && cl->cl_nactive++ == 0)
977 go_active = 1;
978 else
979 go_active = 0;
980
981 if (go_active) {
982 max_cl = actlist_last(cl->cl_parent->cl_actc);
983 if (max_cl != NULL) {
984 /*
985 * set vt to the average of the min and max
986 * classes. if the parent's period didn't
987 * change, don't decrease vt of the class.
988 */
989 vt = max_cl->cl_vt;
990 if (cl->cl_parent->cl_cvtmin != 0)
991 vt = (cl->cl_parent->cl_cvtmin + vt)/2;
992
993 if (cl->cl_parent->cl_vtperiod !=
994 cl->cl_parentperiod || vt > cl->cl_vt)
995 cl->cl_vt = vt;
996 } else {
997 /*
998 * first child for a new parent backlog period.
999 * add parent's cvtmax to vtoff of children
1000 * to make a new vt (vtoff + vt) larger than
1001 * the vt in the last period for all children.
1002 */
1003 vt = cl->cl_parent->cl_cvtmax;
1004 for (p = cl->cl_parent->cl_children; p != NULL;
1005 p = p->cl_siblings)
1006 p->cl_vtoff += vt;
1007 cl->cl_vt = 0;
1008 cl->cl_parent->cl_cvtmax = 0;
1009 cl->cl_parent->cl_cvtmin = 0;
1010 }
1011 cl->cl_initvt = cl->cl_vt;
1012
1013 /* update the virtual curve */
1014 vt = cl->cl_vt + cl->cl_vtoff;
1015 rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
1016 if (cl->cl_virtual.x == vt) {
1017 cl->cl_virtual.x -= cl->cl_vtoff;
1018 cl->cl_vtoff = 0;
1019 }
1020 cl->cl_vtadj = 0;
1021
1022 cl->cl_vtperiod++; /* increment vt period */
1023 cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
1024 if (cl->cl_parent->cl_nactive == 0)
1025 cl->cl_parentperiod++;
1026 cl->cl_f = 0;
1027
1028 actlist_insert(cl);
1029
1030 if (cl->cl_usc != NULL) {
1031 /* class has upper limit curve */
1032 if (cur_time == 0)
1033 cur_time = read_machclk();
1034
1035 /* update the ulimit curve */
1036 rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
1037 cl->cl_total);
1038 /* compute myf */
1039 cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
1040 cl->cl_total);
1041 cl->cl_myfadj = 0;
1042 }
1043 }
1044
1045 if (cl->cl_myf > cl->cl_cfmin)
1046 f = cl->cl_myf;
1047 else
1048 f = cl->cl_cfmin;
1049 if (f != cl->cl_f) {
1050 cl->cl_f = f;
1051 update_cfmin(cl->cl_parent);
1052 }
1053 }
1054 }
1055
1056 static void
1057 update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
1058 {
1059 u_int64_t f, myf_bound, delta;
1060 int go_passive;
1061
1062 go_passive = qempty(cl->cl_q);
1063
1064 for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
1065
1066 cl->cl_total += len;
1067
1068 if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
1069 continue;
1070
1071 if (go_passive && --cl->cl_nactive == 0)
1072 go_passive = 1;
1073 else
1074 go_passive = 0;
1075
1076 if (go_passive) {
1077 /* no more active child, going passive */
1078
1079 /* update cvtmax of the parent class */
1080 if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
1081 cl->cl_parent->cl_cvtmax = cl->cl_vt;
1082
1083 /* remove this class from the vt list */
1084 actlist_remove(cl);
1085
1086 update_cfmin(cl->cl_parent);
1087
1088 continue;
1089 }
1090
1091 /*
1092 * update vt and f
1093 */
1094 cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
1095 - cl->cl_vtoff + cl->cl_vtadj;
1096
1097 /*
1098 * if vt of the class is smaller than cvtmin,
1099 * the class was skipped in the past due to non-fit.
1100 * if so, we need to adjust vtadj.
1101 */
1102 if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
1103 cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
1104 cl->cl_vt = cl->cl_parent->cl_cvtmin;
1105 }
1106
1107 /* update the vt list */
1108 actlist_update(cl);
1109
1110 if (cl->cl_usc != NULL) {
1111 cl->cl_myf = cl->cl_myfadj
1112 + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);
1113
1114 /*
1115 * if myf lags behind by more than one clock tick
1116 * from the current time, adjust myfadj to prevent
1117 * a rate-limited class from going greedy.
1118 * in a steady state under rate-limiting, myf
1119 * fluctuates within one clock tick.
1120 */
1121 myf_bound = cur_time - machclk_per_tick;
1122 if (cl->cl_myf < myf_bound) {
1123 delta = cur_time - cl->cl_myf;
1124 cl->cl_myfadj += delta;
1125 cl->cl_myf += delta;
1126 }
1127 }
1128
1129 /* cl_f is max(cl_myf, cl_cfmin) */
1130 if (cl->cl_myf > cl->cl_cfmin)
1131 f = cl->cl_myf;
1132 else
1133 f = cl->cl_cfmin;
1134 if (f != cl->cl_f) {
1135 cl->cl_f = f;
1136 update_cfmin(cl->cl_parent);
1137 }
1138 }
1139 }
1140
1141 static void
1142 update_cfmin(struct hfsc_class *cl)
1143 {
1144 struct hfsc_class *p;
1145 u_int64_t cfmin;
1146
1147 if (TAILQ_EMPTY(cl->cl_actc)) {
1148 cl->cl_cfmin = 0;
1149 return;
1150 }
1151 cfmin = HT_INFINITY;
1152 TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
1153 if (p->cl_f == 0) {
1154 cl->cl_cfmin = 0;
1155 return;
1156 }
1157 if (p->cl_f < cfmin)
1158 cfmin = p->cl_f;
1159 }
1160 cl->cl_cfmin = cfmin;
1161 }
1162
1163 /*
1164 * TAILQ based ellist and actlist implementation
1165 * (ion wanted to make a calendar queue based implementation)
1166 */
1167 /*
1168 * eligible list holds backlogged classes being sorted by their eligible times.
1169 * there is one eligible list per interface.
1170 */
1171
1172 static ellist_t *
1173 ellist_alloc(void)
1174 {
1175 ellist_t *head;
1176
1177 head = malloc(sizeof(ellist_t), M_DEVBUF, M_WAITOK);
1178 TAILQ_INIT(head);
1179 return (head);
1180 }
1181
1182 static void
1183 ellist_destroy(ellist_t *head)
1184 {
1185 free(head, M_DEVBUF);
1186 }
1187
1188 static void
1189 ellist_insert(struct hfsc_class *cl)
1190 {
1191 struct hfsc_if *hif = cl->cl_hif;
1192 struct hfsc_class *p;
1193
1194 /* check the last entry first */
1195 if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
1196 p->cl_e <= cl->cl_e) {
1197 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
1198 return;
1199 }
1200
1201 TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
1202 if (cl->cl_e < p->cl_e) {
1203 TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
1204 return;
1205 }
1206 }
1207 ASSERT(0); /* should not reach here */
1208 }
1209
1210 static void
1211 ellist_remove(struct hfsc_class *cl)
1212 {
1213 struct hfsc_if *hif = cl->cl_hif;
1214
1215 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
1216 }
1217
1218 static void
1219 ellist_update(struct hfsc_class *cl)
1220 {
1221 struct hfsc_if *hif = cl->cl_hif;
1222 struct hfsc_class *p, *last;
1223
1224 /*
1225 * the eligible time of a class increases monotonically.
1226 * if the next entry has a larger eligible time, nothing to do.
1227 */
1228 p = TAILQ_NEXT(cl, cl_ellist);
1229 if (p == NULL || cl->cl_e <= p->cl_e)
1230 return;
1231
1232 /* check the last entry */
1233 last = TAILQ_LAST(hif->hif_eligible, _eligible);
1234 ASSERT(last != NULL);
1235 if (last->cl_e <= cl->cl_e) {
1236 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
1237 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
1238 return;
1239 }
1240
1241 /*
1242 * the new position must be between the next entry
1243 * and the last entry
1244 */
1245 while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
1246 if (cl->cl_e < p->cl_e) {
1247 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
1248 TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
1249 return;
1250 }
1251 }
1252 ASSERT(0); /* should not reach here */
1253 }
1254
1255 /* find the class with the minimum deadline among the eligible classes */
1256 struct hfsc_class *
1257 ellist_get_mindl(ellist_t *head, u_int64_t cur_time)
1258 {
1259 struct hfsc_class *p, *cl = NULL;
1260
1261 TAILQ_FOREACH(p, head, cl_ellist) {
1262 if (p->cl_e > cur_time)
1263 break;
1264 if (cl == NULL || p->cl_d < cl->cl_d)
1265 cl = p;
1266 }
1267 return (cl);
1268 }
1269
1270 /*
1271 * active children list holds backlogged child classes being sorted
1272 * by their virtual time.
1273 * each intermediate class has one active children list.
1274 */
1275 static actlist_t *
1276 actlist_alloc(void)
1277 {
1278 actlist_t *head;
1279
1280 head = malloc(sizeof(actlist_t), M_DEVBUF, M_WAITOK);
1281 TAILQ_INIT(head);
1282 return (head);
1283 }
1284
1285 static void
1286 actlist_destroy(actlist_t *head)
1287 {
1288 free(head, M_DEVBUF);
1289 }
1290 static void
1291 actlist_insert(struct hfsc_class *cl)
1292 {
1293 struct hfsc_class *p;
1294
1295 /* check the last entry first */
1296 if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
1297 || p->cl_vt <= cl->cl_vt) {
1298 TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
1299 return;
1300 }
1301
1302 TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
1303 if (cl->cl_vt < p->cl_vt) {
1304 TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
1305 return;
1306 }
1307 }
1308 ASSERT(0); /* should not reach here */
1309 }
1310
1311 static void
1312 actlist_remove(struct hfsc_class *cl)
1313 {
1314 TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
1315 }
1316
1317 static void
1318 actlist_update(struct hfsc_class *cl)
1319 {
1320 struct hfsc_class *p, *last;
1321
1322 /*
1323 * the virtual time of a class increases monotonically during its
1324 * backlogged period.
1325 * if the next entry has a larger virtual time, nothing to do.
1326 */
1327 p = TAILQ_NEXT(cl, cl_actlist);
1328 if (p == NULL || cl->cl_vt < p->cl_vt)
1329 return;
1330
1331 /* check the last entry */
1332 last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
1333 ASSERT(last != NULL);
1334 if (last->cl_vt <= cl->cl_vt) {
1335 TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
1336 TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
1337 return;
1338 }
1339
1340 /*
1341 * the new position must be between the next entry
1342 * and the last entry
1343 */
1344 while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
1345 if (cl->cl_vt < p->cl_vt) {
1346 TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
1347 TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
1348 return;
1349 }
1350 }
1351 ASSERT(0); /* should not reach here */
1352 }
1353
1354 static struct hfsc_class *
1355 actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
1356 {
1357 struct hfsc_class *p;
1358
1359 TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
1360 if (p->cl_f <= cur_time)
1361 return (p);
1362 }
1363 return (NULL);
1364 }
1365
1366 /*
1367 * service curve support functions
1368 *
1369 * external service curve parameters
1370 * m: bits/sec
1371 * d: msec
1372 * internal service curve parameters
1373 * sm: (bytes/tsc_interval) << SM_SHIFT
1374 * ism: (tsc_count/byte) << ISM_SHIFT
1375 * dx: tsc_count
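 *
 * an internal service curve is the two-piece linear function
 *	y(x) = sm1 * x			for x <= dx
 *	y(x) = dy + sm2 * (x - dx)	for x >  dx
 * (in the scaled units above, with dy = seg_x2y(dx, sm1); the segments
 * are evaluated by seg_x2y()/seg_y2x() below.)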
1376 *
1377 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
1378 * we should be able to handle 100K-1Gbps linkspeed with 200MHz-1GHz CPU
1379 * speed. SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
1380 * digits in decimal using the following table.
1381 *
1382 * bits/sec 100Kbps 1Mbps 10Mbps 100Mbps 1Gbps
1383 * ----------+-------------------------------------------------------
1384 * bytes/nsec 12.5e-6 125e-6 1250e-6 12500e-6 125000e-6
1385 * sm(500MHz) 25.0e-6 250e-6 2500e-6 25000e-6 250000e-6
1386 * sm(200MHz) 62.5e-6 625e-6 6250e-6 62500e-6 625000e-6
1387 *
1388 * nsec/byte 80000 8000 800 80 8
1389 * ism(500MHz) 40000 4000 400 40 4
1390 * ism(200MHz) 16000 1600 160 16 1.6
1391 */
1392 #define SM_SHIFT 24
1393 #define ISM_SHIFT 10
1394
1395 #define SM_MASK ((1LL << SM_SHIFT) - 1)
1396 #define ISM_MASK ((1LL << ISM_SHIFT) - 1)
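
/*
 * worked example (assuming a 500MHz machine clock, as in the table above):
 * for m = 10Mbps,
 *	m2sm(m)  = (10000000ULL << 24) / 8 / 500000000 = 41943
 *		   (0.0025 bytes/tick scaled by 2^24)
 *	m2ism(m) = (500000000ULL << 10) * 8 / 10000000 = 409600
 *		   (400 ticks/byte scaled by 2^10)
 */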
1397
1398 static inline u_int64_t
1399 seg_x2y(u_int64_t x, u_int64_t sm)
1400 {
1401 u_int64_t y;
1402
1403 /*
1404 * compute
1405 * y = x * sm >> SM_SHIFT
1406 * but compute it separately for the upper and lower bits of x to avoid overflow
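 * (writing x = ((x >> SM_SHIFT) << SM_SHIFT) + (x & SM_MASK), the two
 * partial results recombine exactly, since the upper part is already a
 * multiple of 2^SM_SHIFT)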
1407 */
1408 y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
1409 return (y);
1410 }
1411
1412 static inline u_int64_t
1413 seg_y2x(u_int64_t y, u_int64_t ism)
1414 {
1415 u_int64_t x;
1416
1417 if (y == 0)
1418 x = 0;
1419 else if (ism == HT_INFINITY)
1420 x = HT_INFINITY;
1421 else {
1422 x = (y >> ISM_SHIFT) * ism
1423 + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
1424 }
1425 return (x);
1426 }
1427
1428 static inline u_int64_t
1429 m2sm(u_int m)
1430 {
1431 u_int64_t sm;
1432
1433 sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
1434 return (sm);
1435 }
1436
1437 static inline u_int64_t
1438 m2ism(u_int m)
1439 {
1440 u_int64_t ism;
1441
1442 if (m == 0)
1443 ism = HT_INFINITY;
1444 else
1445 ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
1446 return (ism);
1447 }
1448
1449 static inline u_int64_t
1450 d2dx(u_int d)
1451 {
1452 u_int64_t dx;
1453
1454 dx = ((u_int64_t)d * machclk_freq) / 1000;
1455 return (dx);
1456 }
1457
1458 static u_int
1459 sm2m(u_int64_t sm)
1460 {
1461 u_int64_t m;
1462
1463 m = (sm * 8 * machclk_freq) >> SM_SHIFT;
1464 return ((u_int)m);
1465 }
1466
1467 static u_int
1468 dx2d(u_int64_t dx)
1469 {
1470 u_int64_t d;
1471
1472 d = dx * 1000 / machclk_freq;
1473 return ((u_int)d);
1474 }
1475
1476 static void
1477 sc2isc(struct service_curve *sc, struct internal_sc *isc)
1478 {
1479 isc->sm1 = m2sm(sc->m1);
1480 isc->ism1 = m2ism(sc->m1);
1481 isc->dx = d2dx(sc->d);
1482 isc->dy = seg_x2y(isc->dx, isc->sm1);
1483 isc->sm2 = m2sm(sc->m2);
1484 isc->ism2 = m2ism(sc->m2);
1485 }
1486
1487 /*
1488 * initialize the runtime service curve with the given internal
1489 * service curve starting at (x, y).
1490 */
1491 static void
1492 rtsc_init(struct runtime_sc *rtsc, struct internal_sc * isc, u_int64_t x,
1493 u_int64_t y)
1494 {
1495 rtsc->x = x;
1496 rtsc->y = y;
1497 rtsc->sm1 = isc->sm1;
1498 rtsc->ism1 = isc->ism1;
1499 rtsc->dx = isc->dx;
1500 rtsc->dy = isc->dy;
1501 rtsc->sm2 = isc->sm2;
1502 rtsc->ism2 = isc->ism2;
1503 }
1504
1505 /*
1506 * calculate the x-projection (time) of the runtime service curve for
1507 * the given y value (amount of service); the inverse of rtsc_x2y()
1508 */
1509 static u_int64_t
1510 rtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
1511 {
1512 u_int64_t x;
1513
1514 if (y < rtsc->y)
1515 x = rtsc->x;
1516 else if (y <= rtsc->y + rtsc->dy) {
1517 /* x belongs to the 1st segment */
1518 if (rtsc->dy == 0)
1519 x = rtsc->x + rtsc->dx;
1520 else
1521 x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
1522 } else {
1523 /* x belongs to the 2nd segment */
1524 x = rtsc->x + rtsc->dx
1525 + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
1526 }
1527 return (x);
1528 }
1529
1530 static u_int64_t
1531 rtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
1532 {
1533 u_int64_t y;
1534
1535 if (x <= rtsc->x)
1536 y = rtsc->y;
1537 else if (x <= rtsc->x + rtsc->dx)
1538 /* y belongs to the 1st segment */
1539 y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
1540 else
1541 /* y belongs to the 2nd segment */
1542 y = rtsc->y + rtsc->dy
1543 + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
1544 return (y);
1545 }
1546
1547 /*
1548 * update the runtime service curve by taking the minimum of the current
1549 * runtime service curve and the service curve starting at (x, y).
1550 */
1551 static void
1552 rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
1553 u_int64_t y)
1554 {
1555 u_int64_t y1, y2, dx, dy;
1556
1557 if (isc->sm1 <= isc->sm2) {
1558 /* service curve is convex */
1559 y1 = rtsc_x2y(rtsc, x);
1560 if (y1 < y)
1561 /* the current rtsc is smaller */
1562 return;
1563 rtsc->x = x;
1564 rtsc->y = y;
1565 return;
1566 }
1567
1568 /*
1569 * service curve is concave
1570 * compute the two y values of the current rtsc
1571 * y1: at x
1572 * y2: at (x + dx)
1573 */
1574 y1 = rtsc_x2y(rtsc, x);
1575 if (y1 <= y) {
1576 /* rtsc is below isc, no change to rtsc */
1577 return;
1578 }
1579
1580 y2 = rtsc_x2y(rtsc, x + isc->dx);
1581 if (y2 >= y + isc->dy) {
1582 /* rtsc is above isc, replace rtsc by isc */
1583 rtsc->x = x;
1584 rtsc->y = y;
1585 rtsc->dx = isc->dx;
1586 rtsc->dy = isc->dy;
1587 return;
1588 }
1589
1590 /*
1591 * the two curves intersect
1592 * compute the offsets (dx, dy) using the reverse
1593 * function of seg_x2y()
1594 * seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
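 * i.e. dx*sm1 >> SM_SHIFT == (dx*sm2 >> SM_SHIFT) + (y1 - y), hence
 * dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)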
1595 */
1596 dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
1597 /*
1598 * check if (x, y1) belongs to the 1st segment of rtsc.
1599 * if so, add the offset.
1600 */
1601 if (rtsc->x + rtsc->dx > x)
1602 dx += rtsc->x + rtsc->dx - x;
1603 dy = seg_x2y(dx, isc->sm1);
1604
1605 rtsc->x = x;
1606 rtsc->y = y;
1607 rtsc->dx = dx;
1608 rtsc->dy = dy;
1609 return;
1610 }
1611
1612 static void
1613 get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
1614 {
1615 sp->class_id = cl->cl_id;
1616 sp->class_handle = cl->cl_handle;
1617
1618 if (cl->cl_rsc != NULL) {
1619 sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
1620 sp->rsc.d = dx2d(cl->cl_rsc->dx);
1621 sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
1622 } else {
1623 sp->rsc.m1 = 0;
1624 sp->rsc.d = 0;
1625 sp->rsc.m2 = 0;
1626 }
1627 if (cl->cl_fsc != NULL) {
1628 sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
1629 sp->fsc.d = dx2d(cl->cl_fsc->dx);
1630 sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
1631 } else {
1632 sp->fsc.m1 = 0;
1633 sp->fsc.d = 0;
1634 sp->fsc.m2 = 0;
1635 }
1636 if (cl->cl_usc != NULL) {
1637 sp->usc.m1 = sm2m(cl->cl_usc->sm1);
1638 sp->usc.d = dx2d(cl->cl_usc->dx);
1639 sp->usc.m2 = sm2m(cl->cl_usc->sm2);
1640 } else {
1641 sp->usc.m1 = 0;
1642 sp->usc.d = 0;
1643 sp->usc.m2 = 0;
1644 }
1645
1646 sp->total = cl->cl_total;
1647 sp->cumul = cl->cl_cumul;
1648
1649 sp->d = cl->cl_d;
1650 sp->e = cl->cl_e;
1651 sp->vt = cl->cl_vt;
1652 sp->f = cl->cl_f;
1653
1654 sp->initvt = cl->cl_initvt;
1655 sp->vtperiod = cl->cl_vtperiod;
1656 sp->parentperiod = cl->cl_parentperiod;
1657 sp->nactive = cl->cl_nactive;
1658 sp->vtoff = cl->cl_vtoff;
1659 sp->cvtmax = cl->cl_cvtmax;
1660 sp->myf = cl->cl_myf;
1661 sp->cfmin = cl->cl_cfmin;
1662 sp->cvtmin = cl->cl_cvtmin;
1663 sp->myfadj = cl->cl_myfadj;
1664 sp->vtadj = cl->cl_vtadj;
1665
1666 sp->cur_time = read_machclk();
1667 sp->machclk_freq = machclk_freq;
1668
1669 sp->qlength = qlen(cl->cl_q);
1670 sp->qlimit = qlimit(cl->cl_q);
1671 sp->xmit_cnt = cl->cl_stats.xmit_cnt;
1672 sp->drop_cnt = cl->cl_stats.drop_cnt;
1673 sp->period = cl->cl_stats.period;
1674
1675 sp->qtype = qtype(cl->cl_q);
1676 #ifdef ALTQ_RED
1677 if (q_is_red(cl->cl_q))
1678 red_getstats(cl->cl_red, &sp->red[0]);
1679 #endif
1680 #ifdef ALTQ_RIO
1681 if (q_is_rio(cl->cl_q))
1682 rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
1683 #endif
1684 }
1685
1686 /* convert a class handle to the corresponding class pointer */
1687 static struct hfsc_class *
1688 clh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
1689 {
1690 int i;
1691 struct hfsc_class *cl;
1692
1693 if (chandle == 0)
1694 return (NULL);
1695 /*
1696 * first, optimistically try the slot matching the lower bits of
1697 * the handle. if that misses, fall back to a linear table search.
1698 */
1699 i = chandle % HFSC_MAX_CLASSES;
1700 if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
1701 return (cl);
1702 for (i = 0; i < HFSC_MAX_CLASSES; i++)
1703 if ((cl = hif->hif_class_tbl[i]) != NULL &&
1704 cl->cl_handle == chandle)
1705 return (cl);
1706 return (NULL);
1707 }
1708
1709 #ifdef ALTQ3_COMPAT
1710 static struct hfsc_if *
1711 hfsc_attach(struct ifaltq *ifq, u_int bandwidth)
1712 {
1713 struct hfsc_if *hif;
1714
1715 hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK|M_ZERO);
1716 if (hif == NULL)
1717 return (NULL);
1718
1719 hif->hif_eligible = ellist_alloc();
1720 if (hif->hif_eligible == NULL) {
1721 free(hif, M_DEVBUF);
1722 return NULL;
1723 }
1724
1725 hif->hif_ifq = ifq;
1726
1727 /* add this state to the hfsc list */
1728 hif->hif_next = hif_list;
1729 hif_list = hif;
1730
1731 return (hif);
1732 }
1733
1734 static void
1735 hfsc_detach(struct hfsc_if *hif)
1736 {
1737 (void)hfsc_clear_interface(hif);
1738 (void)hfsc_class_destroy(hif->hif_rootclass);
1739
1740 /* remove this interface from the hif list */
1741 if (hif_list == hif)
1742 hif_list = hif->hif_next;
1743 else {
1744 struct hfsc_if *h;
1745
1746 for (h = hif_list; h != NULL; h = h->hif_next)
1747 if (h->hif_next == hif) {
1748 h->hif_next = hif->hif_next;
1749 break;
1750 }
1751 ASSERT(h != NULL);
1752 }
1753
1754 ellist_destroy(hif->hif_eligible);
1755
1756 free(hif, M_DEVBUF);
1757 }
1758
1759 static int
1760 hfsc_class_modify(struct hfsc_class *cl, struct service_curve *rsc,
1761 struct service_curve *fsc, struct service_curve *usc)
1762 {
1763 struct internal_sc *rsc_tmp, *fsc_tmp, *usc_tmp;
1764 u_int64_t cur_time;
1765 int s;
1766
1767 rsc_tmp = fsc_tmp = usc_tmp = NULL;
1768 if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
1769 cl->cl_rsc == NULL) {
1770 rsc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
1771 M_WAITOK);
1772 if (rsc_tmp == NULL)
1773 return (ENOMEM);
1774 }
1775 if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
1776 cl->cl_fsc == NULL) {
1777 fsc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
1778 M_WAITOK);
1779 if (fsc_tmp == NULL)
1780 return (ENOMEM);
1781 }
1782 if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0) &&
1783 cl->cl_usc == NULL) {
1784 usc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
1785 M_WAITOK);
1786 if (usc_tmp == NULL)
1787 return (ENOMEM);
1788 }
1789
1790 cur_time = read_machclk();
1791 s = splnet();
1792
1793 if (rsc != NULL) {
1794 if (rsc->m1 == 0 && rsc->m2 == 0) {
1795 if (cl->cl_rsc != NULL) {
1796 if (!qempty(cl->cl_q))
1797 hfsc_purgeq(cl);
1798 free(cl->cl_rsc, M_DEVBUF);
1799 cl->cl_rsc = NULL;
1800 }
1801 } else {
1802 if (cl->cl_rsc == NULL)
1803 cl->cl_rsc = rsc_tmp;
1804 sc2isc(rsc, cl->cl_rsc);
1805 rtsc_init(&cl->cl_deadline, cl->cl_rsc, cur_time,
1806 cl->cl_cumul);
1807 cl->cl_eligible = cl->cl_deadline;
1808 if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
1809 cl->cl_eligible.dx = 0;
1810 cl->cl_eligible.dy = 0;
1811 }
1812 }
1813 }
1814
1815 if (fsc != NULL) {
1816 if (fsc->m1 == 0 && fsc->m2 == 0) {
1817 if (cl->cl_fsc != NULL) {
1818 if (!qempty(cl->cl_q))
1819 hfsc_purgeq(cl);
1820 free(cl->cl_fsc, M_DEVBUF);
1821 cl->cl_fsc = NULL;
1822 }
1823 } else {
1824 if (cl->cl_fsc == NULL)
1825 cl->cl_fsc = fsc_tmp;
1826 sc2isc(fsc, cl->cl_fsc);
1827 rtsc_init(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt,
1828 cl->cl_total);
1829 }
1830 }
1831
1832 if (usc != NULL) {
1833 if (usc->m1 == 0 && usc->m2 == 0) {
1834 if (cl->cl_usc != NULL) {
1835 free(cl->cl_usc, M_DEVBUF);
1836 cl->cl_usc = NULL;
1837 cl->cl_myf = 0;
1838 }
1839 } else {
1840 if (cl->cl_usc == NULL)
1841 cl->cl_usc = usc_tmp;
1842 sc2isc(usc, cl->cl_usc);
1843 rtsc_init(&cl->cl_ulimit, cl->cl_usc, cur_time,
1844 cl->cl_total);
1845 }
1846 }
1847
1848 if (!qempty(cl->cl_q)) {
1849 if (cl->cl_rsc != NULL)
1850 update_ed(cl, m_pktlen(qhead(cl->cl_q)));
1851 if (cl->cl_fsc != NULL)
1852 update_vf(cl, 0, cur_time);
1853 /* is this enough? */
1854 }
1855
1856 splx(s);
1857
1858 return (0);
1859 }
1860
1861 /*
1862 * hfsc device interface
1863 */
1864 int
1865 hfscopen(dev_t dev, int flag, int fmt,
1866 struct lwp *l)
1867 {
1868 if (machclk_freq == 0)
1869 init_machclk();
1870
1871 if (machclk_freq == 0) {
1872 printf("hfsc: no CPU clock available!\n");
1873 return (ENXIO);
1874 }
1875
1876 /* everything will be done when the queueing scheme is attached. */
1877 return 0;
1878 }
1879
1880 int
1881 hfscclose(dev_t dev, int flag, int fmt,
1882 struct lwp *l)
1883 {
1884 struct hfsc_if *hif;
1885
1886 while ((hif = hif_list) != NULL) {
1887 /* destroy all */
1888 if (ALTQ_IS_ENABLED(hif->hif_ifq))
1889 altq_disable(hif->hif_ifq);
1890
1891 int error = altq_detach(hif->hif_ifq);
1892 switch (error) {
1893 case 0:
1894 case ENXIO: /* already disabled */
1895 break;
1896 default:
1897 return error;
1898 }
1899 hfsc_detach(hif);
1900 }
1901
1902 return 0;
1903 }
1904
1905 int
1906 hfscioctl(dev_t dev, ioctlcmd_t cmd, void *addr, int flag,
1907 struct lwp *l)
1908 {
1909 struct hfsc_if *hif;
1910 struct hfsc_interface *ifacep;
1911 int error = 0;
1912
1913 /* check super-user privilege */
1914 switch (cmd) {
1915 case HFSC_GETSTATS:
1916 break;
1917 default:
1918 #if (__FreeBSD_version > 400000)
1919 if ((error = suser(p)) != 0)
1920 return (error);
1921 #else
1922 if ((error = kauth_authorize_network(l->l_cred,
1923 KAUTH_NETWORK_ALTQ, KAUTH_REQ_NETWORK_ALTQ_HFSC, NULL,
1924 NULL, NULL)) != 0)
1925 return (error);
1926 #endif
1927 break;
1928 }
1929
1930 switch (cmd) {
1931
1932 case HFSC_IF_ATTACH:
1933 error = hfsccmd_if_attach((struct hfsc_attach *)addr);
1934 break;
1935
1936 case HFSC_IF_DETACH:
1937 error = hfsccmd_if_detach((struct hfsc_interface *)addr);
1938 break;
1939
1940 case HFSC_ENABLE:
1941 case HFSC_DISABLE:
1942 case HFSC_CLEAR_HIERARCHY:
1943 ifacep = (struct hfsc_interface *)addr;
1944 if ((hif = altq_lookup(ifacep->hfsc_ifname,
1945 ALTQT_HFSC)) == NULL) {
1946 error = EBADF;
1947 break;
1948 }
1949
1950 switch (cmd) {
1951
1952 case HFSC_ENABLE:
1953 if (hif->hif_defaultclass == NULL) {
1954 #ifdef ALTQ_DEBUG
1955 printf("hfsc: no default class\n");
1956 #endif
1957 error = EINVAL;
1958 break;
1959 }
1960 error = altq_enable(hif->hif_ifq);
1961 break;
1962
1963 case HFSC_DISABLE:
1964 error = altq_disable(hif->hif_ifq);
1965 break;
1966
1967 case HFSC_CLEAR_HIERARCHY:
1968 hfsc_clear_interface(hif);
1969 break;
1970 }
1971 break;
1972
1973 case HFSC_ADD_CLASS:
1974 error = hfsccmd_add_class((struct hfsc_add_class *)addr);
1975 break;
1976
1977 case HFSC_DEL_CLASS:
1978 error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
1979 break;
1980
1981 case HFSC_MOD_CLASS:
1982 error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
1983 break;
1984
1985 case HFSC_ADD_FILTER:
1986 error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
1987 break;
1988
1989 case HFSC_DEL_FILTER:
1990 error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
1991 break;
1992
1993 case HFSC_GETSTATS:
1994 error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
1995 break;
1996
1997 default:
1998 error = EINVAL;
1999 break;
2000 }
2001 return error;
2002 }
2003
2004 static int
2005 hfsccmd_if_attach(struct hfsc_attach *ap)
2006 {
2007 struct hfsc_if *hif;
2008 struct ifnet *ifp;
2009 int error;
2010
2011 if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
2012 return (ENXIO);
2013
2014 if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
2015 return (ENOMEM);
2016
2017 /*
2018 * set HFSC to this ifnet structure.
2019 */
2020 if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
2021 hfsc_enqueue, hfsc_dequeue, hfsc_request,
2022 &hif->hif_classifier, acc_classify)) != 0)
2023 hfsc_detach(hif);
2024
2025 return (error);
2026 }
2027
2028 static int
2029 hfsccmd_if_detach(struct hfsc_interface *ap)
2030 {
2031 struct hfsc_if *hif;
2032 int error;
2033
2034 if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
2035 return (EBADF);
2036
2037 if (ALTQ_IS_ENABLED(hif->hif_ifq))
2038 altq_disable(hif->hif_ifq);
2039
2040 if ((error = altq_detach(hif->hif_ifq)))
2041 return (error);
2042
2043 hfsc_detach(hif);
2044 return 0;
2045 }
2046
2047 static int
2048 hfsccmd_add_class(struct hfsc_add_class *ap)
2049 {
2050 struct hfsc_if *hif;
2051 struct hfsc_class *cl, *parent;
2052 int i;
2053
2054 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2055 return (EBADF);
2056
2057 if (ap->parent_handle == HFSC_NULLCLASS_HANDLE &&
2058 hif->hif_rootclass == NULL)
2059 parent = NULL;
2060 else if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL)
2061 return (EINVAL);
2062
2063 /* assign a class handle (use a free slot number for now) */
2064 for (i = 1; i < HFSC_MAX_CLASSES; i++)
2065 if (hif->hif_class_tbl[i] == NULL)
2066 break;
2067 if (i == HFSC_MAX_CLASSES)
2068 return (EBUSY);
2069
2070 if ((cl = hfsc_class_create(hif, &ap->service_curve, NULL, NULL,
2071 parent, ap->qlimit, ap->flags, i)) == NULL)
2072 return (ENOMEM);
2073
2074 /* return a class handle to the user */
2075 ap->class_handle = i;
2076
2077 return (0);
2078 }
2079
2080 static int
2081 hfsccmd_delete_class(struct hfsc_delete_class *ap)
2082 {
2083 struct hfsc_if *hif;
2084 struct hfsc_class *cl;
2085
2086 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2087 return (EBADF);
2088
2089 if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
2090 return (EINVAL);
2091
2092 return hfsc_class_destroy(cl);
2093 }
2094
2095 static int
2096 hfsccmd_modify_class(struct hfsc_modify_class *ap)
2097 {
2098 struct hfsc_if *hif;
2099 struct hfsc_class *cl;
2100 struct service_curve *rsc = NULL;
2101 struct service_curve *fsc = NULL;
2102 struct service_curve *usc = NULL;
2103
2104 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2105 return (EBADF);
2106
2107 if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
2108 return (EINVAL);
2109
2110 if (ap->sctype & HFSC_REALTIMESC)
2111 rsc = &ap->service_curve;
2112 if (ap->sctype & HFSC_LINKSHARINGSC)
2113 fsc = &ap->service_curve;
2114 if (ap->sctype & HFSC_UPPERLIMITSC)
2115 usc = &ap->service_curve;
2116
2117 return hfsc_class_modify(cl, rsc, fsc, usc);
2118 }
2119
2120 static int
2121 hfsccmd_add_filter(struct hfsc_add_filter *ap)
2122 {
2123 struct hfsc_if *hif;
2124 struct hfsc_class *cl;
2125
2126 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2127 return (EBADF);
2128
2129 if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
2130 return (EINVAL);
2131
2132 if (is_a_parent_class(cl)) {
2133 #ifdef ALTQ_DEBUG
2134 printf("hfsccmd_add_filter: not a leaf class!\n");
2135 #endif
2136 return (EINVAL);
2137 }
2138
2139 return acc_add_filter(&hif->hif_classifier, &ap->filter,
2140 cl, &ap->filter_handle);
2141 }
2142
2143 static int
2144 hfsccmd_delete_filter(struct hfsc_delete_filter *ap)
2145 {
2146 struct hfsc_if *hif;
2147
2148 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2149 return (EBADF);
2150
2151 return acc_delete_filter(&hif->hif_classifier,
2152 ap->filter_handle);
2153 }
2154
2155 static int
2156 hfsccmd_class_stats(struct hfsc_class_stats *ap)
2157 {
2158 struct hfsc_if *hif;
2159 struct hfsc_class *cl;
2160 struct hfsc_classstats stats, *usp;
2161 int n, nclasses, error;
2162
2163 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2164 return (EBADF);
2165
2166 ap->cur_time = read_machclk();
2167 ap->machclk_freq = machclk_freq;
2168 ap->hif_classes = hif->hif_classes;
2169 ap->hif_packets = hif->hif_packets;
2170
2171 /* skip the first N classes in the tree */
2172 nclasses = ap->nskip;
2173 for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
2174 cl = hfsc_nextclass(cl), n++)
2175 ;
2176 if (n != nclasses)
2177 return (EINVAL);
2178
2179 /* then, read the next N classes in the tree */
2180 nclasses = ap->nclasses;
2181 usp = ap->stats;
2182 for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {
2183
2184 get_class_stats(&stats, cl);
2185
2186 if ((error = copyout((void *)&stats, (void *)usp++,
2187 sizeof(stats))) != 0)
2188 return (error);
2189 }
2190
2191 ap->nclasses = n;
2192
2193 return (0);
2194 }
2195
2196 #ifdef KLD_MODULE
2197
2198 static struct altqsw hfsc_sw =
2199 {"hfsc", hfscopen, hfscclose, hfscioctl};
2200
2201 ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);
2202 MODULE_DEPEND(altq_hfsc, altq_red, 1, 1, 1);
2203 MODULE_DEPEND(altq_hfsc, altq_rio, 1, 1, 1);
2204
2205 #endif /* KLD_MODULE */
2206 #endif /* ALTQ3_COMPAT */
2207
2208 #endif /* ALTQ_HFSC */
2209